Example no. 1
def main():
    # image shape: N x H x W, pixel [0, 255]
    # label shape: N x 10
    with np.load('mnist.npz', allow_pickle=True) as f:
        x_train, y_train = f['x_train'], f['y_train']
        x_test, y_test = f['x_test'], f['y_test']

    print(x_train.shape, x_train[0].max(),
          x_train[0].min())  # (60000, 28, 28) 255 0
    print(x_test.shape, x_test[0].max(),
          x_test[0].min())  # (10000, 28, 28) 255 0

    x_train = normalize_image(x_train)
    x_test = normalize_image(x_test)
    y_train = one_hot_labels(y_train)
    y_test = one_hot_labels(y_test)

    lr = 1.5e-4
    batch_size = 8

    net = LeNet()

    avgtime, accuracy = net.fit(x_train, y_train, x_test, y_test,
                                epoches=5, batch_size=batch_size, lr=lr)
    accu = net.evaluate(x_test, labels=y_test)
    print('avgtime: ', avgtime)
    #print('accuracy: ', accuracy)
    '''
    plt.plot(accuracy)
    plt.savefig('lr={}.jpg'.format(lr))
    plt.show()
    '''
    print("final accuracy {}".format(accu))
Example no. 2
def main():
    # image shape: N x H x W, pixel [0, 255]
    # label shape: N x 10
    with np.load('mnist.npz', allow_pickle=True) as f:
        x_train, y_train = f['x_train'], f['y_train']
        x_test, y_test = f['x_test'], f['y_test']

    plt.imshow(x_train[59999], cmap='gray')
    plt.show()
    print(x_train.shape, x_train[0].max(),
          x_train[0].min())  # (60000, 28, 28) 255 0
    print(x_test.shape, x_test[0].max(),
          x_test[0].min())  # (10000, 28, 28) 255 0

    x_train = normalize_image(x_train)
    x_test = normalize_image(x_test)
    y_train = one_hot_labels(y_train)
    y_test = one_hot_labels(y_test)

    net = LeNet()
    net.fit(x_train,
            y_train,
            x_test,
            y_test,
            epoches=10,
            batch_size=16,
            lr=1e-3)
    accu = net.evaluate(x_test, labels=y_test)

    print("final accuracy {}".format(accu))
Example no. 3
def LeNet_test():
    # initializing the network
    network = LeNet(BATCH_SIZE)
    network.getTheParas(MODEL_FILE)

    # load the test data
    _, _, test_imgs, _, _, test_label = util.load_data(MNIST_PATH, False)

    log_string('------------start test-----------')

    num_batch = test_imgs.shape[0] // BATCH_SIZE
    start = 0
    end = start + BATCH_SIZE
    loss = 0.0
    total_correct = 0.0
    total_seen = 0
    for n in range(num_batch):
        log_string('--------{}/{} (batches) completed!'.format(n + 1, num_batch))
        current_img = test_imgs[start:end, ...]
        current_label = test_label[start:end, ...]
        start = end
        end += BATCH_SIZE
        predict_val, loss_val = network.forward(current_img, current_label)
        correct = np.sum(predict_val == current_label)
        total_correct += correct
        loss += loss_val
        total_seen += BATCH_SIZE
    log_string('eval mean loss: {}'.format(loss / num_batch))
    log_string('eval accuracy: {}'.format(total_correct / total_seen))
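`log_string` and `util.load_data` come from the project's own utilities. A minimal sketch of `log_string`, assuming it simply mirrors the message to stdout and a log file (the `LOG_FOUT` handle and filename are assumptions):

LOG_FOUT = open('log_test.txt', 'a')  # hypothetical log file

def log_string(out_str):
    # append to the log file and echo to stdout
    LOG_FOUT.write(out_str + '\n')
    LOG_FOUT.flush()
    print(out_str)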
Example no. 4
def main():
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        './data',
        train=True,
        download=True,
        transform=transforms.Compose([transforms.ToTensor()])),
                                               batch_size=256,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        './data',
        train=False,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
        ])),
                                              batch_size=9,
                                              shuffle=True)

    test_batch = None
    for x in test_loader:
        test_batch = x[0]
        break

    # use the GPU if available, otherwise fall back to the CPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = LeNet().to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

    train_with_logging(model, device, train_loader, optimizer, 200, 5,
                       test_batch)
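None of these examples include the `LeNet` class itself. For the PyTorch examples, a minimal LeNet-5-style module for 1x28x28 MNIST inputs might look like the sketch below (the exact layer sizes are an assumption):

import torch.nn as nn
import torch.nn.functional as F

class LeNet(nn.Module):
    # LeNet-5-style CNN for 1x28x28 inputs (layer sizes are an assumption)
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5, padding=2)  # -> 6x28x28
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)            # -> 16x10x10
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)  # -> 6x14x14
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)  # -> 16x5x5
        x = x.flatten(1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)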
Example no. 5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--train-file',
        required=True,
        help=(
            'Training dataset, as per the format from Kaggle: '
            'https://www.kaggle.com/c/digit-recognizer/data?select=train.csv'))
    parser.add_argument(
        '--test-file',
        required=True,
        help=(
            'Testing dataset, as per the format from Kaggle: '
            'https://www.kaggle.com/c/digit-recognizer/data?select=test.csv'))
    args = parser.parse_args()

    train_df = pd.read_csv(args.train_file)
    test_df = pd.read_csv(args.test_file)

    train_X = train_df.drop(columns=['label']).values
    train_y = train_df['label'].values
    test_X = test_df.values

    model = LeNet()
    print(model.classifier)
    name = 'lenet_relu'
    model.train_on_dataset(train_X, train_y, save_path=name)

    test_predictions = model.predict_on_dataset(test_X)
    pred_df = pd.DataFrame({
        'ImageId': np.arange(1, test_X.shape[0] + 1),
        'Label': test_predictions
    })
    pred_df.to_csv(f'{name}_predictions.csv', index=False)
Example no. 6
    def __init__(self, img_shape=(160,320,3), model_file="lenet.h5", prev_model=None, batch_size=128, epochs=5):
        # net = NvidiaNet()
        net = LeNet()
        self.nnModel = net.network(img_shape=img_shape)
        self.modelLoaded = False
        self.modelFile = model_file
        self.prevModel = prev_model
        self.batchSize = batch_size
        self.epochs = epochs
Example no. 7
def inference():
    # initializing the network
    network = LeNet(BATCH_SIZE)
    network.getTheParas(MODEL_FILE)
    print(IMAGE_PATH)
    image_paths = glob.glob(os.path.join(IMAGE_PATH, '*'))

    for image_path in image_paths:
        image_data = cv2.imread(image_path, 0)
        image_data = image_data[np.newaxis, :, :, np.newaxis]  # shape (1, H, W, 1)
        predict_val = network.inference(image_data)
        print(image_path, ':', predict_val[0][0])
Example no. 8
def aiTest(images, shape):
    model = LeNet()
    y_test = []
    x_test = images
    generate_images = []
    for image in x_test:
        confidence = model.predict(image)[0]
        predicted_class = np.argmax(confidence)
        y_test.append(predicted_class)
    attacker = PixelAttacker((x_test, y_test))
    for i in range(len(x_test)):
        generate_images.append(attacker.attack(i, model, verbose=False)[10])
    return generate_images
Example no. 9
def train(aug, trainX, trainY, testX, testY):
    # initialize the model
    print("[INFO] compiling model...")
    model = LeNet.build(width=norm_size,
                        height=norm_size,
                        depth=1,
                        classes=CLASS_NUM)
    #model = MyModel.build(width=norm_size, height=norm_size, depth=1, classes=CLASS_NUM)
    #model = InceptionV3(weights='imagenet', include_top=True)
    #model = InceptionV3.build(width=norm_size, height=norm_size, depth=1, classes=CLASS_NUM)
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                            validation_data=(testX, testY),
                            steps_per_epoch=len(trainX) // BS,
                            epochs=EPOCHS,
                            verbose=1)

    # save the model to disk
    print("[INFO] serializing network...")
    model.save('./model')
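Many of the Keras examples call a static `LeNet.build(width, height, depth, classes)`. A minimal sketch of such a builder, assuming the common two-conv-block layout; some variants in this listing also take a `weightsPath` argument for loading saved weights, which is omitted here:

from keras import backend as K
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

class LeNet:
    @staticmethod
    def build(width, height, depth, classes):
        # pick the input layout expected by the active Keras backend
        input_shape = (height, width, depth)
        if K.image_data_format() == "channels_first":
            input_shape = (depth, height, width)
        model = Sequential([
            Conv2D(20, (5, 5), padding="same", activation="relu",
                   input_shape=input_shape),
            MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
            Conv2D(50, (5, 5), padding="same", activation="relu"),
            MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
            Flatten(),
            Dense(500, activation="relu"),
            Dense(classes, activation="softmax"),
        ])
        return model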
Example no. 10
def main():
    # hyper-parameters
    epochs = 10
    batch_size = 64

    # build LeNet model
    mnist_data = MNIST_Dataset()
    LeNet_model = LeNet.build_model(mnist_data.input_shape, classes=10)
    LeNet_model.summary()
    LeNet_model.compile(loss=keras.losses.categorical_crossentropy,
                        optimizer='SGD')

    # train & test the LeNet model
    history = LeNet_model.fit(mnist_data.x_train,
                              mnist_data.y_train,
                              batch_size=batch_size,
                              epochs=epochs,
                              validation_split=0.2)
    score = LeNet_model.evaluate(mnist_data.x_test,
                                 mnist_data.y_test,
                                 batch_size=batch_size)
    print('Test Score: {}'.format(score))

    plot_loss(history)
    plt.show()
Example no. 11
def train(aug, train_X, train_Y, test_X, test_Y):
    print("[INFO] compiling model...")
    model = LeNet.build(width=WIDTH, height=HEIGHT, depth=3, classes=CLASS_NUM)
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    print("[INFO] training network...")
    _history = model.fit_generator(aug.flow(train_X,
                                            train_Y,
                                            batch_size=BATCH_SIZE),
                                   validation_data=(test_X, test_Y),
                                   steps_per_epoch=len(train_X) // BATCH_SIZE,
                                   epochs=EPOCHS,
                                   verbose=1)
    # steps_per_epoch is the number of batches drawn per epoch; validation_data
    # is the test set, used only for validation, not for training
    model.save("./save/model.h5")  # save the model
    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), _history.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), _history.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), _history.history["acc"], label="train_acc")
    plt.plot(np.arange(0, N), _history.history["val_acc"], label="val_acc")
    plt.title("loss and accuracy")
    plt.xlabel("epoch")
    plt.ylabel("loss/acc")
    plt.legend(loc="best")
    plt.savefig("./result/result.png")
    plt.show()
Example no. 12
def test_different_networks(amount):
    newmodel = LeNet.build(width=28,
                           height=28,
                           depth=1,
                           classes=num_classes,
                           weightsPath='output/lenet_weights_' + str(amount) +
                           'noise.hdf5')
    newmodel.compile(loss="categorical_crossentropy",
                     optimizer=opt,
                     metrics=["accuracy"])

    base_dir = os.path.join(os.path.dirname(__file__), '../datasets/')
    x_test_shapes = np.load(
        base_dir + 'random_shapes/20shapes/op0.4.npz')['arr_0'][5000:10000]
    x_test_bars = np.load(
        base_dir + 'bar_noise/op0.4.npz')['arr_0'][5000:10000]
    x_test_pixels = np.load(
        base_dir + 'random_pixels/op0.4.npz')['arr_0'][5000:10000]
    x_test_numbers = np.load(
        base_dir + 'number_noise/op0.4.npz')['arr_0'][5000:10000]
    x_test_extended = np.concatenate(
        (x_test, x_test_shapes, x_test_bars, x_test_pixels, x_test_numbers),
        axis=0)
    y_test_extended = np.concatenate(
        (y_test, y_test[5000:10000], y_test[5000:10000], y_test[5000:10000],
         y_test[5000:10000]))
    print(len(x_test_extended))
    print(len(y_test_extended))
    (loss, accuracy) = newmodel.evaluate(x_test_extended,
                                         y_test_extended,
                                         batch_size=batch_size,
                                         verbose=1)
    print("Percentage of noise added to the dataset" +
          str(amount /
              (amount + 10000) * 100) + ", accuracy: " + str(accuracy))
Example no. 13
class MNISTServer:
	def __init__(self):
		self.lenet = LeNet()

	def predict(self, image):
		preprocessed_image = np.array(image, dtype='float32')
		pmf = self.lenet.predict(preprocessed_image)
		return [Prediction(digit=digit, probability=probability) for digit, probability in pmf]
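A hypothetical usage of `MNISTServer.predict`, assuming `Prediction` is a simple record with `digit` and `probability` fields and `image` is a 28x28 grayscale array:

server = MNISTServer()
predictions = server.predict(image)  # image: 28x28 grayscale array (hypothetical input)
best = max(predictions, key=lambda p: p.probability)
print(best.digit, best.probability)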
Example no. 14
def read_date(arg,
              weightsPath,
              numChannels=1,
              imgRows=28,
              imgCols=28,
              numClasses=10,
              filename='hwdates.jpg',
              folder=tmp_folder):
    outDt = ''
    #showim(formDt); #print(formDt.shape)
    model = LeNet.build(numChannels=numChannels,
                        imgRows=imgRows,
                        imgCols=imgCols,
                        numClasses=numClasses,
                        weightsPath=weightsPath)
    if type(arg) is np.ndarray:
        img = arg.copy()
        formDt = arg.copy()
    else:
        img = cv2.imread(arg)
        formDt = cv2.imread(arg, 0)
    ret, thresh = cv2.threshold(~formDt, 127, 255, 0)
    image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_SIMPLE)
    (sorted_ctrs, boundingBoxes) = sort_contours(contours,
                                                 method="left-to-right")
    for i, c in enumerate(sorted_ctrs):
        tmp_img = np.zeros(formDt.shape, dtype=np.uint8)
        res = cv2.drawContours(tmp_img, [c], -1, 255, cv2.FILLED)
        tmp_img = np.bitwise_and(tmp_img, ~formDt)
        ret, inverted = cv2.threshold(tmp_img, 127, 255, cv2.THRESH_BINARY_INV)
        cnt = sorted_ctrs[i]
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(img, (x - 1, y - 1), (x + w + 1, y + h + 1), (0, 255, 0),
                      2)
        cropped = inverted[y:y + h, x:x + w]
        if (w < 15 and h < 15): continue
        cropped = cv2.bitwise_not(cropped)
        thresh = cv2.threshold(cropped, 0, 255,
                               cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        kernel = np.ones((2, 2), np.uint8)
        gray_dilation = cv2.dilate(thresh, kernel, iterations=1)
        gray_erosion = cv2.erode(gray_dilation, kernel, iterations=1)
        gray_erosion = cv2.copyMakeBorder(gray_erosion,
                                          top=15,
                                          bottom=15,
                                          left=15,
                                          right=15,
                                          borderType=cv2.BORDER_CONSTANT,
                                          value=[0, 0, 0])
        the_img = cv2.resize(gray_erosion, (28, 28))
        the_img = np.reshape(the_img, (1, 28, 28, 1))
        probs = model.predict(the_img)
        prediction = probs.argmax(axis=1)
        outDt = outDt + str(prediction[0])
    cv2.imwrite(os.path.join(folder, filename), img)
    K.clear_session()
    return outDt[:2] + '-' + outDt[3:5] + '-' + outDt[6:]
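Note that unpacking three values from `cv2.findContours` is the OpenCV 3.x signature; OpenCV 4.x returns only `(contours, hierarchy)`. A version-agnostic pattern, if the code needs to run on either:

# OpenCV 3.x returns (image, contours, hierarchy); 4.x returns (contours, hierarchy)
result = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = result[0] if len(result) == 2 else result[1]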
Example no. 15
def LeNet_train(aug, trainX, trainY, testX, testY):
    # initialize the model
    weights_path = './save_weights/leNet.ckpt'
    model = LeNet.build(width=norm_size,
                        height=norm_size,
                        depth=3,
                        classes=CLASS_NUM)

    print("[INFO] compiling model...")
    checkpoint_path = './save_weights/leNet.ckpt'
    # model.load_weights(checkpoint_path)
    # if os.path.exists(checkpoint_path):
    #     model.load_weights(checkpoint_path)
    #     # print the message below if the saved parameters loaded successfully
    #     print("checkpoint_loaded")

    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    # TODO: this is a multi-class task, so categorical cross-entropy is the
    # right loss; for a two-class task, switch to binary cross-entropy instead
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # resume training from the checkpoint
    checkpoint = ModelCheckpoint(checkpoint_path,
                                 monitor='accuracy',
                                 save_weights_only=True,
                                 verbose=1,
                                 save_best_only=True,
                                 period=1)

    # train the network
    print("[INFO] training network...")
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                            validation_data=(testX, testY),
                            validation_steps=12,
                            steps_per_epoch=len(trainX) // BS,
                            epochs=EPOCHS,
                            verbose=1,
                            callbacks=[checkpoint])

    # save the model to disk
    print("[INFO] serializing network...")
    # model.save("test_sign.model")

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy on traffic-sign classifier")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig("plot.png")
Example no. 16
def main():
    batch_size = 128
    epoch = 15

    data = DATA()
    model = LeNet(data.input_shape, data.num_classes)

    hist = model.fit(data.x_train,
                     data.y_train,
                     batch_size=batch_size,
                     epochs=epoch,
                     validation_split=0.2)
    score = model.evaluate(data.x_test, data.y_test, batch_size=batch_size)

    print()
    print('Test Loss =', score)

    plot_loss(hist)
    plt.show()
Example no. 17
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-file',
                        required=True,
                        help='File path to a trained PyTorch model')
    parser.add_argument(
        '--target-digit',
        required=True,
        type=int,
        choices=range(10),
        help='The digit that we want to find the "epitome" for')
    parser.add_argument('--num-samples',
                        type=int,
                        default=10,
                        help=('The number of epitome samples to generate'))
    parser.add_argument(
        '--use-training-file',
        default=None,
        help=
        ('Start constructions with entries from the given training set, '
         'rather than from uniform noise. The format of the file should match: '
         'https://www.kaggle.com/c/digit-recognizer/data?select=train.csv'))
    args = parser.parse_args()

    model = LeNet()
    model.load_saved_state(args.model_file)

    starter_X = None
    if args.use_training_file:
        train_df = pd.read_csv(args.use_training_file)
        starter_df = train_df[train_df['label'] == args.target_digit]
        starter_X = starter_df.drop(
            columns=['label']).values[:args.num_samples]

    opt_inputs = model.optimize_for_digit(
        args.target_digit,
        num_samples=args.num_samples,
        starter_X=starter_X,
    )
    for i in range(opt_inputs.shape[0]):
        img = Image.fromarray(opt_inputs[i], 'L')
        img.save(f'digit_{args.target_digit}_{i}.png')
Example no. 18
def evaluate(net_file, model_file):
    """ main
    """
    #1, build model
    net_path = os.path.dirname(net_file)
    if net_path not in sys.path:
        sys.path.insert(0, net_path)

    from lenet import LeNet as MyNet

    # define network topology (part of step 1)
    images = fluid.layers.data(name='image',
                               shape=[1, 28, 28],
                               dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    net = MyNet({'data': images})
    prediction = net.layers['prob']
    acc = fluid.layers.accuracy(input=prediction, label=label)

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    #2, load weights
    if model_file.find('.npy') > 0:
        net.load(data_path=model_file, exe=exe, place=place)
    else:
        net.load(data_path=model_file, exe=exe)

    #3, test this model
    test_program = fluid.default_main_program().clone()
    test_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=128)

    feeder = fluid.DataFeeder(feed_list=[images, label], place=place)
    fetch_list = [acc, prediction]

    print('go to test model using test set')
    acc_val = test_model(exe, test_program, \
            fetch_list, test_reader, feeder)

    print('test accuracy is [%.4f], expected value[0.919]' % (acc_val))
Example no. 19
class MNISTServer:
    def __init__(self):
        self.lenet = LeNet()

    def predict(self, image):
        preprocessed_image = np.array(image, dtype='float32')
        pmf = self.lenet.predict(preprocessed_image)
        return [
            Prediction(digit=digit, probability=probability)
            for digit, probability in pmf
        ]
Example no. 20
def LeNet_train():
    # initializing the network
    network = LeNet(BATCH_SIZE)
    # load the data
    train_imgs, val_imgs, _, train_label, val_label, _ = util.load_data(
        MNIST_PATH)

    for epoch in range(MAX_EPOCH):
        eval_one_epoch(network, val_imgs, val_label)
        log_string('------------start train {}/{}----------'.format(
            epoch, MAX_EPOCH))
        train_one_epoch(network, train_imgs, train_label, epoch)
Example no. 21
    def torch_setup(self):
        self.data_train = MNIST('./data/mnist',
                                train=True,
                                download=True,
                                transform=self.compose)
        self.data_test = MNIST('./data/mnist',
                               train=False,
                               download=True,
                               transform=self.compose)
        self.data_train_loader = DataLoader(self.data_train,
                                            batch_size=self.batch_size,
                                            shuffle=True,
                                            num_workers=4)
        self.data_test_loader = DataLoader(self.data_test,
                                           batch_size=self.batch_size,
                                           num_workers=4)
        self.criterion = nn.CrossEntropyLoss()
        self.net = LeNet()
        self.optimizer = getattr(optim, self.opt)(self.net.parameters(), lr=self.learning_rate)

        try:
            self.net.load_state_dict(load('model_params.pkl'))
            self.loaded = True
            print("Loaded")
        except Exception as e:
            print(e)
            self.loaded = False
            print("Not loaded")
Example no. 22
def test_image(filename, num_class, weights_path='Default'):
    img_string = tf.gfile.FastGFile(filename, 'rb').read()
    img_decoded = tf.image.decode_jpeg(img_string, channels=channels)
    img_resized = tf.image.resize_images(img_decoded, [image_size, image_size])
    img_reshape = tf.reshape(img_resized,
                             shape=[1, image_size, image_size, channels])

    model = LeNet(img_reshape, keep_prob, num_classes, batch_size, image_size,
                  channels)
    score = tf.nn.softmax(model.fc6)
    pred = tf.argmax(score, 1)  # renamed to avoid shadowing the builtin max
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, "./tmp/checkpoint/model_epoch10.ckpt")
        print(sess.run(model.fc6))
        prob = sess.run(pred)[0]
        print("The number is %s" % (class_name[prob]))
Example no. 23
    def get_model(self):

        if self.exp_type == 'pnml_cifar10':
            model = load_pretrained_resnet20_cifar10_model(resnet20())
        elif self.exp_type == 'random_labels':
            model = WideResNet()
        elif self.exp_type == 'out_of_dist_svhn':
            model = load_pretrained_resnet20_cifar10_model(resnet20())
        elif self.exp_type == 'out_of_dist_noise':
            model = load_pretrained_resnet20_cifar10_model(resnet20())
        elif self.exp_type == 'pnml_mnist':
            model = Net()
        elif self.exp_type == 'adversarial':
            model = load_pretrained_resnet20_cifar10_model(resnet20())
        elif self.exp_type == 'pnml_cifar10_lenet':
            model = LeNet()  # VGG('VGG16')
        else:
            raise NameError('No experiment type: %s' % self.exp_type)

        return model
Example no. 24
def train_with_noise(amount):
    # initialize the model with the given parameters.
    newmodel = LeNet.build(
        width=28,
        height=28,
        depth=1,
        classes=num_classes,
        weightsPath=args["weights"] if args["load_model"] > 0 else None)

    # we use categorical_crossentropy as our loss function
    newmodel.compile(loss="categorical_crossentropy",
                     optimizer=opt,
                     metrics=["accuracy"])

    base_dir = os.path.join(os.path.dirname(__file__), '../datasets/')

    quarter = math.ceil(amount / 4)
    x_train_shapes = np.load(
        base_dir + 'random_shapes/20shapes/op0.4.npz')['arr_0'][0:quarter]
    x_train_bars = np.load(
        base_dir + 'bar_noise/op0.4.npz')['arr_0'][0:quarter]
    x_train_pixels = np.load(
        base_dir + 'random_pixels/op0.4.npz')['arr_0'][0:quarter]
    x_train_numbers = np.load(
        base_dir + 'number_noise/op0.4.npz')['arr_0'][0:quarter]
    x_train_extended = np.concatenate((x_train, x_train_shapes, x_train_bars,
                                       x_train_pixels, x_train_numbers),
                                      axis=0)
    y_train_extended = np.concatenate(
        (y_train, y_train[0:quarter], y_train[0:quarter],
         y_train[0:quarter], y_train[0:quarter]))
    print("[INFO] training with noise: " + str(amount))
    newmodel.fit(x_train_extended,
                 y_train_extended,
                 batch_size=batch_size,
                 epochs=epochs,
                 verbose=1)
    newmodel.save_weights('output/lenet_weights_' + str(amount) + 'noise.hdf5',
                          overwrite=True)
Example no. 25
def train(aug, trainX, trainY, testX, testY, args):
    # initialize the model
    print("[INFO] compiling model...")
    model = LeNet.build(width=norm_size,
                        height=norm_size,
                        depth=3,
                        classes=CLASS_NUM)
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    #    opt = Adam(lr=INIT_LR)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                            validation_data=(testX, testY),
                            steps_per_epoch=len(trainX) // BS,
                            epochs=EPOCHS,
                            verbose=1)

    # save the model to disk
    print("[INFO] serializing network...")
    model.save(args["model"])

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy on Invoice classifier")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig(args["plot"])
Example no. 26
def train(aug, trainX, trainY, testX, testY, args):
    # initialize the model
    print("[INFO] compiling model...")
    model = LeNet.build(width=NORM_SIZE,
                        height=NORM_SIZE,
                        depth=3,
                        classes=CLASS_NUM)
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                            validation_data=(testX, testY),
                            steps_per_epoch=len(trainX) // BS,
                            epochs=EPOCHS,
                            verbose=1)

    # save the model to disk
    print("[INFO] serializing network...")
    model.save(args["model"])
Example no. 27
	def __init__(self):
		self.lenet = LeNet()
Example no. 28
if K.image_data_format() == "channels_first":  # restored from context; assumes "from keras import backend as K" above the truncated snippet
    data = data.reshape(data.shape[0], 1, 28, 28)
else:
    data = data.reshape(data.shape[0], 28, 28, 1)

(trainX, testX, trainY, testY) = train_test_split(data / 255.0,
                                                  dataset.target.astype("int"),
                                                  test_size=0.25,
                                                  random_state=42)

lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

print("[INFO] compiling model...")
opt = SGD(lr=0.01)
model = LeNet.build(width=28, height=28, depth=1, classes=10)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

print("[INFO] training network...")
H = model.fit(trainX,
              trainY,
              validation_data=(testX, testY),
              batch_size=128,
              epochs=20,
              verbose=1)

print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=128)
print(
Example no. 29
#!/usr/bin/env python3

import torch as th
import torchvision as tv
import torch.nn as nn
import torch.optim as optim

from torch.autograd import Variable as V
from torchvision import transforms

from lenet import LeNet
from sobolev import SobolevLoss

USE_SOBOLEV = False

student = LeNet()
teacher = LeNet()
teacher.load_state_dict(th.load('teacher.pth'))
student = student.cuda()
teacher = teacher.cuda()

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
Example no. 30
def main():
    global args, rank, world_size, best_prec1, dataset_len

    if args.dist == 1:
        rank, world_size = dist_init()
    else:
        rank = 0
        world_size = 1

    model = LeNet()
    model.cuda()

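    # keep a trainable FP32 master copy of the parameters; the optimizer below
    # steps on this copy rather than on the model's own parameters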
    param_copy = [
        param.clone().type(torch.cuda.FloatTensor).detach()
        for param in model.parameters()
    ]

    for param in param_copy:
        param.requires_grad = True

    if args.dist == 1:
        model = DistModule(model)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()

    optimizer = torch.optim.SGD(param_copy,
                                args.base_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    last_iter = -1

    # Data loading code
    train_dataset = datasets.MNIST(root='./data',
                                   train=True,
                                   transform=transforms.ToTensor(),
                                   download=False)
    val_dataset = datasets.MNIST(root='./data',
                                 train=False,
                                 transform=transforms.ToTensor(),
                                 download=False)

    dataset_len = len(train_dataset)
    args.max_iter = math.ceil(
        (dataset_len * args.epoch) / (world_size * args.batch_size))

    if args.dist == 1:
        train_sampler = DistributedGivenIterationSampler(train_dataset,
                                                         args.max_iter,
                                                         args.batch_size,
                                                         last_iter=last_iter)
        val_sampler = DistributedSampler(val_dataset, round_up=False)
    else:
        train_sampler = DistributedGivenIterationSampler(train_dataset,
                                                         args.max_iter,
                                                         args.batch_size,
                                                         world_size=1,
                                                         rank=0,
                                                         last_iter=last_iter)
        val_sampler = None

    # pin_memory if true, will copy the tensor to cuda pinned memory
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=False,
                              num_workers=args.workers,
                              pin_memory=True,
                              sampler=train_sampler)

    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True,
                            sampler=val_sampler)

    train(train_loader, val_loader, model, criterion, optimizer, param_copy)
Example no. 31
# dimensions of our images.
img_width, img_height = 150, 150
train_data_dir = os.path.join(args['dataset'], 'train')
validation_data_dir = os.path.join(args['dataset'], 'validation')
test_data_dir = os.path.join(args['dataset'], 'minitest')
nb_epoch = 100
nb_train_samples = 2000
nb_validation_samples = 800

class_labels = os.listdir(train_data_dir)
class_labels.sort()
clog.info('Classes: {}'.format(class_labels))

# Initialize the optimizer and model.
clog.info('Initializing model...')
model = LeNet.build(width=img_width, height=img_height, depth=3, num_classes=2,
    weights_path=args['weights_file'] if args['load_model'] > 0 else None)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
    metrics=['accuracy'])

# Only train and evaluate the model if we *are not* loading a
# pre-existing model.
if not args['load_model']:
  # this is the augmentation configuration we will use for training
  train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
Example no. 32
# convert the labels from integers to vectors
trainY = to_categorical(trainY, num_classes=6)
testY = to_categorical(testY, num_classes=6)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")

# initialize the model
print("[INFO] compiling model...")
model = LeNet.build(width=width, height=height, depth=3, classes=6)
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // BS,
                        epochs=EPOCHS,
                        verbose=1)

# save the model to disk
print("[INFO] serializing network...")
Example no. 33
# convert the labels from integers to vectors
trainY = to_categorical(trainY, num_classes=2)
testY = to_categorical(testY, num_classes=2)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")

# initialize the model
print("[INFO] compiling model...")
model = LeNet.build(width=image_size, height=image_size, depth=3, classes=2)
opt = Adam(lr=learning_rate, decay=learning_rate / epochs)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=batch_size),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // batch_size,
                        epochs=epochs,
                        verbose=1)

# save the model to disk
print("[INFO] serializing network...")
model.save(args["model"])