# Example #1
def main():
    """Train one LDA classifier per digit (one-vs-rest) and evaluate the ensemble."""
    x_train, y_train = load_mnist()
    x_test, y_test = load_mnist(kind='t10k')

    # Scale pixel values from [0, 255] into [0, 1].
    x_train = (x_train / 255).astype(np.float32)
    x_test = (x_test / 255).astype(np.float32)

    # Directory where the per-digit weight vectors are saved.
    model_prefix = 'weights/LDA'
    if not os.path.isdir(model_prefix):
        os.makedirs(model_prefix)

    weights, biases = [], []
    for digit in range(10):
        print('Training for digit %d' % digit)
        weight_file = os.path.join(model_prefix, '%d.npy' % digit)

        # One-vs-rest soft label: 1 for the current digit, 0 otherwise.
        label_test = (y_test == digit).astype(np.uint8)

        weight = cal_LDA_weight(digit, x_train, y_train)
        bias = cal_LDA_bias(digit, x_train, y_train, weight, lamb=0.6)
        accuracy, recall = test(x_test, label_test, weight, bias)
        print('rec', recall)

        weights.append(weight)
        biases.append(bias)
        np.save(weight_file, weight)

    # Combine the ten binary classifiers into one multi-class prediction.
    final_accuracy = multi_test(x_test, y_test, weights, biases)
    print('Final accuracy:', final_accuracy)
def main():
    """Train ridge-regularized logistic regression, one-vs-rest per digit.

    For each digit a binary classifier with labels in {-1, +1} is trained
    with mini-batch updates; the weight vector with the best test accuracy
    across the epochs is kept and saved, and the ten classifiers are then
    combined by ``multi_test``.
    """
    x_train, y_train = load_mnist()
    x_test, y_test = load_mnist(kind='t10k')
    # Scale pixel values from [0, 255] into [0, 1].
    x_train = (x_train / 255).astype(np.float32)
    x_test = (x_test / 255).astype(np.float32)

    # Hyper-parameters.
    lr = 0.0001
    regular = 0.5          # ridge regularization strength
    epoch = 50
    print_freq = 10
    batch_size = 60000     # full-batch at the default MNIST train size
    model_prefix = 'weights/logistic_regression_ridge_loss'
    if not os.path.isdir(model_prefix):
        os.makedirs(model_prefix)

    weights = []
    # Train one binary (one-vs-rest) classifier per digit.
    for j in range(10):
        print('Training for digit %d' % j)
        weight_file = os.path.join(model_prefix, '%d.npy' % j)
        weight = np.zeros(PIXELS)
        best_accuracy = 0
        best_weight = np.zeros(PIXELS)
        # Soft labels in {-1, +1}: +1 for the target digit, -1 otherwise.
        target_digit = j
        label_train = (y_train == target_digit).astype(np.int32) * 2 - 1
        label_test = (y_test == target_digit).astype(np.int32) * 2 - 1

        for i in range(epoch):
            for batch in range(TRAIN_SAMPLES // batch_size):
                # BUG FIX: bound the slice to exactly one batch. The original
                # open-ended slice x_train[batch * batch_size:] handed every
                # remaining sample to train() whenever batch_size were set
                # below TRAIN_SAMPLES; with the current full-batch setting
                # the behavior is unchanged.
                start = batch * batch_size
                end = start + batch_size
                weight = train(x_train[start:end],
                               label_train[start:end],
                               weight,
                               regular,
                               lr,
                               i,
                               num_samples=batch_size)
            accuracy = test(x_test, label_test, weight, i)
            # Keep the best weight vector seen on the test split.
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                best_weight = weight
            if i > 0 and i % print_freq == 0:
                print('Epoch[%d/%d]' % (i, epoch),
                      'current accuracy: %.4f' % accuracy)

        weights.append(best_weight)
        np.save(weight_file, best_weight)
        print('Finish training for digit %d, best accuracy %.4f\n' %
              (j, best_accuracy))

    final_accuracy = multi_test(x_test, y_test, weights)
    print('Final accuracy:', final_accuracy)
def main():
    """Train plain logistic regression (one-vs-rest, 0/1 labels) on MNIST."""
    x_train, y_train = load_mnist()
    x_test, y_test = load_mnist(kind='t10k')

    # Scale pixel values from [0, 255] into [0, 1].
    x_train = (x_train / 255).astype(np.float32)
    x_test = (x_test / 255).astype(np.float32)

    # Hyper-parameters.
    lr = 0.0001
    epoch = 50
    print_freq = 10
    model_prefix = 'weights/logistic_regression'
    if not os.path.isdir(model_prefix):
        os.makedirs(model_prefix)

    weights = []
    # One binary classifier per digit.
    for j in range(10):
        print('Training for digit %d' % j)
        weight_file = os.path.join(model_prefix, '%d.npy' % j)
        weight = np.zeros(PIXELS)
        best_accuracy, best_weight = 0, np.zeros(PIXELS)

        # One-vs-rest soft label: 1 for the current digit, 0 otherwise.
        label_train = (y_train == j).astype(np.uint8)
        label_test = (y_test == j).astype(np.uint8)

        for i in range(epoch):
            weight = train(x_train, label_train, weight, lr, i)
            accuracy = test(x_test, label_test, weight, i)
            # Track the best-performing weight vector over all epochs.
            if accuracy > best_accuracy:
                best_accuracy, best_weight = accuracy, weight
            if i > 0 and i % print_freq == 0:
                print('Epoch[%d/%d]' % (i, epoch),
                      'current accuracy: %.4f' % accuracy)

        weights.append(best_weight)
        np.save(weight_file, best_weight)
        print('Finish training for digit %d, best accuracy %.4f\n' %
              (j, best_accuracy))

    final_accuracy = multi_test(x_test, y_test, weights)
    print('Final accuracy:', final_accuracy)
# Example #4
 def __init__(self, cfg, phase):
     """Build the MNIST dataset wrapper for one split.

     Args:
         cfg: configuration object; this method reads ``cfg.resolution``
             and ``cfg.root`` (root is only used for logging here).
         phase: split name, 'train' or 'test'; 'test' is mapped to the
             't10k' file-name prefix used by the MNIST archives.
     """
     self.num_category = 10  # ten digit classes
     self.cfg = cfg
     self.phase = phase  # train or test
     # MNIST test files are named 't10k-*'; remap before loading.
     if phase == 'test':
         phase = 't10k'
     self.images, self.labels = load_mnist(kind=phase)
     self.resolution = cfg.resolution
     self.transforms = transforms.ToTensor()
     # Build the sample database; presumably derived from images/labels
     # loaded above — confirm in _get_db().
     self.db = self._get_db()
     logger.info('=> Loading {} images from {}'.format(
         self.phase, self.cfg.root))
     logger.info('=> num_images: {}'.format(len(self.db['x'])))
            if not category_id == target_id:
                continue
            raw_image = self.input[i]
            for j in range(self.fmaps.shape[1]):
                fmap = self.fmaps[i][j]
                grad = self.grads[category_id][j]
                gcam += fmap * grad

            gcam -= gcam.min()
            if (gcam.max() != 0):
                gcam /= gcam.max()
            gcam = cv2.resize(gcam,
                              (self.init_image_size, self.init_image_size))
            self.save(gcam, raw_image, i, category_id)
        print('Group %d finish!' % self.group)


if __name__ == "__main__":
    # Visualize only the digits 4 and 9 from the MNIST test split with t-SNE.
    x, y = load_mnist(kind='t10k')

    mask_4 = y == 4
    mask_9 = y == 9

    # Stack the two digit groups into one array pair, 4s first.
    x_group = np.concatenate((x[mask_4], x[mask_9]), axis=0)
    y_group = np.concatenate((y[mask_4], y[mask_9]), axis=0)

    tSNE(x_group, y_group.astype(np.uint8), [4, 9])
import numpy as np
from MnistData import load_mnist
import matplotlib.pyplot as plt
import random
import copy

# Take the first 15 MNIST training images for a 3x5 preview grid.
imgs, _ = load_mnist()
imgs = imgs[:15]

# Shared axes keep all subplots on the same pixel scale.
fig, ax = plt.subplots(nrows=3, ncols=5, sharex=True, sharey=True)
ax = ax.flatten()
for k in range(15):
    img = imgs[k].reshape(28, 28, 1)
    buffer = []
    block_num = 4
    block_size = 28 // block_num
    for i in range(block_num):
        for j in range(block_num):
            block = copy.deepcopy(img[i * block_size:(i + 1) * block_size,
                                      j * block_size:(j + 1) * block_size])
            buffer.append(block)
    random.shuffle(buffer)
    # img = np.zeros((28,28))
    for i in range(block_num):
        for j in range(block_num):
# Example #7
from sklearn import svm
import numpy as np
from MnistData import load_mnist

# Train a sigmoid-kernel SVM on a subset of MNIST and report test accuracy.
x_train, y_train = load_mnist()
x_test, y_test = load_mnist(kind='t10k')
train_num = 6000   # training subset size
test_num = 1000    # evaluation subset size

classifier = svm.SVC(gamma='scale',
                     C=1.0,
                     decision_function_shape='ovr',
                     kernel='sigmoid')
classifier.fit(x_train[:train_num], y_train[:train_num])

predictions = classifier.predict(x_test[:test_num])
# Fraction of correct predictions on the evaluation subset.
accuracy = np.mean(np.equal(predictions, y_test[:test_num]))
print(accuracy)