Example #1
def load_fashion_mnist_idx(data_type):
    import mnist_reader
    import numpy as np  # assumed to be available at module level in the original project
    data_dir = 'datasets/fmnist/'
    if data_type == "train":
        X, y = mnist_reader.load_mnist(data_dir, kind='train')
    elif data_type == "test" or data_type == "val":
        X, y = mnist_reader.load_mnist(data_dir, kind='t10k')
        if data_type == "test":
            X = X[:4000,:]
            y = y[:4000]
        else:
            X = X[4000:,:]
            y = y[4000:]
    X = X.reshape((X.shape[0], 28, 28, 1))
    X = X.astype(np.float64)  # np.float and np.int were removed in NumPy 1.24
    y = y.astype(np.int64)
    
    idxUse = np.arange(0,y.shape[0])
    seed = 547
    np.random.seed(seed)
    np.random.shuffle(X)
    np.random.seed(seed)
    np.random.shuffle(y)
    np.random.seed(seed)
    np.random.shuffle(idxUse)
    return X/255.,y,idxUse
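A note on Example #1's shuffle: re-seeding before each of the three shuffles gives X, y, and idxUse the same permutation. A more idiomatic equivalent (a sketch, not code from the original project) draws one shared permutation and indexes all three arrays with it:

    rng = np.random.default_rng(547)
    perm = rng.permutation(y.shape[0])  # one shared permutation
    X, y, idxUse = X[perm], y[perm], idxUse[perm]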
Example #2
def load_fashion(data_dir):

    X_train, y_train = mnist_reader.load_mnist(data_dir, kind='train')
    X_test, y_test = mnist_reader.load_mnist(data_dir, kind='t10k')

    train_images = []
    test_images = []
    train_labels = []
    test_labels = []

    for t, l in zip(X_train, y_train):
        label = np.zeros((10))
        label[l] = 1
        train_labels.append(label)
        t = normalize(t)
        train_images.append(np.reshape(t, (28, 28, 1)))
    for t, l in zip(X_test, y_test):
        label = np.zeros((10))
        label[l] = 1
        test_labels.append(label)
        t = normalize(t)
        test_images.append(np.reshape(t, (28, 28, 1)))

    return np.asarray(train_images), np.asarray(train_labels), np.asarray(
        test_images), np.asarray(test_labels)
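Example #2 calls a normalize helper that is not shown. A plausible minimal version, plus a vectorized replacement for the one-hot loops (both are sketches under that assumption, not code from the original project):

    def normalize(t):
        # hypothetical helper: scale raw pixel values to [0, 1]
        return t.astype(np.float32) / 255.0

    # equivalent to the per-sample loops above
    train_labels = np.eye(10)[y_train]
    train_images = normalize(X_train).reshape(-1, 28, 28, 1)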
Example #3
def main():
    X_test, y_test = mnist_reader.load_mnist('data/fashion', kind='t10k')
    X_train, y_train = mnist_reader.load_mnist('data/fashion', kind='train')
    results = []
    for j in range(2):
        start = time.time()  # reset each pass so reduction time is measured correctly
        if j == 0:
            X_train, X_test = reduce_dimensionality_LDA()
        elif j == 1:
            X_train, X_test, n = reduce_dimensionality_PCA()
        end1 = time.time()
        print('Dimensionality reduced to: ', X_train.shape[1])
        Reduction_time = end1 - start
            
        start = time.time()
        params = ['linear']
        train_accuracy, test_accuracy = compute_svm(X_train, y_train, X_test, y_test, params)
        end = time.time()
        Computation_time = end - start
        results.append([j ,params, train_accuracy*100, test_accuracy*100, Reduction_time, Computation_time]) 
    import tabulate
    import csv
    with open('csvexample_linear.csv', 'w', newline='') as myFile:
        writer = csv.writer(myFile)
        writer.writerows(results)
        print(tabulate.tabulate(results))
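reduce_dimensionality_LDA and reduce_dimensionality_PCA are project helpers whose definitions are not shown. A plausible sketch using the same sklearn classes Example #4 below relies on (the module-level X_train/X_test/y_train and the n_components values are assumptions):

    from sklearn.decomposition import PCA
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA

    def reduce_dimensionality_PCA(n_components=100):
        # fit on training pixels, apply the same projection to the test set
        pca = PCA(n_components=n_components).fit(X_train)
        return pca.transform(X_train), pca.transform(X_test), n_components

    def reduce_dimensionality_LDA():
        # LDA is supervised and capped at n_classes - 1 = 9 components
        lda = LDA(n_components=9).fit(X_train, y_train)
        return lda.transform(X_train), lda.transform(X_test)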
Example #4
def main():
    file = 'out.txt'
    train_data, train_labels = mnist_reader.load_mnist('data/fashion', kind='train')
    train_data = 5 * (train_data / 255 - 0.5) / 0.5
    test_data, test_labels = mnist_reader.load_mnist('data/fashion', kind='t10k')
    test_data = 5 * (test_data / 255 - 0.5) / 0.5

    clf = SVM(train_data, train_labels, test_data, test_labels, file)
    clf.train('linear').test()
    clf.train('polynomial').test()
    clf.train('RBF').test()

    pca = PCA(n_components=100)
    pca.fit(train_data)
    pca_train_data = pca.transform(train_data)
    pca_test_data = pca.transform(test_data)
    clf = SVM(pca_train_data, train_labels, pca_test_data, test_labels, file)
    clf.train('linear').test()
    clf.train('polynomial').test()
    clf.train('RBF').test()

    lda = LDA(n_components=9)
    lda.fit(train_data, train_labels)
    lda_train_data = lda.transform(train_data)
    lda_test_data = lda.transform(test_data)
    clf = SVM(lda_train_data, train_labels, lda_test_data, test_labels, file)
    clf.train('linear').test()
    clf.train('polynomial').test()
    clf.train('RBF').test()
Example #5
def logisticRegressionCustom(layer):
    try:
        layer = int(layer)
    except ValueError as e:
        # a non-numeric argument would otherwise crash the range check below
        print(e)
        sys.exit(0)
    if layer > 3 or layer < 1:
        print("Invalid Layer Number")
        sys.exit(0)
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split
    import mnist_reader
    X_train, y_train = mnist_reader.load_mnist('../data/fashion', kind='train')
    X_train, X_valid, y_train, y_valid = train_test_split(X_train,
                                                          y_train,
                                                          test_size=(1 / 6),
                                                          random_state=48)
    X_test, y_test = mnist_reader.load_mnist('../data/fashion', kind='t10k')
    # Normalize input
    X_train = -0.5 + (X_train / 255.0)
    X_valid = -0.5 + (X_valid / 255.0)
    X_test = -0.5 + (X_test / 255.0)
    # Logits
    tf.compat.v1.reset_default_graph()
    saver = tf.compat.v1.train.import_meta_graph("weight/model.ckpt.meta")
    x = tf.compat.v1.placeholder(tf.float64, [None, 784])
    y_ = tf.compat.v1.placeholder(tf.float64, [None, 10])
    with tf.compat.v1.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint("weight/"))
        graph = tf.compat.v1.get_default_graph()
        w1 = graph.get_tensor_by_name("w1:0")
        b1 = graph.get_tensor_by_name("b1:0")
        w2 = graph.get_tensor_by_name("w2:0")
        b2 = graph.get_tensor_by_name("b2:0")
        w3 = graph.get_tensor_by_name("w3:0")
        b3 = graph.get_tensor_by_name("b3:0")
        w4 = graph.get_tensor_by_name("w4:0")
        b4 = graph.get_tensor_by_name("b4:0")
        h1 = tf.add(tf.matmul(x, w1), b1)
        h1 = reluActivation(h1)
        h2 = tf.add(tf.matmul(h1, w2), b2)
        h2 = reluActivation(h2)
        h3 = tf.add(tf.matmul(h2, w3), b3)
        h3 = reluActivation(h3)
        if layer == 1:
            logisticTrain = sess.run(h1, feed_dict={x: X_train})
            logisticTest = sess.run(h1, feed_dict={x: X_test})
        elif layer == 2:
            logisticTrain = sess.run(h2, feed_dict={x: X_train})
            logisticTest = sess.run(h2, feed_dict={x: X_test})
        elif layer == 3:
            logisticTrain = sess.run(h3, feed_dict={x: X_train})
            logisticTest = sess.run(h3, feed_dict={x: X_test})
        logisticregression = LogisticRegression(max_iter=1000)
        logisticregression.fit(logisticTrain, y_train)
        test_prediction = logisticregression.predict(logisticTest)
        print("Logistic Regression Output")
        print('Using layer', layer, 'as input')
        print("*" * 20)
        print('Accuracy of logistic regression classifier on test set:',
              logisticregression.score(logisticTest, y_test) * 100, "%")
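reluActivation above is a project helper that is not shown; a minimal stand-in consistent with how it is used on TF tensors (an assumption, equivalent to tf.nn.relu):

    def reluActivation(h):
        # hypothetical helper: elementwise max(0, h)
        return tf.maximum(h, tf.zeros_like(h))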
Example #6
def load_data():
    X_train, y_train = mnist_reader.load_mnist('fashion', kind='train')
    X_test, y_test = mnist_reader.load_mnist('fashion', kind='t10k')
    data = {'Xval': X_test,    # the t10k split doubles as the validation set here
            'Xtrain': X_train,
            'yval': y_test,
            'ytrain': y_train}
    return data
Example #7
def load_data():
    base_dir = os.getcwd()
    path = os.path.join(base_dir, "fashion")
    print(path)
    x_train, y_train = mnist_reader.load_mnist(
        path, kind='train')  # x,images; y,labels
    x_test, y_test = mnist_reader.load_mnist(path, kind='t10k')
    return x_train, y_train, x_test, y_test
Example #8
def runCNN(params):
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    for key in params:
        if 'Filter' in key or 'Kernel' in key:
            params[key] = int(params[key])

    num_classes = 10
    filepath = os.path.dirname(os.path.abspath(__file__))+'/data/fashion/'
    x_train, y_train = mnist_reader.load_mnist(filepath, kind='train')
    x_test, y_test = mnist_reader.load_mnist(filepath, kind='t10k')
    x_train = x_train.reshape((60000,28,28,1))
    x_test = x_test.reshape((10000,28,28,1))
    y_train = tf.keras.utils.to_categorical(y_train, num_classes=num_classes)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes=num_classes)

    model = None
    tf.reset_default_graph()
    sess = tf.InteractiveSession()

    model = tf.keras.Sequential()
    model.add(Conv2D(params['layer1Filters'], params['layer1Kernel'], padding='same',
                    input_shape=x_train.shape[1:]))
    model.add(Activation('relu'))
    model.add(Conv2D(params['layer2Filters'], params['layer2Kernel']))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(params['firstDropout']))

    model.add(Conv2D(params['layer3Filters'], params['layer3Kernel'], padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(.5))
    model.add(Conv2D(params['layer4Filters'], params['layer4Kernel']))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(params['secondDropout']))

    model.add(Flatten())
    model.add(Dense(params['denseNodes']))
    model.add(Activation('relu'))
    model.add(Dropout(.3))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    optimizer = Adam(lr=params['learningRate'])

    model.compile(optimizer=optimizer,
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    
    for i in range(100):
        model.fit(x=x_train, y=y_train, batch_size=params['batchSize'], epochs=1)
        score = model.evaluate(x_test, y_test, batch_size=128)
        print("Accuracy on Testing Data:",str(score[1]*100)+"%")

    print("Hyperparameters: "+ str(params))
    sess.close()
    return {'loss': score[0], 'status': STATUS_OK }
Example #9
def main():
    x_train, y_train = mnist_reader.load_mnist("data/fashion", kind="train")
    x_train = prepare_data(x_train)
    x_train, y_train, x_validation, y_validation = segregate_data(
        x_train, y_train)

    config = {
        "initialGuess": np.ones((len(labels), x_train.shape[1])),
        "learningRate": 0.1,
        "iterations": 1000
    }
    print(
        f'Using learning rate = {config["learningRate"]} and {config["iterations"]} iterations'
    )

    start_time = time.time()

    y_hot_encoding = one_hot_encode(y_train, len(labels))

    # train
    mlr = MultinomialLogisticRegression(config)
    cost_iterations = mlr.fit(x_train, y_hot_encoding.T, "Multinomial",
                              cross_entropy)

    title = "Multinomial"
    file_name = "Multinomial"
    plot_cost(cost_iterations, title, file_name, config["iterations"],
              config["learningRate"])

    # predict validation
    print("Predicting for validation set...")
    predictions = (mlr.predict(x_validation, "Multinomial"))

    predicted_class = []
    for pred in predictions:
        ind = np.argmax(pred)
        predicted_class.append(ind)

    show_metrics(predicted_class, title, list(labels.keys()),
                 y_validation.tolist(), "ConfusionMatrixMultinomial")

    # predict test
    x_test, y_test = mnist_reader.load_mnist("data/fashion", kind="t10k")
    x_test = np.c_[np.ones(x_test.shape[0]), x_test]
    print("Predicting for test set...")
    predictions = (mlr.predict(x_test, "Multinomial"))

    predicted_class = []
    for pred in predictions:
        ind = np.argmax(pred)
        predicted_class.append(ind)

    show_metrics(predicted_class, title + " Test", list(labels.keys()),
                 y_test.tolist(), "ConfusionMatrixMultinomialTest")

    elapsed_time = time.time() - start_time
    print("Elapsed time: %1f s" % (elapsed_time))
Example #10
def load_mnist_fashion():
    x_train, y_train = mnist_reader.load_mnist('data/fashion', kind='train')
    x_test, y_test = mnist_reader.load_mnist('data/fashion', kind='t10k')
    # normalize the inputs to [-1, 1]: maps 0 -> -1 and 255 -> +1
    x_train = (x_train.astype(np.float32) - 127.5) / 127.5
    # load_mnist already returns flat (n_imgs, height*width) arrays;
    # x_height_width records the original image shape for reshaping later
    x_height_width = (28, 28)
    return (x_train, y_train, x_test, y_test), x_height_width
Example #11
def load_data_wrapper():
    x_train, y_train = mnist_reader.load_mnist('./data/fashion', kind='train')
    x_train_v = [np.reshape(x, (784, 1)) for x in x_train]
    y_train_v = [vectorized_result(y) for y in y_train]
    train_dataset = list(zip(x_train_v, y_train_v))
    x_test, y_test = mnist_reader.load_mnist('./data/fashion', kind='t10k')
    x_test_v = [np.reshape(x, (784, 1)) for x in x_test]
    #y_test_v = [vectorized_result(y) for y in y_test]
    test_dataset = list(zip(x_test_v, y_test))
    return train_dataset, test_dataset
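vectorized_result in Example #11 is also a helper that is not shown; a minimal sketch consistent with its use (a 10x1 one-hot column vector for label y):

    def vectorized_result(y):
        # hypothetical helper: one-hot column for digit label y
        e = np.zeros((10, 1))
        e[y] = 1.0
        return e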
Example #12
def model_fashion():
    path = './fashion-mnist/data/fashion'
    X_train, y_train = mnist_reader.load_mnist(path, kind='train')
    X_test, y_test = mnist_reader.load_mnist(path, kind='t10k')
    X_train = X_train.astype('float32').reshape(-1, 28, 28, 1)
    X_test = X_test.astype('float32').reshape(-1, 28, 28, 1)
    X_train /= 255
    X_test /= 255
    print('Train:{},Test:{}'.format(len(X_train), len(X_test)))
    nb_classes = 10
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    print('data success')
    input_tensor = Input((28, 28, 1))
    #28*28
    temp = Conv2D(filters=6,
                  kernel_size=(5, 5),
                  padding='valid',
                  use_bias=False)(input_tensor)
    temp = Activation('relu')(temp)
    #24*24
    temp = MaxPooling2D(pool_size=(2, 2))(temp)
    #12*12
    temp = Conv2D(filters=16,
                  kernel_size=(5, 5),
                  padding='valid',
                  use_bias=False)(temp)
    temp = Activation('relu')(temp)
    #8*8
    temp = MaxPooling2D(pool_size=(2, 2))(temp)
    #4*4
    temp = Flatten()(temp)
    temp = Dense(120, activation='relu')(temp)
    temp = Dense(84, activation='relu')(temp)
    output = Dense(nb_classes, activation='softmax')(temp)
    model = Model(inputs=input_tensor, outputs=output)
    model.summary()
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    checkpoint = ModelCheckpoint(filepath='./model/model_fashion.hdf5',
                                 monitor='val_acc',
                                 mode='auto',
                                 save_best_only=True)
    model.fit(X_train,
              y_train,
              batch_size=64,
              epochs=15,
              validation_data=(X_test, y_test),
              callbacks=[checkpoint])
    model = load_model('./model/model_fashion.hdf5')
    score = model.evaluate(X_test, y_test, verbose=0)
    print(score)
Example #13
def process_data():
    x_train, y_train = load_mnist('data', kind='train')
    x_test, y_test = load_mnist('data', kind='t10k')

    x_train, y_train = filter_data(x_train, y_train)
    x_test, y_test = filter_data(x_test, y_test)

    mid = len(x_train) // 2
    return (((x_train, y_train), (x_test, y_test)),
            ((x_train[:mid], y_train[:mid]), (x_test, y_test)))
Example #14
    def __call__(self):
        x_train, y_train = load_mnist('data', kind='train')
        x_test, y_test = load_mnist('data', kind='t10k')

        x_train, y_train = self.__filter_data(x_train, y_train)
        x_test, y_test = self.__filter_data(x_test, y_test)

        mid = len(x_train) // 2
        return (((x_train, y_train), (x_test, y_test)),
                ((x_train[:mid], y_train[:mid]), (x_test, y_test)))
Example #15
def generate_fashion_sample(label, attack):
    path = './fashion-mnist/data/fashion'
    X_train, Y_train = mnist_reader.load_mnist(path, kind='train')
    X_test, Y_test = mnist_reader.load_mnist(path, kind='t10k')
    X_train = X_train.astype('float32').reshape(-1, 28, 28, 1)
    X_test = X_test.astype('float32').reshape(-1, 28, 28, 1)
    X_train /= 255
    X_test /= 255

    image_org = X_test[Y_test == label]
    adv = adv_func(image_org, label,
                   model_path='./model/model_fashion.hdf5',
                   dataset='mnist', attack=attack)
    return adv
Example #16
def main():
    X_test, y_test = mnist_reader.load_mnist('data/fashion', kind='t10k')
    X_train, y_train = mnist_reader.load_mnist('data/fashion', kind='train')
    start = time.time()
    covariance = [0.99, .95, .9, .8]
    X_train, X_test = reduce_dimensionality_LDA()
    #         X_train, X_test, n = reduce_dimensionality_PCA(var)
    print('Dimensionality reduced to: ', X_train.shape[1])
    accuracy = test(X_train, y_train, X_test, y_test)
    end = time.time()
    total_time = end - start
    print('time: ', total_time)
Example #17
def load_dataset():
    # loading the train and test variables
    X_train, y_train = mnist_reader.load_mnist(
        'fashion-mnist-master/data/fashion', kind='train')
    X_test, y_test = mnist_reader.load_mnist(
        'fashion-mnist-master/data/fashion', kind='t10k')

    print("X_train shape: ", X_train.shape)
    print("y_train shape: ", y_train.shape)
    print("X_test shape: ", X_test.shape)
    print("y_test shape: ", y_test.shape)

    return X_train, y_train, X_test, y_test
Example #18
File: svm.py Project: nserr/SENG474
def init():
    X_train, y_train = mnist_reader.load_mnist(
        'fashion-mnist-master/data/fashion', kind='train')
    X_test, y_test = mnist_reader.load_mnist(
        'fashion-mnist-master/data/fashion', kind='t10k')

    X_train = np.array(X_train)
    y_train = np.array(y_train)
    X_test = np.array(X_test)
    y_test = np.array(y_test)

    X_train_temp, y_train_temp = [], []
    X_test_temp, y_test_temp = [], []

    train_len = len(X_train)
    test_len = len(X_test)

    sandal_count = 0
    sneaker_count = 0

    # Convert training data to binary classification problem.
    for i in range(train_len):

        if y_train[i] == 5 and sandal_count < 1000:
            y_train_temp.append(0)
            X_train_temp.append(X_train[i])
            sandal_count += 1

        if y_train[i] == 7 and sneaker_count < 1000:
            y_train_temp.append(1)
            X_train_temp.append(X_train[i])
            sneaker_count += 1

    # Convert test data to binary classification problem.
    for i in range(test_len):

        if y_test[i] == 5:
            y_test_temp.append(0)
            X_test_temp.append(X_test[i])

        if y_test[i] == 7:
            y_test_temp.append(1)
            X_test_temp.append(X_test[i])

    X_train = np.array(X_train_temp) / 255
    y_train = y_train_temp
    X_test = np.array(X_test_temp) / 255
    y_test = y_test_temp

    return X_train, y_train, X_test, y_test
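The test-set filtering loops in Example #18 can be written with boolean masks; a sketch of the test half only (the 1000-per-class cap on the training side would still need a slice or a loop):

    mask = (y_test == 5) | (y_test == 7)
    X_test = np.array(X_test)[mask] / 255
    y_test = (np.array(y_test)[mask] == 7).astype(int)  # sandal -> 0, sneaker -> 1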
Example #19
def load_data():
    x_train, y_train = mnist_reader.load_mnist('../fashion_mnist/data/fashion',
                                               kind='train')

    x_test, y_test = mnist_reader.load_mnist('../fashion_mnist/data/fashion',
                                             kind='t10k')

    print(f'x train shape: {x_train.shape}')
    print(f'y train shape: {y_train.shape} with labels {set(y_train)}')
    print(f'x test shape: {x_test.shape}')
    print(f'y test shape: {y_test.shape} with labels {set(y_test)}')
    print(f'number of x_train: {x_train.shape[0]} ')
    print(f'number of x_test: {x_test.shape[0]} ')

    return x_train, y_train, x_test, y_test
Example #20
    def load_data(self,
                  file_name='fashion-mnist/data/fashion',
                  datatype='train'):
        base_path = os.getcwd() + '/../'
        if datatype == 'train':
            x, y = mnist_reader.load_mnist(base_path + file_name, kind='train')
        elif datatype == 'test':
            x, y = mnist_reader.load_mnist(base_path + file_name, kind='t10k')
        else:
            print('datatype must be either \'train\' or \'test\'')
            exit(-1)

        if self._net_type == 'conv':
            x = x.reshape(-1, 28, 28, 1)
        y = keras.utils.to_categorical(y, num_classes=10)

        return x, y
Example #21
def load_and_preprocess_data():

	X_train, y_train = mnist_reader.load_mnist('../data/fashion', kind='train')
	X_test, y_test = mnist_reader.load_mnist('../data/fashion', kind='t10k')

	# X_train = 1.0 * X_train / 255
	# X_test = 1.0 * X_test / 255

	# standardize: subtract the mean image, divide by the per-pixel std
	mean_img = np.mean(X_train, axis=0)
	stddev_img = np.std(X_train, axis=0)

	# small epsilon guards against division by zero for constant pixels
	X_train = (X_train - mean_img) / (stddev_img + 1e-8)
	X_test = (X_test - mean_img) / (stddev_img + 1e-8)

	return add_bias(X_train), y_train, add_bias(X_test), y_test
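add_bias above is not shown; a minimal sketch matching the bias-column idiom used in Example #9 (np.c_ with a column of ones):

    def add_bias(X):
        # hypothetical helper: prepend a constant-1 bias feature
        return np.c_[np.ones(X.shape[0]), X]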
Example #22
def load_mnist(path, kind):
    (xs, ys) = mnist_reader.load_mnist(path, kind=kind)

    ts = []
    for i in range(len(xs)):
        if ys[i] > 1: continue
        y = 1 if ys[i] == 0 else -1
        ts.append((xs[i], y))

    return ts
Example #23
def load_dataset():
    # loading the train and test variables
    X_train, y_train = mnist_reader.load_mnist(
        'fashion-mnist-master/data/fashion', kind='train')
    X_test, y_test = mnist_reader.load_mnist(
        'fashion-mnist-master/data/fashion', kind='t10k')

    X_train = X_train.reshape((X_train.shape[0], 28, 28, 1))
    X_test = X_test.reshape((X_test.shape[0], 28, 28, 1))

    # one hot encoding on the y values
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    print("X_train shape: ", X_train.shape)
    print("y_train shape: ", y_train.shape)
    print("X_test shape: ", X_test.shape)
    print("y_test shape: ", y_test.shape)

    return X_train, y_train, X_test, y_test
Example #24
def generate_fashion_sample(label, ratio=0.1):
    path = '/home/qingkaishi/Diversity/Test-diversity/fashion-mnist/data/fashion'
    X_train, Y_train = mnist_reader.load_mnist(path, kind='train')
    X_test, Y_test = mnist_reader.load_mnist(path, kind='t10k')
    X_train = X_train.astype('float32').reshape(-1, 28, 28, 1)
    X_test = X_test.astype('float32').reshape(-1, 28, 28, 1)
    X_train /= 255
    X_test /= 255

    image_org = X_test[Y_test == label]

    choice_index = np.random.choice(range(len(image_org)),
                                    size=int(len(image_org) * ratio),
                                    replace=False)
    image_org = image_org[choice_index]

    adv = adv_example(image_org,
                      label,
                      model_path='./model/model_fashion.hdf5',
                      dataset='mnist')
    return image_org, adv
Example #25
def Test(factNormalizar, neuronasOcultas, nombreModelo):
    # Constants
    '''
    Factor by which the input is divided so the
    sigmoid does not drop off too quickly
    '''
    NORMALIZAR = factNormalizar

    # Number of neurons in the hidden layer
    NEURONAS_OCULTAS = neuronasOcultas

    # Neurons in the output layer
    NEURONAS_SALIDA = 10

    # Read the data
    print("Reading data...")
    X_test, y_test = mnist_reader.load_mnist('./data/fashion', kind='t10k')

    #TEST

    modeloCarga = open(nombreModelo, 'rb')
    flat_thetas = pickle.load(modeloCarga)
    modeloCarga.close()

    X = X_test / NORMALIZAR

    m, n = X.shape

    # y holds the label of each image
    y = y_test.reshape(m, 1)

    Y = (y == np.array(range(10))).astype(int)

    # Build the model
    theta_shapes = np.array([[NEURONAS_OCULTAS, n + 1],
                             [NEURONAS_SALIDA, NEURONAS_OCULTAS + 1]])

    resultado = feed_forward(inflate_matrixes(flat_thetas, theta_shapes), X)

    prediccion = np.argmax(resultado[-1], axis=1).reshape(m, 1)

    correctos = ((prediccion == y) * 1).sum()
    incorrectos = len(y) - correctos
    print(nombreModelo)
    print("Correctas " + str(correctos))
    print("Incorrectos " + str(incorrectos))
    print("Exactitud " + str(correctos * 100 / float(len(y))))
    print()

    return y, prediccion
Example #26
def main():
    x_train, y_train = mnist_reader.load_mnist("data/fashion", kind="train")
    x_train = prepare_data(x_train)
    x_train, y_train, x_validation, y_validation = segregate_data(
        x_train, y_train)

    config = {
        "initialGuess": np.ones(x_train.shape[1]),
        "learningRate": 0.01,
        "iterations": 1000
    }
    print(
        f'Using learning rate = {config["learningRate"]} and {config["iterations"]} iterations'
    )

    predictions = list()
    for label in labels:
        start_time = time.time()

        y_binary_train = toggle_class(y_train, label)
        y_binary_validation = toggle_class(y_validation, label)

        lr = LogisticRegression(config)
        cost_iterations = lr.fit(x_train, y_binary_train, labels[label],
                                 cost_function)

        predicted = lr.predict(x_validation, labels[label])
        predictions.append(predicted)

        title = "One Vs All " + labels[label]
        plot_cost(cost_iterations, title, title.replace(" ", ""),
                  config["iterations"], config["learningRate"])
        binarized_predicted = [1 if p >= 0.5 else 0 for p in predicted]
        show_metrics(binarized_predicted, title,
                     ["Not " + labels[label], labels[label]],
                     y_binary_validation,
                     "ConfusionMatrix" + labels[label].replace("/", "-"))

        elapsed_time = time.time() - start_time
        print("Elapsed time: %1f s" % (elapsed_time))

    print("Predicting for all validation set...")
    predictions = np.array(predictions)

    predicted_class = []
    for i in range(predictions.shape[1]):
        column = predictions[:, i]
        predicted_class.append(np.argmax(column))

    show_metrics(predicted_class, "One Vs All", list(labels.keys()),
                 y_validation, "ConfusionMatrixLogisticOneVsAll")
Example #27
def get_data(train_size, test_size):
    total_size = train_size + test_size
    if torch.cuda.is_available():
        #data, _ = mnist_reader.load_mnist('/content/', kind='train')
        data, _ = mnist_reader.load_mnist(dir_path + '/data/fashion',
                                          kind='train')
    else:
        data, _ = mnist_reader.load_mnist(dir_path + '/data/fashion',
                                          kind='train')
    data_demo = data[data.shape[0] - 1, :]
    data_demo = np.resize(data_demo, (28, 28))
    data_demo = (np.pad(data_demo,
                        ((2, 2), (2, 2)), 'constant') / 255).astype('float64')

    data = data[0:total_size, :]
    data = np.resize(data, (total_size, 28, 28))
    data = (np.pad(data, ((0, 0), (2, 2), (2, 2)), 'constant') / 255).astype(
        'float64')  # get to 32x32

    data_train = data[0:train_size]
    data_test = data[train_size:total_size]

    return (data_train, data_test, data_demo)
Example #28
def main():

    # Reading the dataset
    X_train, y_train = mnist_reader.load_mnist('../data/fashion', kind='train')
    X_test, y_test = mnist_reader.load_mnist('../data/fashion', kind='t10k')

    # Splitting into train and validation set
    X_train2, X_val, y_train2, y_val = train_test_split(X_train,
                                                        y_train,
                                                        test_size=1 / 6,
                                                        random_state=42)

    # Converting the image labels (numbers) into one-hot vectors
    y_train_onehot = np.eye(10)[y_train]
    y_train2_onehot = np.eye(10)[y_train2]
    y_val_onehot = np.eye(10)[y_val]
    y_test_onehot = np.eye(10)[y_test]

    # Scaling the pixel values to prevent overflow
    X_train = X_train / 255
    X_train2 = X_train2 / 255
    X_val = X_val / 255
    X_test = X_test / 255

    if args.train:
        train(X_train2, y_train2_onehot, X_val, y_val_onehot)
    if args.test:
        test(X_test, y_test_onehot)
    if args.layer == '1':
        logistic(X_train, y_train, y_train_onehot, X_test, y_test,
                 y_test_onehot)
    if args.layer == '2':
        logistic(X_train, y_train, y_train_onehot, X_test, y_test,
                 y_test_onehot)
    if args.layer == '3':
        logistic(X_train, y_train, y_train_onehot, X_test, y_test,
                 y_test_onehot)
Example #29
def gen_data(use_adv=True, deepxplore=False):
    path = '/home/qingkaishi/Diversity/Test-diversity/fashion-mnist/data/fashion'
    X_train, Y_train = mnist_reader.load_mnist(path, kind='train')
    X_test, Y_test = mnist_reader.load_mnist(path, kind='t10k')
    X_train = X_train.astype('float32').reshape(-1, 28, 28, 1)
    X_test = X_test.astype('float32').reshape(-1, 28, 28, 1)
    X_train /= 255
    X_test /= 255
    model_path = './model/model_fashion.hdf5'
    if use_adv:
        # append saved adversarial examples from four attacks to the test set
        attack_lst = ['fgsm', 'jsma', 'bim', 'cw']
        adv_image_all = []
        adv_label_all = []
        for attack in attack_lst:
            adv_image_all.append(np.load('./adv_image/{}_fashion_image.npy'.format(attack)))
            adv_label_all.append(np.load('./adv_image/{}_fashion_label.npy'.format(attack)))
        adv_image_all = np.concatenate(adv_image_all, axis=0)
        adv_label_all = np.concatenate(adv_label_all, axis=0)
        test = np.concatenate([X_test, adv_image_all], axis=0)
        true_test = np.concatenate([Y_test, adv_label_all], axis=0)
    else:
        test = X_test
        true_test = Y_test
    train = X_train
    model = load_model(model_path)
    pred_test_prob = model.predict(test)
    pred_test = np.argmax(pred_test_prob, axis=1)
    input = model.layers[0].output
    if not deepxplore:
        layers = [model.layers[i].output for i in (2, 3, 5, 6, 8, 9, 10)]
    else:
        layers = [model.layers[i].output for i in (1, 3, 4, 8, 8, 9, 10)]

    layers = list(zip(4 * ['conv'] + 3 * ['dense'], layers))

    return input, layers, test, train, pred_test, true_test, pred_test_prob
Example #30
    def loadData(self):
        print("started loading data")
        startTime = time.time()
        self.X_raw_train, self.y_train = mnist_reader.load_mnist(
            "/home/user/git-repos/fashion-mnist/data/fashion", kind="train")
        #self.X_raw_train = [[255,100,20,11,50], [223, 52, 12,255, 11]]
        #self.y_train = [1, 3]
        self.inputLen = len(self.X_raw_train[0])
        self.X_train = []
        for x in self.X_raw_train:
            new_column = []
            for pixel in x:
                new_column.append(pixel / 255)
            self.X_train.append(new_column)
        print("done")
        print("it took: ", time.time() - startTime)
Example #31
import numpy as np
import sys
import math
from helper import save_data, load_data
import mnist_reader


X_train, y_train = mnist_reader.load_mnist('data/fashion', kind='train')
X_test, y_test = mnist_reader.load_mnist('data/fashion', kind='t10k')


training_data = (X_train > 127)   # binarize pixels at threshold 127
validating_data = (X_test > 127)


def sigmoid(x):
    x = np.array(x)
    return 1.0 / (1.0 + np.exp(-x))
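One caveat with the textbook formula: np.exp(-x) overflows for large negative inputs. A numerically stable variant (a sketch, not part of the original notebook):

    def sigmoid_stable(x):
        # sigmoid saturates long before |x| = 500, so clipping is lossless in float64
        x = np.clip(np.asarray(x, dtype=np.float64), -500.0, 500.0)
        return 1.0 / (1.0 + np.exp(-x))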