Code Example #1
File: mnist.py  Project: ahborba/computacao_grafica
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

def main():
    # pre_processing, load_data and NN are defined elsewhere in this project.
    pre_processing(['./train/', './val/'])
    x, y = load_data('./train/')
    k = NN(x, y, 20)                       # wrapper: data, labels, epochs
    x = k.normalize(x)
    k.add_layer(50, tf.nn.relu)
    k.add_layer(50, tf.nn.relu)
    k.add_layer(10, tf.nn.softmax)         # 10 output classes (MNIST digits)
    h = k.train()
    x_val, y_val = load_data('./val/')
    predictions = [np.argmax(yi) for yi in np.array(k.predict(x_val))]
    print(k.confusion_matrix(y_val, predictions))
    loss, acc = k.evaluate(x_val, y_val)
    print('loss: ', loss)
    print('acc: ', acc * 100, '%')
    # Plot training accuracy per epoch
    plt.plot(h.history['acc'])
    # plt.plot(h2.history['acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train'], loc='upper left')
    plt.show()
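
The NN class used above is not included in the snippet. A minimal sketch of a compatible tf.keras wrapper might look like the following; every method is inferred from the call sites (constructor takes data, labels and an epoch count, the history exposes an 'acc' key, labels are integers), not taken from the project.

import tensorflow as tf

class NN:
    def __init__(self, x, y, epochs):
        # x is assumed to be flattened to shape (N, features).
        self.x, self.y, self.epochs = x, y, epochs
        self.model = tf.keras.Sequential()

    def normalize(self, x):
        # Scale 8-bit pixel values into [0, 1] and keep the normalized copy.
        self.x = x.astype('float32') / 255.0
        return self.x

    def add_layer(self, units, activation):
        self.model.add(tf.keras.layers.Dense(units, activation=activation))

    def train(self):
        self.model.compile(optimizer='adam',
                           loss='sparse_categorical_crossentropy',
                           metrics=['acc'])
        return self.model.fit(self.x, self.y, epochs=self.epochs)

    def predict(self, x):
        return self.model.predict(x)

    def evaluate(self, x, y):
        return self.model.evaluate(x, y)

    def confusion_matrix(self, y_true, y_pred):
        return tf.math.confusion_matrix(y_true, y_pred).numpy()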
Code Example #2
    def __call__(self):
        # load_hdf5 and pre_processing are helpers defined elsewhere in the project.
        images = load_hdf5(self.images_path)
        labels = load_hdf5(self.labels_path)
        masks = load_hdf5(self.mask_path)

        images = pre_processing(images)
        if np.max(labels) > 1:          # labels stored as 0/255 -> rescale to 0/1
            labels = labels / 255.
        masks = masks / 255.

        # Training: return ordered patches; testing: also return the full arrays.
        if self.train_test == 'train':
            return self.extract_ordered(images, labels)

        if self.train_test == 'test':
            sub_images, sub_labels = self.extract_ordered(images, labels)
            return (sub_images, images, labels, masks)
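
Examples #2 through #4 read their arrays with a load_hdf5 helper that is not shown. A minimal sketch with h5py might look like this; the dataset key "image" is an assumption.

import h5py

def load_hdf5(infile, key="image"):
    # Read a single dataset from an HDF5 file into a NumPy array.
    with h5py.File(infile, "r") as f:
        return f[key][()]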
Code Example #3
    def __call__(self):
        images = load_hdf5(self.images_path)
        labels = load_hdf5(self.labels_path)
        masks = load_hdf5(self.mask_path)

        # Sanity-check that all three arrays share the expected spatial size.
        assert images.shape[1] == self.height and images.shape[2] == self.width
        assert labels.shape[1] == self.height and labels.shape[2] == self.width
        assert masks.shape[1] == self.height and masks.shape[2] == self.width

        images = pre_processing(images)
        if np.max(labels) > 1:          # labels stored as 0/255 -> rescale to 0/1
            labels = labels / 255.
        masks = masks / 255.

        images, labels, masks = self.padding(images, labels, masks)

        print('images:', images.shape, images.dtype, np.min(images), np.max(images))
        print('labels:', labels.shape, labels.dtype, np.min(labels), np.max(labels))
        print('masks:', masks.shape, masks.dtype, np.min(masks), np.max(masks))

        return images, labels, masks
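
The padding step above is also not shown. A plausible sketch, assuming channels-first (N, C, H, W) arrays and a fixed patch size, zero-pads height and width up to the next multiple of the patch size so that ordered patch extraction covers the whole image; in the example it is a method (self.padding), so the real signature and patch size may differ.

import numpy as np

def padding(images, labels, masks, patch=48):
    # Zero-pad H and W of every array to the next multiple of `patch`.
    def pad(arr):
        n, c, h, w = arr.shape
        new_h = ((h + patch - 1) // patch) * patch
        new_w = ((w + patch - 1) // patch) * patch
        out = np.zeros((n, c, new_h, new_w), dtype=arr.dtype)
        out[:, :, :h, :w] = arr
        return out
    return pad(images), pad(labels), pad(masks)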
Code Example #4
File: generator.py  Project: peterrosetu/retina_unet
    def __call__(self):
        images = load_hdf5(self.images_path)
        labels = load_hdf5(self.labels_path)
        masks = load_hdf5(self.mask_path)

        images = pre_processing(images)
        if np.max(labels) > 1:          # labels stored as 0/255 -> rescale to 0/1
            labels = labels / 255.
        masks = masks / 255.

        # Optional sanity checks, left disabled:
        # visualize(group_images(images, 4)).show()
        # visualize(group_images(labels, 4)).show()
        # visualize(group_images(masks, 4)).show()

        # print(images.shape, images.dtype, np.min(images), np.max(images))
        # print(labels.shape, labels.dtype, np.min(labels), np.max(labels))
        # print(masks.shape, masks.dtype, np.min(masks), np.max(masks))

        if self.train_test == 'train':
            return self.extract_ordered(images, labels)

        if self.train_test == 'test':
            sub_images, sub_labels = self.extract_ordered(images, labels)
            return (sub_images, images, labels, masks)
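
All of these __call__ examples feed fundus-image arrays through pre_processing before patch extraction. A hedged sketch of such a step, assuming channels-first RGB input of shape (N, 3, H, W), is below; the exact chain (grayscale conversion, dataset-wide standardization, CLAHE, rescale to [0, 1]) is a common choice for retinal-vessel pipelines, not necessarily the project's actual code.

import cv2
import numpy as np

def pre_processing(images):
    # RGB -> single-channel grayscale, keeping the channel axis.
    gray = 0.299 * images[:, 0] + 0.587 * images[:, 1] + 0.114 * images[:, 2]
    gray = gray[:, np.newaxis, :, :]

    # Standardize over the whole dataset, then rescale each image to 0..255.
    std = (gray - np.mean(gray)) / (np.std(gray) + 1e-8)
    mins = std.min(axis=(2, 3), keepdims=True)
    maxs = std.max(axis=(2, 3), keepdims=True)
    scaled = (std - mins) / (maxs - mins + 1e-8) * 255.0

    # Contrast-limited adaptive histogram equalization, image by image.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    out = np.empty_like(scaled)
    for i in range(scaled.shape[0]):
        out[i, 0] = clahe.apply(scaled[i, 0].astype(np.uint8))

    return out / 255.0   # final range [0, 1], matching the checks in the callers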
Code Example #5
import pre_process
import logistic_reg
import svm
import random_forest

output_file = "D:/data/output_1.csv"
pre_process.pre_processing("D:/data/Epi.pkl")

# Module selection
choice = input(
    "choose your module: 1. Logistic Regression  2. SVM  3. Random Forest: ")
if choice == '1':
    logistic_reg.logistic_reg("D:/data/output_3.pkl")
elif choice == '2':
    svm.svm("D:/data/output_3.pkl")
elif choice == '3':
    random_forest.random_forest("D:/data/output_3.pkl")
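
The dispatched modules (logistic_reg, svm, random_forest) are not shown. A minimal sketch of what logistic_reg.logistic_reg might do with the pickled feature table, using pandas and scikit-learn, follows; the 'label' column name and the 80/20 split are assumptions, not the project's code.

import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

def logistic_reg(pkl_path):
    # Load the preprocessed feature table produced by pre_process.
    df = pd.read_pickle(pkl_path)
    X = df.drop(columns=['label'])      # 'label' column name is assumed
    y = df['label']
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=0)
    clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)
    print('accuracy:', accuracy_score(y_test, clf.predict(X_test)))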