Example #1
import var5
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Convolution2D, MaxPooling2D, Dense, Flatten
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder


# Data preparation
data, labels = var5.gen_data()  # generate the data
sz = data.shape[0]
img_size = data.shape[1]
labels = labels.reshape(sz)
encoder = LabelEncoder()  # encode the labels
encoder.fit(labels)
labels = encoder.transform(labels).reshape(sz, 1)
data = data.reshape(sz, img_size**2)
data = np.hstack((data, labels))
rng = np.random.default_rng()
rng.shuffle(data)  # shuffle the data
labels = data[:, -1].reshape(sz, 1)
data = data[:, :data.shape[1]-1].reshape(sz, img_size, img_size, 1)
data /= np.max(data)  # normalize
tr_sz = int(sz * 0.9)  # split into training and test sets
train_data = data[:tr_sz, :]
train_labels = labels[:tr_sz, :]
test_data = data[tr_sz:, :]
test_labels = labels[tr_sz:, :]

# Set the parameters
batch_size = 10
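# The listing is cut off here. A hedged sketch of how a model is typically
# assembled and trained with the layers imported above; the architecture,
# optimizer and epoch count below are assumptions, not the original values.
num_classes = len(encoder.classes_)
inp = Input(shape=(img_size, img_size, 1))
x = Convolution2D(32, (3, 3), padding='same', activation='relu')(inp)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dense(64, activation='relu')(x)
out = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=inp, outputs=out)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_data, train_labels, batch_size=batch_size, epochs=10,
                    validation_data=(test_data, test_labels))

plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='validation')
plt.legend()
plt.show()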
Example #2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from sklearn.preprocessing import LabelEncoder
import numpy as np
import matplotlib.pyplot as plt
from var5 import gen_data


def build_model():
    # The original function body is not shown; this is an assumed minimal CNN
    # for the 50x50x1 inputs prepared below, not the author's original network.
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=(50, 50, 1)),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(1, activation='sigmoid'),
    ])
    return model


size = 3000
test_count = size // 5
data, labels = gen_data(size)
labels = np.asarray(labels).flatten()

encoder = LabelEncoder()
encoder.fit(labels)
labels = encoder.transform(labels)

temp = list(zip(data, labels))
np.random.shuffle(temp)
data, labels = zip(*temp)
data = np.asarray(data).reshape(size, 50, 50, 1)
labels = np.asarray(labels).flatten()

test_data = data[:test_count]
test_labels = labels[:test_count]
train_data = data[test_count:size]
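# The listing breaks off here. A hedged sketch of the likely continuation;
# the loss, optimizer, batch size and epoch count are assumptions, and the
# binary loss matches the sigmoid output assumed in build_model above.
train_labels = labels[test_count:size]

model = build_model()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(train_data, train_labels, batch_size=32, epochs=10,
                    validation_data=(test_data, test_labels))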
Example #3
import numpy as np
import var5
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import utils
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Convolution2D, MaxPooling2D, Dropout, Dense, Flatten

batch_size = 32
num_epochs = 200
kernel_size = 4
pool_size = 2
conv_depth_1 = 32
conv_depth_2 = 64
drop_prob_1 = 0.25
drop_prob_2 = 0.5
hidden_size = 512

size = 200
img_size = 50

data, labels = var5.gen_data(size, img_size)
labels = labels.reshape(labels.size)
num_classes = np.unique(labels).shape[0]
le = LabelEncoder()
le.fit(np.unique(labels))
labels = le.transform(labels)
labels = utils.to_categorical(labels, num_classes)  # One-hot encode the labels

inp = Input(shape=(img_size, img_size, 1))

# Conv [32] -> Conv [32] -> Pool (with dropout on the pooling layer)
layer = Convolution2D(conv_depth_1, (kernel_size, kernel_size),
                      padding='same',
                      activation='relu')(inp)
layer = Convolution2D(conv_depth_1, (kernel_size, kernel_size),
                      padding='same',
                      activation='relu')(layer)
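# The listing stops after the second convolution. A hedged sketch of how such
# a network is typically completed with the hyperparameters declared above;
# the exact original layout past this point is an assumption.
layer = MaxPooling2D(pool_size=(pool_size, pool_size))(layer)
layer = Dropout(drop_prob_1)(layer)
# Conv [64] -> Conv [64] -> Pool (with dropout on the pooling layer)
layer = Convolution2D(conv_depth_2, (kernel_size, kernel_size),
                      padding='same', activation='relu')(layer)
layer = Convolution2D(conv_depth_2, (kernel_size, kernel_size),
                      padding='same', activation='relu')(layer)
layer = MaxPooling2D(pool_size=(pool_size, pool_size))(layer)
layer = Dropout(drop_prob_1)(layer)
# Flatten -> Dense -> Dropout -> softmax over the classes
layer = Flatten()(layer)
layer = Dense(hidden_size, activation='relu')(layer)
layer = Dropout(drop_prob_2)(layer)
out = Dense(num_classes, activation='softmax')(layer)
model = Model(inputs=inp, outputs=out)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])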
Example #4
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, Dense, Dropout, Flatten
import numpy as np
from matplotlib import pyplot as plt
from var5 import gen_data


[data, labels] = gen_data(1000)
[data, labels] = shuffle(data, labels)

data /= np.max(data)

encoder = LabelEncoder()
encoder.fit(labels.ravel())
labels = encoder.transform(labels.ravel())


data_len, height, width = data.shape

validation_ratio = 0.2
test_ratio = 0.2

train_data = data[: int(data_len*(1 - validation_ratio - test_ratio))]
train_labels = labels[: int(data_len*(1 - validation_ratio - test_ratio))]

validation_data = data[int(data_len*(1 - validation_ratio - test_ratio)): int(data_len*(1 - test_ratio))]
validation_labels = labels[int(data_len*(1 - validation_ratio - test_ratio)): int(data_len*(1 - test_ratio))]

test_data = data[int(data_len*(1 - test_ratio)):]
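# The listing is cut off here. A hedged sketch of the likely continuation:
# finish the split, add a channel axis, and train a small Sequential CNN.
# The architecture and training settings below are assumptions.
test_labels = labels[int(data_len*(1 - test_ratio)):]

train_data = train_data.reshape(-1, height, width, 1)
validation_data = validation_data.reshape(-1, height, width, 1)
test_data = test_data.reshape(-1, height, width, 1)

model = Sequential([
    Convolution2D(32, (3, 3), activation='relu', input_shape=(height, width, 1)),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid'),  # assuming binary labels from gen_data
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(train_data, train_labels, epochs=10, batch_size=32,
                    validation_data=(validation_data, validation_labels))
model.evaluate(test_data, test_labels)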
Example #5
            self.table["ind"].append(min_acc_ind)
            self.table["class"].append(int(min_acc_class))
            self.table["acc"].append(min_acc)
            self.table["loss"].append(min_acc_loss)

    def on_train_end(self, logs=None):
        df = pd.DataFrame(data=self.table)
        df.to_csv("LowestAccuracyLog.csv")


def accuracy(y_true, y_pred):
    return 1 - abs(y_pred - y_true)


# Data preparation
data, labels = var5.gen_data()
sz = data.shape[0]
img_size = data.shape[1]
labels = labels.reshape(sz)
encoder = LabelEncoder()
encoder.fit(labels)
labels = encoder.transform(labels).reshape(sz, 1)
data = data.reshape(sz, img_size**2)
data = np.hstack((data, labels))
rng = np.random.default_rng()
rng.shuffle(data)
labels = data[:, -1].reshape(sz, 1)
data = data[:, :data.shape[1] - 1].reshape(sz, img_size, img_size, 1)
data /= np.max(data)
tr_sz = int(sz * 0.9)
train_data = data[:tr_sz, :]
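# The listing breaks off here. A hedged sketch of how the custom `accuracy`
# metric and the CSV-logging callback defined above are typically wired into
# training. The model below is an assumption, and `AccLogger` is a
# hypothetical stand-in for the callback class whose header is not shown.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

train_labels = labels[:tr_sz, :]
test_data = data[tr_sz:, :]
test_labels = labels[tr_sz:, :]

model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(img_size, img_size, 1)),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[accuracy])
model.fit(train_data, train_labels, epochs=10, batch_size=10,
          validation_data=(test_data, test_labels),
          callbacks=[AccLogger()])  # AccLogger: hypothetical name, see note above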
Example #6
# Import modules
import numpy as np  # Provides general mathematical and numerical operations
from sklearn.preprocessing import LabelEncoder  # Converts categorical labels into numeric values the model can work with
from tensorflow.keras.models import Sequential  # Sequential is used to build simple models:
# with Sequential it is enough to add a few of your own layers
from sklearn.model_selection import train_test_split  # Splits the data into training and test parts
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D

from var5 import gen_data

# Load the data
size = 13000
dataset, labels = gen_data(
    size)  # dataset holds the input data, labels holds the target outputs
dataset = np.asarray(dataset)
labels = np.asarray(labels)

dataset_train, dataset_test, labels_train, labels_test = train_test_split(
    dataset, labels, test_size=0.2)
dataset_train = dataset_train.reshape(dataset_train.shape[0], 50, 50, 1)
dataset_test = dataset_test.reshape(dataset_test.shape[0], 50, 50, 1)

dataset_train = dataset_train.astype('float32')
dataset_test = dataset_test.astype('float32')
# Normalization
dataset_train /= 255
dataset_test /= 255

# Convert the text labels to a categorical vector (vector -> matrix)
encoder = LabelEncoder()
encoder.fit(