Example #1
 def __init__(self, params, dfs):
     assert 0 == len(dfs)
     super().__init__(params, dfs)
     (self.train_data, self.train_targets), (
         self.test_data,
         self.test_targets,
     ) = boston_housing.load_data()
Example #2
    def _load_data(self):  # pragma: no cover
        (x_train, y_train), (x_test, y_test) = boston_housing.load_data()

        train = Dataset(x_train, y_train, self.x_type, self.y_type)
        test = Dataset(x_test, y_test, self.x_type, self.y_type)

        return train, test, None
Example #3
 def initialize(self):
     this = self.storage
     (this.train_X,
      this.train_Y), (this.test_X,
                      this.test_Y) = boston_housing.load_data()
     print(f'Length of random variable X: {len(this.train_X)}')
     print(f'Length of random variable Y: {len(this.train_Y)}')
     print(f'Random variable X[0]: {this.train_X[0]}')
     print(f'Random variable Y[0]: {this.train_Y[0]}')
Example #4
def read_data():
    (train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
    mean = train_data.mean(axis=0)
    train_data -= mean
    std = train_data.std(axis=0)
    train_data /= std
    test_data -= mean
    test_data /= std
    return (train_data, train_targets), (test_data, test_targets)
Example #5
def load_data() -> Tuple[Tuple[Any, Any], Tuple[Any, Any]]:
    (train_data, train_targets), (test_data,
                                  test_targets) = boston_housing.load_data()

    mean = train_data.mean(axis=0)
    train_data -= mean

    std = train_data.std(axis=0)
    train_data /= std
    test_data -= mean
    test_data /= std

    return (train_data, train_targets), (test_data, test_targets)
Example #6
def bostonhousing():
    from tensorflow.keras.datasets import boston_housing
    (x_train, y_train), (x_test, y_test) = boston_housing.load_data()

    model = Sequential()
    model.add(Dense(1, input_shape=(x_train.shape[1],), activation='linear'))
    model.compile(optimizer='adam', loss='mse', metrics=['mse', 'mae'])
    learner = ktrain.get_learner(model, train_data=(x_train, y_train), val_data=(x_test, y_test))
    learner.lr_find(max_epochs=5) # use max_epochs until TF 2.4
    hist = learner.fit(0.05, 8, cycle_len=1, cycle_mult=2)
    learner.view_top_losses(n=5)
    learner.validate()
    return hist
Example #7
    def _init_frame(self):
        # Process the dataset
        (self.x_train,self.y_train),(self.x_test,self.y_test) = boston_housing.load_data()

        # Split the targets into two classes: above or below 23
        def to_binary_class(y):
            for i,label in enumerate(y):
                if label >=2.3:
                    y[i] = 1
                else:
                    y[i] = 0
            return y
        self.y_train_bin = to_binary_class(copy.deepcopy(self.y_train))
        self.y_test_bin = to_binary_class(copy.deepcopy(self.y_test))
Example #8
def load_boston_housing():
    (Xtrain, Ytrain), (Xtest, Ytest) = boston_housing.load_data()
    Ytrain = Ytrain.reshape(-1, 1)
    Ytest = Ytest.reshape(-1, 1)

    X_mean, X_std = Xtrain.mean(axis=0), Xtrain.std(axis=0)

    Xtrain = (Xtrain - X_mean) / X_std
    Xtest = (Xtest - X_mean) / X_std

    Y_mean, Y_std = Ytrain.mean(axis=0), Ytrain.std(axis=0)

    Ytrain = (Ytrain - Y_mean) / Y_std
    Ytest = (Ytest - Y_mean) / Y_std
    return (Xtrain, Ytrain), (Xtest, Ytest), (X_mean, X_std), (Y_mean, Y_std)
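
A short usage sketch (the regression model here is hypothetical, not part of the example) showing why the means and standard deviations are returned: predictions made in standardized units can be mapped back to the original target scale.

(Xtrain, Ytrain), (Xtest, Ytest), (X_mean, X_std), (Y_mean, Y_std) = load_boston_housing()
# ... train some model on (Xtrain, Ytrain) that predicts standardized targets ...
# y_pred_std = model.predict(Xtest)
# y_pred = y_pred_std * Y_std + Y_mean   # back to the original unit (thousands of dollars)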
Example #9
 def __init__(self, epochs_count=100):
     (self.train_data,
      self.train_targets), (self.test_data,
                            self.test_targets) = boston_housing.load_data()
     self.mean = self.train_data.mean(axis=0)
     self.train_data -= self.mean
     self.std = self.train_data.std(axis=0)
     self.train_data /= self.std
     self.model = self.build_model()
     self.test_data -= self.mean
     self.test_data /= self.std
     self.k = 10
     self.num_val_samples = len(self.train_data) // self.k
     self.num_epochs = epochs_count
     self.all_scores = []
Example #10
def test_regression():
    (x_train, y_train), (x_test, y_test) = boston_housing.load_data()

    supervision_metric = 'mae'
    ivis_boston = Ivis(k=15,
                       batch_size=16,
                       epochs=2,
                       supervision_metric=supervision_metric)
    ivis_boston.fit(x_train, y_train)

    embeddings = ivis_boston.transform(x_train)
    y_pred = ivis_boston.score_samples(x_train)

    loss_name = ivis_boston.model_.loss['supervised'].__name__
    assert losses.get(loss_name).__name__ == losses.get(
        supervision_metric).__name__
    assert ivis_boston.model_.layers[-1].activation.__name__ == 'linear'
    assert ivis_boston.model_.layers[-1].output_shape[-1] == 1
Example #11
def train():
    (x_train, y_train), (x_test, y_test) = boston_housing.load_data()
    mean = x_train.mean(axis=0)
    std = x_train.std(axis=0)
    x_train = x_train - mean
    x_test = x_test - mean
    x_train = x_train / std
    x_test = x_test / std
    new_model = build_sequential_model([x_train.shape[1], 128, 1], 'adam',
                                       'mse', ['mae'], None)
    print(new_model.summary())
    new_model.fit(x_train,
                  y_train,
                  epochs=100,
                  validation_split=0.05,
                  verbose=1)
    mse, mae = new_model.evaluate(x_test, y_test, verbose=0)
    print(f'MSE on test: {mse}, MAE: {mae}')
    new_model.save('boston_housing.h5')
    return new_model
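
build_sequential_model() is not shown in this snippet. Below is a minimal sketch of what such a helper might look like, inferred from the call above (sizes [13, 128, 1], optimizer, loss, metrics, and a final argument that is ignored here); the signature and layer wiring are assumptions, not the original code.

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

def build_sequential_model(layer_sizes, optimizer, loss, metrics, regularizer=None):
    # layer_sizes[0] is the input width; the remaining entries are Dense layer sizes.
    model = Sequential()
    model.add(Dense(layer_sizes[1], activation='relu', input_shape=(layer_sizes[0],)))
    for units in layer_sizes[2:-1]:
        model.add(Dense(units, activation='relu'))
    model.add(Dense(layer_sizes[-1]))
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    return model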
Example #12
 def __init__(self, validation_size: float = 0.33) -> None:
     # User-defined constants
     self.num_targets = 1
     # Load the data set
     (x_train, y_train), (x_test, y_test) = boston_housing.load_data()
     # Split the dataset
     x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=validation_size)
     # Preprocess x data
     self.x_train = x_train.astype(np.float32)
     self.x_test = x_test.astype(np.float32)
     self.x_val = x_val.astype(np.float32)
     # Preprocess y data
     self.y_train = np.reshape(y_train, (-1, self.num_targets)).astype(np.float32)
     self.y_test = np.reshape(y_test, (-1, self.num_targets)).astype(np.float32)
     self.y_val = np.reshape(y_val, (-1, self.num_targets)).astype(np.float32)
     # Dataset attributes
     self.train_size = self.x_train.shape[0]
     self.test_size = self.x_test.shape[0]
     self.num_features = self.x_train.shape[1]
     self.num_targets = self.y_train.shape[1]
Example #13
def Train_model(model, epochs, json_file):

    #telegram callback
    telegram_token = "TOKEN"
    telegram_user_id = None
    bot = DLBot(token=telegram_token, user_id=telegram_user_id)
    telegram_callback = TelegramBotCallback(bot)

    #loading config
    with open(json_file, 'r') as f:
        config = json.load(f)
    category = config['category']

    #for classification
    if category == 1:
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        X_train = tf.keras.utils.normalize(X_train, axis=1)
        X_test = tf.keras.utils.normalize(X_test, axis=1)

        model.fit(X_train,
                  y_train,
                  epochs=epochs,
                  validation_data=(X_test, y_test),
                  verbose=1)

        score = model.evaluate(X_test, y_test, verbose=0)
        bot.send_message('Test loss:' + str(score[0]))
        bot.send_message('Test accuracy:' + str(score[1]))

    #for regression
    else:
        (X_train, y_train), (X_test, y_test) = boston_housing.load_data()

        model.fit(
            X_train,
            y_train,
            epochs=3,
            validation_data=(X_test, y_test),
            verbose=1)
        score = model.evaluate(X_test, y_test, verbose=0)
        bot.send_message('Test loss:' + str(score[0]))
        bot.send_message('Test accuracy:' + str(score[1]))
Example #14
 def __init__(self, validation_size: float = 0.33) -> None:
     # User-defined constants
     self.num_targets = 1
     self.batch_size = 128
     # Load the data set
     (x_train, y_train), (x_test, y_test) = boston_housing.load_data()
     # Split the dataset
     x_train, x_val, y_train, y_val = train_test_split(
         x_train, y_train, test_size=validation_size)
     # Preprocess x data
     self.x_train = x_train.astype(np.float32)
     self.x_test = x_test.astype(np.float32)
     self.x_val = x_val.astype(np.float32)
     # Preprocess y data
     self.y_train = np.reshape(y_train,
                               (-1, self.num_targets)).astype(np.float32)
     self.y_test = np.reshape(y_test,
                              (-1, self.num_targets)).astype(np.float32)
     self.y_val = np.reshape(y_val,
                             (-1, self.num_targets)).astype(np.float32)
     # Dataset attributes
     self.train_size = self.x_train.shape[0]
     self.test_size = self.x_test.shape[0]
     self.num_features = self.x_train.shape[1]
     self.num_targets = self.y_train.shape[1]
     # Normalization variables
     self.normalization_layer = Normalization()
     self.normalization_layer.adapt(self.x_train)
     # tf.data Datasets
     self.train_dataset = tf.data.Dataset.from_tensor_slices(
         (self.x_train, self.y_train))
     self.test_dataset = tf.data.Dataset.from_tensor_slices(
         (self.x_test, self.y_test))
     self.val_dataset = tf.data.Dataset.from_tensor_slices(
         (self.x_val, self.y_val))
     # Dataset preparation
     self.train_dataset = self._prepare_dataset(self.train_dataset,
                                                shuffle=True)
     self.test_dataset = self._prepare_dataset(self.test_dataset)
     self.val_dataset = self._prepare_dataset(self.val_dataset)
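
_prepare_dataset() is referenced but not included in this snippet. A plausible sketch, assuming it only shuffles (for training data), batches with self.batch_size, and prefetches; this is a guess at the helper, not the original code.

 def _prepare_dataset(self, dataset: tf.data.Dataset, shuffle: bool = False) -> tf.data.Dataset:
     # Shuffle only when requested (training data), then batch and prefetch.
     if shuffle:
         dataset = dataset.shuffle(buffer_size=self.train_size)
     return dataset.batch(self.batch_size).prefetch(tf.data.AUTOTUNE)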
Example #15
 def __init__(self):
     # Load the data set
     (self.x_train,
      self.y_train), (self.x_test,
                      self.y_test) = boston_housing.load_data()
     self.x_train_ = None
     self.x_val = None
     self.y_train_ = None
     self.y_val = None
     # Convert to float32
     self.x_train = self.x_train.astype(np.float32)
     self.y_train = self.y_train.astype(np.float32)
     self.x_test = self.x_test.astype(np.float32)
     self.y_test = self.y_test.astype(np.float32)
     # Save important data attributes as variables
     self.train_size = self.x_train.shape[0]
     self.test_size = self.x_test.shape[0]
     self.train_splitted_size = 0
     self.val_size = 0
     self.num_targets = 1  # One-dimensional output data
     self.num_features = self.x_train.shape[1]
     # Additional class attributes
     self.scaler = None
Example #16
def save_boston_housing():
    OUT_DIR = 'boston_housing'
    os.makedirs(OUT_DIR, exist_ok=True)

    # Load data from keras API
    (x_train, y_train), (x_test, y_test) = boston_housing.load_data()

    # define feature names from https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html
    feature_list = [
        'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
        'PTRATIO', 'B', 'LSTAT', 'MEDV'
    ]

    # convert train data
    with open(os.path.join(OUT_DIR, 'train_data.csv'),
              mode='w',
              encoding='utf-8') as f:
        # write header
        f.write(','.join(feature_list) + '\n')
        # write feature data
        for x_data, y_data in zip(x_train, y_train):
            f.write(','.join(map(str, np.append(x_data, y_data))) + '\n')

    # convert test data
    with open(os.path.join(OUT_DIR, 'test_data.csv'),
              mode='w',
              encoding='utf-8') as f:
        # write header
        f.write(','.join(feature_list) + '\n')
        # write feature data
        for x_data, y_data in zip(x_test, y_test):
            f.write(','.join(map(str, np.append(x_data, y_data))) + '\n')

    print()
    print('Saved to ' + OUT_DIR + '/')
    print()
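
A quick way to sanity-check the written files (pandas is not used by the function itself, only for this check):

import pandas as pd
df = pd.read_csv('boston_housing/train_data.csv')
print(df.shape)   # expected (404, 14): 13 features plus the MEDV target
print(df.head())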
Example #17
# Create two files:
# 1. The best model without EarlyStopping
# 2. The best model with EarlyStopping
import numpy as np
from tensorflow.keras.datasets import boston_housing
from sklearn.preprocessing import MinMaxScaler
# Need to work out how to use this dataset.
# Build it with this
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input

#1. Data
(x_train, y_train), (x_test, y_test) = boston_housing.load_data() # already comes pre-split(?)
print(x_train.shape) #(404, 13)
print(x_test.shape) # (102, 13)
print(y_train.shape) #(404,   )
print(y_test.shape) # (102,)

#1_2. Data preprocessing (MinMaxScaler)
#e.g. 0~711: divide by the maximum, 0~711/711
# (X - min) / (max - min)
print("===================")
print(x_train[:5]) # 0~4
print(y_train[:10]) 
print(np.max(x_train), np.min(x_train)) # max and min values
#print(dataset.feature_names)
#print(dataset.DESCR) # description
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
Example #18
# 1. Build the best model with EarlyStopping applied
# (constraint: use from tensorflow.keras.datasets import boston_housing)
# Tip: similar to the data sklearn provides, but it is not pre-split into x and y

#1. data
import numpy as np
from tensorflow.keras.datasets import boston_housing
(x_train, y_train), (x_test, y_test) = boston_housing.load_data(test_split=0.2,
                                                                seed=113)
# load_data() splits the data into training and test sets

from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)  # preprocessing

#2. model
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Dense

input1 = Input(shape=(13, ))
dense1 = Dense(128, activation='relu')(input1)
dense1 = Dense(128, activation='relu')(dense1)
dense1 = Dense(128, activation='relu')(dense1)
dense1 = Dense(64, activation='relu')(dense1)
dense1 = Dense(64, activation='relu')(dense1)
dense1 = Dense(64, activation='relu')(dense1)
output1 = Dense(1)(dense1)
model = Model(inputs=input1, outputs=output1)
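
The snippet stops after defining the model. A minimal continuation that wires in the EarlyStopping callback the task at the top asks for might look like the following; the hyperparameters (patience, epochs, batch size, validation split) are assumptions, not the original code.

#3. compile, fit (sketch)
from tensorflow.keras.callbacks import EarlyStopping

model.compile(loss='mse', optimizer='adam', metrics=['mae'])
early_stopping = EarlyStopping(monitor='val_loss', patience=20, mode='min')
model.fit(x_train, y_train, epochs=1000, batch_size=8,
          validation_split=0.2, callbacks=[early_stopping], verbose=1)

#4. evaluate
loss, mae = model.evaluate(x_test, y_test, batch_size=8)
print('loss:', loss, 'mae:', mae)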
Example #19
from tensorflow.keras.datasets import boston_housing
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense
from sklearn import preprocessing
# load the data
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()

# preprocess the data
x_train_scaled = preprocessing.scale(x_train)
scaler = preprocessing.StandardScaler().fit(x_train)
# x_test_scaled = scaler.transform(x_test)

model = Sequential()
model.add(Dense(64, activation='relu', kernel_initializer='normal', input_shape=(13, )))
model.add(Dense(64, activation='relu'))
model.add(Dense(1))

model.compile(optimizer='rmsprop', loss='mse', metrics=['mean_absolute_error'])

model.fit(x_train_scaled, y_train, epochs=200, batch_size=128, callbacks=[EarlyStopping(monitor='loss', patience=20)])
Example #20
    epochs = range(1, len(mae) + 1)
    plt.plot(epochs,
             avg_val_mae_,
             'b',
             label='Validation AVG MAE',
             color="indigo")
    plt.title('Validation AVG MAE')
    plt.xlabel('Epochs')
    plt.ylabel('VAL AVG MAE')
    plt.legend()
    plt.show()


#1 - load the data
(train_data, train_targets), (test_data,
                              test_targets) = boston_housing.load_data()

mean = train_data.mean(axis=0)  # mean value
std = train_data.std(axis=0)  # standard deviation

train_data -= mean
train_data /= std

test_data -= mean
test_data /= std

# K-fold cross-validation
k = 4
num_val_samples = len(train_data) // k
num_epochs = 50
all_scores = []  # array of scores
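
The K-fold loop itself is cut off in this snippet. A sketch of the loop these variables set up follows; build_model() and numpy (imported as np) are assumed to be defined earlier in the original file.

for i in range(k):
    print('processing fold #', i)
    # validation slice for this fold
    val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
    val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
    # the remaining samples become the training set for this fold
    partial_train_data = np.concatenate(
        [train_data[:i * num_val_samples],
         train_data[(i + 1) * num_val_samples:]], axis=0)
    partial_train_targets = np.concatenate(
        [train_targets[:i * num_val_samples],
         train_targets[(i + 1) * num_val_samples:]], axis=0)
    model = build_model()
    model.fit(partial_train_data, partial_train_targets,
              epochs=num_epochs, batch_size=16, verbose=0)
    val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
    all_scores.append(val_mae)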
Example #21
#!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: 3-6-predicting-house-prices.py
@time: 2020/4/9 16:51
@project: deep-learning-with-python-notebooks
@desc: 3.6 Predicting house prices: a regression problem
"""

from tensorflow.keras import models, layers
from tensorflow.keras.datasets import boston_housing

# Load the data
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()

# Data preprocessing: normalization
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std

test_data -= mean
test_data /= std


# Build the model
def build_model():
    # Because we will need to instantiate
    # the same model multiple times,
    # we use a function to construct it.
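    # The original snippet is cut off here. The lines below are a hedged
    # completion sketch (layer sizes and optimizer are assumptions), using
    # the models/layers imports above.
    model = models.Sequential()
    model.add(layers.Dense(64, activation='relu',
                           input_shape=(train_data.shape[1],)))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(1))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return model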
Example #22
# Create two files:
# 1. The best model without EarlyStopping
# 2. The best model with EarlyStopping
import numpy as np
from tensorflow.keras.datasets import boston_housing
from sklearn.preprocessing import MinMaxScaler
# Need to work out how to use this dataset.
# Build it with this
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input

#1. Data
(x_train, y_train), (x_test,
                     y_test) = boston_housing.load_data()  # already comes pre-split(?)
print(x_train.shape)  #(404, 13)
print(x_test.shape)  # (102, 13)
print(y_train.shape)  #(404,   )
print(y_test.shape)  # (102,)

#1_2. Data preprocessing (MinMaxScaler)
#e.g. 0~711: divide by the maximum, 0~711/711
# (X - min) / (max - min)
print("===================")
print(x_train[:5])  # 0~4
print(y_train[:10])
print(np.max(x_train), np.min(x_train))  # max and min values
#print(dataset.feature_names)
#print(dataset.DESCR) # description
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
Example #23
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten
from tensorflow.keras.datasets.boston_housing import load_data
from tensorflow.keras.models import Sequential

EPOCHS = 100
BATCH_SIZE = 32

(X_train, y_train), (X_test, y_test) = load_data(test_split=0.2)

mean = X_train.mean(axis=0)
X_train -= mean
std = X_train.std(axis=0)
X_train /= std

X_test -= mean
X_test /= std

# X_train = tf.keras.utils.normalize(X_train)
# X_test = tf.keras.utils.normalize(X_test)

num_layers = [1, 2, 3]
num_neurons = [16, 32, 64]
dropouts = [0.0, 0.2, 0.5]
best_layer_amt = 0
best_neuron_amt = 0
best_dropout = 0
best_mae = 10000

for i in num_layers:
    for k in num_neurons:
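        for d in dropouts:
            # Hedged sketch of the cut-off search body (not the author's code):
            # build a model with i hidden layers of k neurons and dropout d,
            # evaluate it, and keep the combination with the best test MAE.
            model = Sequential()
            model.add(Dense(k, activation='relu', input_shape=(X_train.shape[1],)))
            model.add(Dropout(d))
            for _ in range(i - 1):
                model.add(Dense(k, activation='relu'))
                model.add(Dropout(d))
            model.add(Dense(1))
            model.compile(optimizer='adam', loss='mse', metrics=['mae'])
            model.fit(X_train, y_train, epochs=EPOCHS, batch_size=BATCH_SIZE,
                      validation_data=(X_test, y_test), verbose=0)
            _, mae = model.evaluate(X_test, y_test, verbose=0)
            if mae < best_mae:
                best_mae, best_layer_amt, best_neuron_amt, best_dropout = mae, i, k, d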
Example #24
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt  # needed for the plotting calls below

from tensorflow.keras.datasets import boston_housing
(train_data, train_label), (test_data, test_label) = boston_housing.load_data()
train_data, test_data = train_data / 1000, test_data / 1000

model = keras.Sequential([
    keras.layers.Dense(13),
    keras.layers.Dense(8, activation=tf.nn.relu),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(4, activation=tf.nn.relu),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(1, activation=tf.nn.softmax),
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit(train_data,
                    train_label,
                    batch_size=4,
                    epochs=int(input("Epochs: ")),
                    validation_split=0.1)
score = model.evaluate(test_data, test_label, batch_size=4)
predictions = model.predict(test_data)

plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
Example #25
import matplotlib.pyplot as plt
import tensorflow.keras as k
from tensorflow.keras.datasets import boston_housing

(train_X, train_Y), (test_X, test_Y) = boston_housing.load_data()

print(len(train_X), len(test_X))
print(train_X[0])
print(train_Y[0])

x_mean = train_X.mean()
x_std = train_X.std()
train_X -= x_mean
train_X /= x_std
test_X -= x_mean
test_X /= x_std

y_mean = train_Y.mean()
y_std = train_Y.std()
train_Y -= y_mean
train_Y /= y_std
test_Y -= y_mean
test_Y /= y_std

print(train_X[0])
print(train_Y[0])

model = k.Sequential([
    k.layers.Dense(units=52, activation='relu', input_shape=(13, )),
    k.layers.Dense(units=39, activation='relu'),
    k.layers.Dense(units=26, activation='relu'),
Example #26
import tensorflow as tf
import pandas as pd
from tensorflow.keras.datasets import boston_housing
import matplotlib.pyplot as plt

print(tf.__version__)

(trainX, trainY), (testX, testY) = boston_housing.load_data()

print(len(trainX))
print(len(trainY))

print(trainX[0])
print(trainY[0])

meanX = trainX.mean()
stdX = trainX.std()

trainX -= meanX
trainX /= stdX

testX -= meanX
testX /= stdX

meanY = trainY.mean()
stdY = trainY.std()

trainY -= meanY
trainY /= stdY

testY -= meanY
Example #27
    plt.savefig(savepath)
    plt.show()
    plt.clf()


def standardize(train_data):

    mean = train_data.mean(axis=0)
    train_data -= mean
    std = train_data.std(axis=0)
    standardized_data = train_data / std

    return standardized_data


(train_data, train_labels) = boston_housing.load_data()[0]


train_data = standardize(train_data)

k = 4
num_val_samples = len(train_data) // k
num_epochs = 20
all_scores_mae = []
all_scores_mse = []

all_scores_Vmae = []
all_scores_Vmse = []

hidden_layers = [64, 64, 1]
network = build_model(hidden_layers)
Example #28
import numpy as np
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.datasets import boston_housing
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.metrics import r2_score

# fix random seed for reproducibility
seed = 5
np.random.seed(seed)

# Load data
(train_x, train_y), (test_x, test_y) = boston_housing.load_data()
print('The data shape of boston housing:')
print('train_x: ', train_x.shape)
print('test_x: ', test_x.shape)
print('train_y: ', train_y.shape)
print('test_y: ', test_y.shape)

# process the data (Standardization)
from sklearn import preprocessing

scaler = preprocessing.StandardScaler()
train_x = scaler.fit_transform(train_x)
test_x = scaler.transform(test_x)  # reuse the statistics fitted on the training set

# build the model
activation_func = 'relu'
Example #29
#alpha-2

# Import packages
from tensorflow.keras.datasets import boston_housing
from tensorflow.keras.layers import Activation, Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Prepare the dataset
(train_data, train_labels), (test_data,
                             test_labels) = boston_housing.load_data()

# Check the dataset shapes
print(train_data.shape)
print(train_labels.shape)
print(test_data.shape)
print(test_labels.shape)

# Inspect the dataset contents
# column_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']
# df = pd.DataFrame(train_data, columns=column_names)
# df.head()

# Check the dataset labels
print(train_labels[0:10])

# Check the dataset before shuffling
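# The original snippet ends here. A hedged continuation sketch: shuffle the
# training set with a random permutation (np is imported above).
order = np.argsort(np.random.random(train_labels.shape))
train_data = train_data[order]
train_labels = train_labels[order]
print(train_labels[0:10])  # labels after shuffling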
Example #30
def Train_model(model, json_file):

    epochs = 3

    #telegram callback
    telegram_token = "TOKEN"
    telegram_user_id = None
    bot = DLBot(token=telegram_token, user_id=telegram_user_id)
    telegram_callback = TelegramBotCallback(bot)

    #loading config
    with open(json_file, 'r') as f:
        config = json.load(f)
    category = config['category']

    #for classification
    if category == 1:
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        X_train = tf.keras.utils.normalize(X_train, axis=1)
        X_test = tf.keras.utils.normalize(X_test, axis=1)

        model.fit(X_train,
                  y_train,
                  epochs=epochs,
                  validation_data=(X_test, y_test),
                  verbose=1)

        score = model.evaluate(X_test, y_test, verbose=0)
        bot.send_message('Test loss:' + str(score[0]))
        bot.send_message('Test accuracy:' + str(score[1]))

    #for regression
    elif category == 2:
        (X_train, y_train), (X_test, y_test) = boston_housing.load_data()

        model.fit(
            X_train,
            y_train,
            epochs=3,
            validation_data=(X_test, y_test),
            verbose=1)
        score = model.evaluate(X_test, y_test, verbose=0)
        bot.send_message('Test loss:' + str(score[0]))
        bot.send_message('Test accuracy:' + str(score[1]))

    elif category == 3:
        num_classes = 10
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        X_train = X_train[:1000]
        y_train = y_train[:1000]
        X_test = X_test[:200]
        y_test = y_test[:200]
        img_rows, img_cols = 28, 28
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        X_train /= 255
        X_test /= 255

        y_train = tf.keras.utils.to_categorical(y_train, num_classes)
        y_test = tf.keras.utils.to_categorical(y_test, num_classes)
        #print(X_train.shape)
        model.fit(X_train,
                  y_train,
                  epochs=3,
                  batch_size=32,
                  validation_data=(X_test, y_test),
                  verbose=1)