test = generDataset(200)
x_test, y_test = split(test)
x_test, y_test = np.array(x_test), np.array(y_test)



#print(y_test.shape)



# Model

model = Sequential()  # arbitrary architecture

# model.add(keras.layers.Flatten(input_shape=(20,)))  # not needed: the input is already flat
model.add(Dense(20))
# model.add(Dropout(0.2))  # optional regularization against overfitting; 0.2 may be a bit high

model.add(Dense(160, activation='linear'))

model.add(Dense(80))

model.add(Dense(40, activation='linear'))

model.add(Dense(20, activation='linear'))
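
# A hedged sketch, not part of the original snippet: compile and inspect the
# stacked Dense model above. The 20-feature input size and the MSE loss are
# assumptions based on the commented Flatten(input_shape=(20,)) line.
model.compile(optimizer='adam', loss='mse')
model.build(input_shape=(None, 20))
model.summary()
# model.evaluate(x_test, y_test, verbose=0)  # hypothetical check on the test split built above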
# Example 2
    class_weights_dict = dict(enumerate(class_weights))

    return train_it, val_it, test_it, class_weights_dict


trn_lbls = pd.read_csv(
    '/home/ubuntu/Notebooks/Datasets/FERPlus_occ/train_label.csv')

# Whether to retrain the model or load a previously saved model.
retrain = False
cp_dir = '/home/ubuntu/Notebooks/Models/FER_VGG16_scratch_best.h5'
log_dir = '/home/ubuntu/Notebooks/Models/FER_VGG16_scratch_log.csv'

if retrain:
    VGGFace16_classes = 2622
    VGGFace16 = Sequential()

    #Block 1
    VGGFace16.add(
        layers.Conv2D(input_shape=(224, 224, 3),
                      filters=64,
                      kernel_size=(3, 3),
                      padding='same',
                      activation='relu',
                      name='conv1_1'))
    VGGFace16.add(
        layers.Conv2D(filters=64,
                      kernel_size=(3, 3),
                      padding='same',
                      activation='relu',
                      name='conv1_2'))
# Example 3
    def train(self):
        model = Sequential()
        model.add(
            Conv2D(filters=96,
                   kernel_size=(11, 11),
                   strides=(4, 4),
                   activation='relu',
                   input_shape=(277, 277, 3)))
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(3, 3), strides=(2, 2)))
        model.add(
            Conv2D(filters=256,
                   kernel_size=(5, 5),
                   strides=(1, 1),
                   activation='relu',
                   padding='same'))
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(3, 3), strides=(2, 2)))
        model.add(
            Conv2D(filters=384,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   activation='relu',
                   padding='same'))
        model.add(BatchNormalization())
        model.add(
            Conv2D(filters=384,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   activation='relu',
                   padding='same'))
        model.add(BatchNormalization())
        model.add(
            Conv2D(filters=256,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   activation='relu',
                   padding='same'))
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(3, 3), strides=(2, 2)))
        model.add(Flatten())
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.train_gen.num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=SGD(lr=0.001),
                      metrics=['accuracy'])
        model.summary()

        callbacks = [
            ModelCheckpoint(
                filepath="models/saved/AlexNet/model_epoch_{epoch}",
                save_best_only=True,
                monitor="val_accuracy",
                verbose=1)
        ]

        hist = model.fit(
            self.train_gen,
            steps_per_epoch=self.train_gen.samples // self.batch_size,
            validation_data=self.test_gen,
            validation_steps=self.test_gen.samples // self.batch_size,
            epochs=self.epochs,
            callbacks=callbacks)

        plt.plot(hist.history["accuracy"])
        plt.plot(hist.history['val_accuracy'])
        plt.plot(hist.history['loss'])
        plt.plot(hist.history['val_loss'])
        plt.title("models accuracy")
        plt.ylabel("Accuracy")
        plt.xlabel("Epoch")
        plt.legend(
            ["Accuracy", "Validation Accuracy", "loss", "Validation Loss"])
        plt.show()

        return model
# MAIN CODE ============================================================================================================
train_labels = to_categorical(train_labels)
val_labels = to_categorical(val_labels)
test_labels = to_categorical(test_labels)

es_callback = EarlyStopping(monitor="val_loss",
                            patience=10,
                            restore_best_weights=True,
                            verbose=1)
auc = tf.keras.metrics.AUC()

with tf.device('/CPU:0'):
    model = Sequential([
        Masking(mask_value=-1),
        GRU(timeseries.shape[2] * 2, input_shape=timeseries.shape[1:]
            ),  # input shape = (max_time, feat_count)
        Dense(10, activation=tf.keras.activations.relu),
        Dense(2),
        Softmax()
    ])

    model.compile(loss=tf.losses.categorical_crossentropy,
                  optimizer=optimizers.Adam(lr=0.001, amsgrad=True),
                  metrics=['acc', auc])

    history = model.fit(x=train_subjects,
                        y=train_labels,
                        epochs=MAX_EPOCHS,
                        batch_size=1,
                        validation_data=(val_subjects, val_labels),
                        shuffle=False,
                        callbacks=[es_callback])
# Example 5
from sklearn.linear_model import LogisticRegression
from tensorflow.keras.layers import Activation, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.regularizers import L2

data, label = make_blobs(n_samples=5000,
                         centers=2,
                         random_state=0,
                         cluster_std=0.6)
plt.scatter(data[:, 0], data[:, 1], c=label, s=50, cmap="autumn")

lr_skl = LogisticRegression()
lr_skl.fit(data, label)

lr_tf = Sequential()
lr_tf.add(
    Dense(
        1,
        use_bias=True,
        bias_initializer="zeros",
        activation="sigmoid",
        input_dim=data.shape[1],
        kernel_regularizer=L2(0.01),
    ))
lr_tf.compile(optimizer=Adam(0.5), loss="binary_crossentropy")
lr_tf.fit(data, label, epochs=40, batch_size=32)

ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
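
# A hedged sketch of the decision-boundary comparison this snippet appears to
# set up (grid resolution, contour levels and colors are assumptions, not the
# original plotting code).
import numpy as np

xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 200),
                     np.linspace(ylim[0], ylim[1], 200))
grid = np.c_[xx.ravel(), yy.ravel()]
z_skl = lr_skl.predict_proba(grid)[:, 1].reshape(xx.shape)
z_tf = lr_tf.predict(grid, verbose=0).reshape(xx.shape)
ax.contour(xx, yy, z_skl, levels=[0.5], colors='k')                   # sklearn boundary
ax.contour(xx, yy, z_tf, levels=[0.5], colors='b', linestyles='--')   # Keras boundary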
# Example 6
    def fit(self, module=None):

        if not module:
            module = self.module

        intents = {}
        for intent in module.intents:
            if intent.patterns:
                intents[intent.name] = {"patterns": []}
                for pattern in intent.patterns:
                    intents[intent.name]['patterns'].append(pattern.text)

        garbage_training_intents = Intent().select().where(
            Intent.agent != module.id)
        intents['not_found'] = {"patterns": []}
        for intent in garbage_training_intents:
            if intent.patterns:
                for pattern in intent.patterns:
                    intents['not_found']['patterns'].append(pattern.text)

        vocabulary = []
        classes = []
        documents = []
        ignore_words = ['?']

        for intent_name in intents:
            intent = intents[intent_name]
            for pattern in intent['patterns']:
                w = nltk.word_tokenize(pattern)
                vocabulary.extend(w)
                documents.append((w, intent_name))
                if intent_name not in classes:
                    classes.append(intent_name)

        stemmer = LancasterStemmer()
        vocabulary = [
            stemmer.stem(w.lower()) for w in vocabulary
            if w not in ignore_words
        ]
        vocabulary = sorted(list(set(vocabulary)))

        classes = sorted(list(set(classes)))
        training = []
        output_empty = [0] * len(classes)

        for doc in documents:
            bag = []
            pattern_words = doc[0]
            pattern_words = [
                stemmer.stem(word.lower()) for word in pattern_words
            ]
            for word in vocabulary:
                bag.append(1 if word in pattern_words else 0)

            output_row = list(output_empty)
            output_row[classes.index(doc[1])] = 1
            training.append([bag, output_row])

        random.shuffle(training)
        training = np.array(training, dtype=object)  # ragged rows: keep as an object array
        train_x = list(training[:, 0])
        train_y = list(training[:, 1])

        tf_model = Sequential()
        tf_model.add(
            Dense(128, input_shape=(len(train_x[0]), ), activation='relu'))
        tf_model.add(Dropout(0.5))
        tf_model.add(Dense(64, activation='relu'))
        tf_model.add(Dropout(0.5))
        tf_model.add(Dense(len(train_y[0]), activation='softmax'))

        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        tf_model.compile(loss='categorical_crossentropy',
                         optimizer=sgd,
                         metrics=['accuracy'])

        tf_model.fit(np.array(train_x),
                     np.array(train_y),
                     epochs=200,
                     batch_size=5,
                     verbose=1)

        save_model(tf_model, 'chat/' + module.name + '.h5', True)
        #converter = tf.lite.TFLiteConverter.from_keras_model_file('chat/model.h5')
        #tflite_model = converter.convert()
        #open("chat/model.tflite", "wb").write(tflite_model);

        with open("chat/" + module.name + ".pkl", "wb") as dataFile:
            pickle.dump(
                {
                    'vocabulary': vocabulary,
                    'classes': classes,
                    'train_x': train_x,
                    'train_y': train_y
                }, dataFile)
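
        # Hedged inference sketch (the helper below is an assumption, not part
        # of this method): encode a sentence with the same bag-of-words scheme
        # and ask the trained model for an intent.
        # def bow(sentence):
        #     words = [stemmer.stem(w.lower()) for w in nltk.word_tokenize(sentence)]
        #     return np.array([[1 if v in words else 0 for v in vocabulary]])
        # probs = tf_model.predict(bow("hello there"))[0]
        # print(classes[int(np.argmax(probs))])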
#     plt.title(y_train[i])
#     plt.imshow(x_train[i], cmap = 'binary')
#     plt.show()

x_train_reshaped = np.reshape(x_train, (60000, 784))
x_test_reshaped  = np.reshape(x_test,  (10000, 784))

epsilon = 1e-10
x_mean  = np.mean(x_train_reshaped)
x_std   = np.std(x_train_reshaped)
x_train_norm = (x_train_reshaped - x_mean)/(x_std + epsilon)
x_test_norm  = (x_test_reshaped  - x_mean)/(x_std + epsilon)

model = Sequential([
    Dense(128, activation = 'relu', input_shape = (784, )),
    Dense(128, activation = 'relu'),
    Dense( 10, activation = 'softmax')
])
model.compile(
    optimizer = 'sgd',
    loss      = 'categorical_crossentropy',
    metrics   = ['accuracy']
)
model.summary()

model.fit(x_train_norm, y_train_encoded, epochs = 5)
model.evaluate(x_test_norm, y_test_encoded)

preds = model.predict(x_test_norm)
print('Shape of preds:', preds.shape)
start_index = 0
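
# A small assumed continuation (not from the original script): convert the
# softmax outputs to class labels and inspect the prediction at start_index.
pred_classes = np.argmax(preds, axis=1)
true_classes = np.argmax(y_test_encoded, axis=1)
print('Predicted:', pred_classes[start_index], 'True:', true_classes[start_index])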
# Example 8
def train_nn(d, x, f, num_nodes, weights_init=None, summary=False, plot=False):

    # set hyperparameters
    lr_start = 1e-03
    lr_decay = .99954
    learning_rate = LearningRateScheduler(
        lambda epoch: lr_start * lr_decay**epoch)
    epochs = 10000
    batch_size = x.shape[0] if weights_init is not None else np.ceil(
        x.shape[0] / 100).astype(int)

    # initialize weights if A,B,C are provided
    if weights_init is not None:

        def A_init(shape, dtype=None):
            return weights_init[0].T

        def B_init(shape, dtype=None):
            return weights_init[1].reshape(-1)

        def C_init(shape, dtype=None):
            return weights_init[2]

    # define the model
    model = Sequential()
    if weights_init is not None:
        model.add(Dense(num_nodes, input_shape=(d,), \
            kernel_initializer=A_init, bias_initializer=B_init))
        model.add(Activation('relu'))
        model.add(Dense(1, \
            kernel_initializer=C_init))
    else:
        model.add(Dense(num_nodes, input_shape=(d,), \
            kernel_initializer=tf.keras.initializers.TruncatedNormal(), \
            bias_initializer=tf.keras.initializers.Zeros()))
        model.add(Activation('relu'))
        model.add(Dense(1, \
            kernel_initializer=tf.keras.initializers.TruncatedNormal(), \
            bias_initializer=tf.keras.initializers.Zeros()))

    # compile the model
    model.compile(loss='mean_squared_error', \
        optimizer=tf.keras.optimizers.Adam(lr=lr_start))

    # display the model
    if summary:
        print()
        model.summary()

    # display network parameters
    if weights_init is not None:
        label = 'GSN loss'
        print('\ntraining network using GSN initialization...')
    else:
        label = 'random loss'
        print('\ntraining network using random initialization...')
    print('network parameters: epochs = {:d}, batch size = {:d}'.format(
        epochs, batch_size))

    # train the model
    history = model.fit(x, f, batch_size=batch_size, epochs=epochs, \
                        verbose=0, callbacks=[learning_rate])

    # save and plot the training process
    fig = plt.figure(figsize=(8, 3))
    plt.semilogy(history.history['loss'], label=label)
    plt.legend(loc='upper right')
    # save the figure
    name = time.strftime('%Y-%m-%d %H.%M.%S', time.localtime())
    #plt.savefig('./images/{:s}.png'.format(name), format='png')
    #plt.savefig('./images/{:s}.pdf'.format(name), format='pdf')
    # plot the figure
    if plot:
        plt.show()
    else:
        plt.close()

    # return trained weights
    A = model.layers[0].get_weights()[0].T
    B = model.layers[0].get_weights()[1].reshape((-1, 1))
    C = model.layers[-1].get_weights()[0].reshape((-1, 1))
    c = model.layers[-1].get_weights()[1]

    # evaluate the model
    model.evaluate(x, f, batch_size=x.shape[0])

    return A, B, C, c
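
# Hypothetical usage of train_nn (not in the original): fit a small synthetic
# 1-D target with random initialization, then warm-start a second run with the
# learned (A, B, C). All names below are illustrative only.
# x_demo = np.random.uniform(-3, 3, size=(200, 1))
# f_demo = np.sin(x_demo)
# A, B, C, c = train_nn(1, x_demo, f_demo, num_nodes=20, summary=True)
# train_nn(1, x_demo, f_demo, num_nodes=20, weights_init=(A, B, C), plot=True)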
# Example 9
# for i in scaled_train_samples:
#     print(i)

#%% Simple tf.keras Sequential Model

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import categorical_crossentropy

#%% Set up model

model = Sequential([
    Dense(units=16, input_shape=(1, ), activation='relu'),
    # Dense(units=8, activation='relu'),
    Dense(units=32, activation='relu'),
    # Dense(units=2048, activation='relu'),
    Dense(units=2, activation='softmax')
])

model.summary()

model.compile(optimizer=Adam(learning_rate=0.0001),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

import datetime
import os
# log_dir = os.getcwd() + "\\logs\\fit\\"
# log_dir = "logs\fit"
# os.makedirs(log_dir)
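
# A hedged sketch of the TensorBoard setup the commented lines above appear to
# aim for; the fit call is illustrative and assumes the scaled_train_samples /
# train_labels arrays mentioned at the top of this snippet.
from tensorflow.keras.callbacks import TensorBoard

log_dir = os.path.join("logs", "fit", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_cb = TensorBoard(log_dir=log_dir)
# model.fit(scaled_train_samples, train_labels, validation_split=0.1,
#           batch_size=10, epochs=30, callbacks=[tensorboard_cb], verbose=2)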
# Example 10
cut_labels_fare = [x for x in range(1, 12)]
cut_bins_fare = [x for x in range(0, 600, 50)]
df_new_columns['fare_'] = pd.cut(df_new_columns['fare'],
                                 bins=cut_bins_fare,
                                 labels=cut_labels_fare)
df_new_columns.drop(['fare'], axis=1, inplace=True)
df_new_columns.rename(columns={'fare_': 'fare'}, inplace=True)

# Split the data into training and test sets
X = df_new_columns.drop(['survived'], axis=1)
y = df_new_columns['survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

# Neural network
classifier = Sequential()
classifier.add(
    Dense(units=32,
          kernel_initializer='uniform',
          activation='relu',
          input_dim=6))
# classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
# classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
classifier.add(
    Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='rmsprop',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

# Train on the training data
classifier.fit(X_train, y_train, batch_size=128, epochs=100)
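
# Assumed follow-up (not in the original): evaluate on the held-out split
# produced by train_test_split above.
test_loss, test_acc = classifier.evaluate(X_test, y_test, verbose=0)
print('Test loss: %.4f, test accuracy: %.4f' % (test_loss, test_acc))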
# Example 11
    (train_data, test_data, train_labels,
     test_labels) = train_test_split(data, labels, test_size=0.2)

    # train_data = data
    # test_data = data
    # train_labels = labels
    # test_labels = labels

    label_lb = LabelBinarizer()
    label_lb.fit(tags)
    train_labels = label_lb.transform(train_labels)
    test_labels = label_lb.transform(test_labels)

    model = Sequential([
        Dense(128, input_shape=(len(train_data[0]), ), activation='relu'),
        Dense(64, activation='relu'),
        Dropout(0.5),
        Dense(len(train_labels[0]), activation='softmax')
    ])

    model.compile(optimizer='adam',
                  loss=CategoricalCrossentropy(),
                  metrics=['accuracy'])

    time_start = time.time()
    history = model.fit(train_data,
                        train_labels,
                        epochs=30,
                        batch_size=5,
                        validation_data=(test_data, test_labels),
                        verbose=2)
    time_end = time.time()
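
    # A small assumed continuation: report the training time and the last
    # recorded validation accuracy (not part of the original snippet).
    print('Training took %.1f s' % (time_end - time_start))
    print('Final val_accuracy: %.4f' % history.history['val_accuracy'][-1])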
BATCH_SIZE = 128
EPOCH = 250
# one epoch = one full pass over the dataset


### Cell 4 ###
VGG_CONVS = VGG16(weights='imagenet', include_top=False, input_shape=INPUT_SHAPE)
for layer in VGG_CONVS.layers:
    layer.trainable = False


### Cell 5 ###
model = Sequential([
                    VGG_CONVS,
                    Flatten(),
                    Dense(1024, activation='relu'),
                    Dropout(0.5),
                    Dense(10, activation='softmax')
                    ])


### Cell 6 ###
model.summary()

print(Y_train[2])


### Cell 7 ###
labels = [
          'airplane',
          'automobile',
    def build_model(self, hp):
        """
            Builds an RNN model with the chosen hyperparameters.
        :param hp: Keras Tuner hyperparameters
        :return: Sequential recurrent neural network model
        """
        self.log_event('    -> Creating a model.')
        model = Sequential()

        model.add(self.add_mask_layer())

        # Add One LSTM Layer with Batch Normalization
        model.add(
            LSTM(units=hp.Int(
                'first_layer',
                min_value=self.hyper_parameters['lstm_units']['min'],
                max_value=self.hyper_parameters['lstm_units']['max'],
                step=self.hyper_parameters['lstm_units']['step']),
                 return_sequences=True,
                 dropout=self.hyper_parameters['lstm_layer_dropout'],
                 recurrent_dropout=0.1,
                 activation=self.hyper_parameters['lstm_layer_activation']))

        model.add(BatchNormalization())

        # Add Dropout
        model.add(
            Dropout(
                hp.Choice('dropout_one',
                          values=self.hyper_parameters['dropout'])))

        # Add the second LSTM Layer with Batch Normalization
        model.add(
            LSTM(units=hp.Int(
                'second_layer',
                min_value=self.hyper_parameters['lstm_units']['min'],
                max_value=self.hyper_parameters['lstm_units']['max'],
                step=self.hyper_parameters['lstm_units']['step']),
                 return_sequences=False,
                 dropout=self.hyper_parameters['lstm_layer_dropout'],
                 recurrent_dropout=0.1,
                 activation=self.hyper_parameters['lstm_layer_activation']))

        model.add(BatchNormalization())

        # Add Dropout (separate hyperparameter for the second dropout layer)
        model.add(
            Dropout(
                hp.Choice('dropout_two',
                          values=self.hyper_parameters['dropout'])))

        # Add Output Layer
        model.add(
            Dense(self.number_of_distinct_items,
                  activation=self.hyper_parameters['dense_activation']))

        # Compile the model
        opt = Adam(
            hp.Choice('learning_rate',
                      values=self.hyper_parameters['learning_rate']))

        model.compile(loss=self.hyper_parameters['loss'],
                      optimizer=opt,
                      metrics=self.hyper_parameters['metric'])

        self.log_event('    -> Returning the model.')
        return model
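
# Hedged usage sketch (assumes the keras-tuner package and the surrounding
# class and data, which are not shown here): plug build_model into a tuner.
# import keras_tuner as kt
# tuner = kt.RandomSearch(self.build_model, objective='val_loss', max_trials=10)
# tuner.search(x_train, y_train, epochs=20, validation_data=(x_val, y_val))
# best_model = tuner.get_best_models(num_models=1)[0]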
# Example 14
               FOOD_SPAWN_MODE=food_spawn_mode,
               MAX_COUNTER=int(max_counter),
               HEALTH_CUTOFF=health_cutoff,
               DRF_MC_HIGH=DRF_MC_high,
               DRF_MC_LOW=DRF_MC_low,
               IMMORTAL=immortal,
               WALL_PENALTY=wall_penalty)
tf_env = tf_py_environment.TFPyEnvironment(env)

## ------------------------------------------------------------------------------
## ------------------------------------------------------------------------------
## ------------------------------------------------------------------------------

# Preprocessing layers
board_preprocessing = Sequential([
    keras.layers.Lambda(lambda obs: tf.cast(obs, np.float32)),
    keras.layers.Flatten()
])
health_preprocessing = keras.layers.Flatten()

# Layer params are specified by local variables obtained from the DataFrame
act_net = ActorDistributionNetwork(
    tf_env.observation_spec(),
    tf_env.action_spec(),
    preprocessing_layers=(board_preprocessing, health_preprocessing),
    preprocessing_combiner=tf.keras.layers.Concatenate(axis=-1),
    conv_layer_params=conv_layer_params,
    fc_layer_params=fc_layer_params,
    batch_squash=False)

## ------------------------------------------------------------------------------
## ------------------------------------------------------------------------------
# Example 15
def cnn_classifer():
    classifer = Sequential()  # create the model
    # TODO: save the model via checkpoints, build bottleneck features if possible,
    # and find and apply a strategy to improve the learning rate

    #1
    classifer.add(
        Conv2D(filters=20,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=(64, 64, 1),
               padding='same',
               strides=(2, 2),
               kernel_initializer='he_normal'))
    classifer.add(BatchNormalization())

    classifer.add(Activation('relu'))
    classifer.add(AveragePooling2D(
        pool_size=(2, 2)))  # pooling layer changed here (originally MaxPooling)
    #classifer.add(BatchNormalization())

    classifer.add(
        Conv2D(filters=20,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               strides=(2, 2),
               kernel_initializer='he_normal'))
    #classifer.add(BatchNormalization())

    classifer.add(MaxPooling2D(pool_size=(2, 2)))
    #classifer.add(BatchNormalization())

    classifer.add(Dropout(0.25))

    classifer.add(Flatten())  # flatten the feature maps (must come right before the dense head)

    # Step 3: add a Flatten layer => convert to 1-D
    # Step 4: add Dense layers
    classifer.add(
        Dense(units=32, activation='relu', kernel_initializer='he_normal'))
    classifer.add(Dropout(0.5))
    classifer.add(
        Dense(units=1,
              activation='hard_sigmoid',
              kernel_initializer='he_normal'))  # do not change the sigmoid activation
    # binary classification is needed
    classifer.summary()

    optimizer = keras.optimizers.Adadelta(
        rho=0.95, lr=1.0, decay=0.0)  # addresses the problem of coefficients shrinking as training progresses
    # keeps a decaying average of the squared gradients
    sgd = optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.9,
                         nesterov=True)  # start with a large learning rate and gradually decay it
    # Step 5: compile the model
    optimizer = tf.keras.optimizers.RMSprop(lr=0.001,
                                            rho=0.9,
                                            epsilon=None,
                                            decay=0.0)
    # using Adagrad here would give an adaptive learning rate
    # classifer.compile(loss="mse", metrics=["mae"])
    classifer.compile(
        optimizer=tf.keras.optimizers.Adadelta(rho=0.95, lr=1.0, decay=0.0),
        loss='mse',
        metrics=['accuracy'
                 ])  # binary_crossentropy is also used as the loss; MSE is kept here

    # classifer.compile(optimizer=SGD(lr=0.1, momentum=0.9, nesterov=True),
    #                   loss='mse', metrics=['accuracy'])
    # Summary of Keras optimizers:
    # sgd: stochastic gradient descent / momentum keeps the learning rate fixed and adjusts the parameters
    # adagrad: adapts the learning rate while training
    # rmsprop, adadelta: refinements of adagrad
    # adam is the usual default; sparse_* losses work on integer labels directly; metrics evaluate model performance and loss functions can also be used as metrics

    return classifer
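
# Assumed usage (the data pipeline is not shown in this snippet): build the
# classifier and train it on 64x64 single-channel batches, e.g. produced by an
# ImageDataGenerator with color_mode='grayscale'.
# model = cnn_classifer()
# model.fit(train_generator, validation_data=val_generator, epochs=20)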
look_back = 12

train_data_gen = TimeseriesGenerator(train_data, train_data,
                                     length=look_back, sampling_rate=1, stride=1,
                                     batch_size=4)

test_data_gen = TimeseriesGenerator(test_data, test_data,
                                    length=look_back, sampling_rate=1, stride=1,
                                    batch_size=4)

# Build the model:
model = Sequential([LSTM(250, activation='relu',
                         recurrent_dropout=0.15,
                         kernel_regularizer=regularizers.l2(0.01),
                         return_sequences=True,
                         input_shape=(look_back, 1)),
                    LSTM(250, activation='relu',
                         recurrent_dropout=0.15,
                         kernel_regularizer=regularizers.l2(0.01),
                         return_sequences=False),
                    Dense(1)])

model.compile(optimizer='adam', loss='mse')
model.summary()

# Train the model (monitor MSE during training, stop early when the
# validation data stops improving, and restore the weights from the
# best-performing epoch):
early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)

history = model.fit(train_data_gen,
                    epochs=150,
X_trainn, X_test, y_trainn, y_test = train_test_split(x,
                                                      y,
                                                      test_size=0.30,
                                                      random_state=5)
X_train, X_val, y_train, y_val = train_test_split(X_trainn,
                                                  y_trainn,
                                                  test_size=0.30,
                                                  random_state=5)

len(pd.unique(y))

X_trainn = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_testt = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
X_vall = np.reshape(X_val, (X_val.shape[0], X_val.shape[1], 1))

modelcnn = Sequential()

modelcnn.add(Conv1D(512, 3, padding='same', input_shape=(X_train.shape[1], 1)))
modelcnn.add(Activation('tanh'))
modelcnn.add(Conv1D(256, 3, padding='same'))
modelcnn.add(Activation('tanh'))
modelcnn.add(Dropout(0.5))

modelcnn.add(Conv1D(
    128,
    3,
    padding='same',
))
modelcnn.add(Activation('tanh'))
modelcnn.add(Conv1D(
    512,
# Example 18
base_treinamento_normalizada = normalizador.fit_transform(
    base_treinamento)  # apply the scaler to the training set
'''This is the most important part for time series. To predict a real price we
need a certain amount of data preceding it; in this example we use the 90
observations before the price being predicted. The loop below builds a list of
lists, each one a 90-day window shifted by one day, plus a list with the real
prices from index 90 up to 1242.'''

previsores = []
preco_real = []
for i in range(90, 1242):
    previsores.append(base_treinamento_normalizada[i - 90:i, 0])
    preco_real.append(base_treinamento_normalizada[i, 0])
previsores, preco_real = np.array(previsores), np.array(preco_real)
previsores = np.reshape(
    previsores, (previsores.shape[0], previsores.shape[1], 1)
)  # number of samples (1152), window length per sample, a single predictor feature

# ==== Recurrent network architecture ====
regressor = Sequential()
regressor.add(
    LSTM(units=100,
         return_sequences=True,
         input_shape=(previsores.shape[1],
                      1)))  # LSTM is a type of recurrent layer
regressor.add(Dropout(0.3))

regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.3))

regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.3))

regressor.add(
    LSTM(units=50))
# Example 19

def next_batch(batch_size):
    i = np.random.randint(0, 3704 - batch_size)
    x_batch = x_train_rnn[i:i + batch_size, :, :]
    y_batch = y_train_rnn[i:i + batch_size, :].reshape(batch_size, 7 * 21)
    return x_batch, y_batch

    #LSTM considering past 120 days
    batch_size = 64


from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout, BatchNormalization
initializer = tf.keras.initializers.GlorotNormal()  # instantiate the initializer
model0 = Sequential()
model0.add(
    Dense(64,
          activation='relu',
          input_shape=(x_train_rnn.shape[1], x_train_rnn.shape[2]),
          kernel_initializer=initializer))
model0.add(Dropout(0.1))
model0.add(
    LSTM(128,
         kernel_initializer=initializer,
         dropout=0.15,
         recurrent_dropout=0.15,
         return_sequences=True,
         go_backwards=True))
#model0.add(BatchNormalization())
model0.add(
# Example 20
    def train(self):
        train_img_list = []
        train_label_list = []
        for file in os.listdir(self.train_folder):
            files_img_in_array = self.read_train_images(filename=file)
            train_img_list.append(files_img_in_array)  # accumulate the image list
            train_label_list.append(int(file.split('_')[0]))  # accumulate the label list
        print(train_label_list)
        train_label_list = to_categorical(train_label_list, 4)
        train_img_list = np.array(train_img_list)
        train_label_list = np.array(train_label_list)
        print(train_label_list)

        #train_label_list = to_categorical(train_label_list,4) #format into binary [0,0,0,0,1,0,0]

        train_img_list = train_img_list.astype('float32')
        train_img_list /= 255

        #-- setup Neural network CNN
        model = Sequential()
        #CNN Layer - 1
        model.add(
            Convolution2D(
                filters=32,  # output shape for the next layer: (100, 100, 32)
                kernel_size=(5, 5),  # filter size in pixels
                padding='same',  # padding method
                input_shape=(100, 100,
                             3),  # input shape, channels-last (TensorFlow)
            ))
        model.add(Activation('relu'))
        model.add(
            MaxPooling2D(
                pool_size=(2, 2),  #Output for next layer (50,50,32)
                strides=(2, 2),
                padding='same',
            ))

        #CNN Layer - 2
        model.add(
            Convolution2D(
                filters=64,  #Output for next layer (50,50,64)
                kernel_size=(2, 2),
                padding='same',
            ))
        model.add(Activation('relu'))
        model.add(
            MaxPooling2D(  #Output for next layer (25,25,64)
                pool_size=(2, 2),
                strides=(2, 2),
                padding='same',
            ))

        #Fully connected Layer -1
        model.add(Flatten())
        model.add(Dense(1024))
        model.add(Activation('relu'))
        # Fully connected Layer -2
        model.add(Dense(512))
        model.add(Activation('relu'))
        # Fully connected Layer -3
        model.add(Dense(256))
        model.add(Activation('relu'))
        # Fully connected Layer -4
        model.add(Dense(self.categories))
        model.add(Activation('softmax'))
        # Define Optimizer
        adam = Adam(lr=0.0001)
        #Compile the model
        model.compile(optimizer=adam,
                      loss="categorical_crossentropy",
                      metrics=['accuracy'])
        # Fire up the network
        model.fit(
            train_img_list,
            train_label_list,
            epochs=self.number_batch,
            batch_size=self.batch_size,
            verbose=1,
        )
        #SAVE your work -model
        model.save('./cellfinder.h5')
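
        # Hypothetical follow-up (not in the original class): reload the saved
        # model and classify a single 100x100 RGB image scaled to [0, 1].
        # from tensorflow.keras.models import load_model
        # reloaded = load_model('./cellfinder.h5')
        # probs = reloaded.predict(img[np.newaxis] / 255.0)  # img: (100, 100, 3) array
        # print(np.argmax(probs, axis=1))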
# Example 21
def test_tf_keras_mnist_cnn():
    """ This is the basic mnist cnn example from keras.
    """
    _skip_if_no_tensorflow()

    from tensorflow import keras
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation
    from tensorflow.keras.layers import Conv2D, MaxPooling2D
    from tensorflow.keras import backend as K
    import tensorflow as tf
    import shap

    batch_size = 128
    num_classes = 10
    epochs = 1

    # input image dimensions
    img_rows, img_cols = 28, 28

    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(
        Conv2D(8,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape))
    model.add(Conv2D(16, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(32, activation='relu'))  # 128
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])

    model.fit(x_train[:1000, :],
              y_train[:1000, :],
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test[:1000, :], y_test[:1000, :]))

    # explain by passing the tensorflow inputs and outputs
    np.random.seed(0)
    inds = np.random.choice(x_train.shape[0], 10, replace=False)
    e = shap.DeepExplainer((model.layers[0].input, model.layers[-1].input),
                           x_train[inds, :, :])
    shap_values = e.shap_values(x_test[:1])

    sess = tf.keras.backend.get_session()
    diff = sess.run(model.layers[-1].input, feed_dict={model.layers[0].input: x_test[:1]}) - \
    sess.run(model.layers[-1].input, feed_dict={model.layers[0].input: x_train[inds,:,:]}).mean(0)

    sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
    d = np.abs(sums - diff).sum()
    assert d / np.abs(diff).sum(
    ) < 0.001, "Sum of SHAP values does not match difference! %f" % d
# Example 22
                    batch_size=BATCH_SIZE,
                    step=STEP,
                    shuffle=False)
test_gen = generator(df_values,
                     lookback=LOOKBACK,
                     delay=DELAY,
                     min_index=test_min_i,
                     max_index=test_max_i,
                     batch_size=BATCH_SIZE,
                     step=STEP,
                     shuffle=False)

# Instantiate Model
###################
clear_session()
model6 = Sequential()
model6.add(
    Conv1D(32, 5, activation='relu', input_shape=(None, df_values.shape[-1])))
model6.add(MaxPooling1D(3))
model6.add(Conv1D(32, 5, activation='relu'))
model6.add(MaxPooling1D(3))
model6.add(Conv1D(32, 5, activation='relu'))
model6.add(GlobalMaxPooling1D())
model6.add(Dense(1))

model6.compile(optimizer=RMSprop(), loss='mae', metrics=['mae'])
print(model6.summary())

# Train
#######
m2_callbacks = [
# Example 23
import numpy as np
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

from keras_facile import *

modele = Sequential()

# Network:
# 1 input x,
# first layer: 2 neurons
# second layer: 1 neuron
# activation = sigmoid

# First layer: 2 neurons (input of dimension 1)
modele.add(Dense(2, input_dim=1, activation='sigmoid'))

# Second and final layer: 1 neuron
modele.add(Dense(1, activation='sigmoid'))

mysgd = optimizers.SGD(lr=1)
modele.compile(loss='mean_squared_error', optimizer=mysgd)

# poids_a_zeros(modele, 0)  # layer 0, all weights set to zero
# poids_a_zeros(modele, 1)  # layer 1, all weights set to zero
definir_poids(modele, 0, 0, [1 / 3], -1)  # layer, rank, [coeffs], bias
definir_poids(modele, 0, 1, [-1 / 5], 1)  # layer, rank, [coeffs], bias
definir_poids(modele, 1, 0, [1, 1], -2)  # layer, rank, [coeffs], bias
# Example 24
def get_model(input_shape=(256, 256, 3), logits=5):
    model = Sequential()

    # 1st Convolutional Layer
    model.add(
        Conv2D(filters=96,
               input_shape=input_shape,
               kernel_size=(11, 11),
               strides=(4, 4),
               padding="valid",
               activation="relu"))

    # Max Pooling
    model.add(MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="valid"))

    # 2nd Convolutional Layer
    model.add(
        Conv2D(filters=256,
               kernel_size=(5, 5),
               strides=(1, 1),
               padding="same",
               activation="relu"))

    # Max Pooling
    model.add(MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="valid"))

    # 3rd Convolutional Layer
    model.add(
        Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding="same",
               activation="relu"))

    # 4th Convolutional Layer
    model.add(
        Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding="same",
               activation="relu"))

    # 5th Convolutional Layer
    model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding="same",
               activation="relu"))

    # Max Pooling
    model.add(MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="valid"))

    # Passing it to a Fully Connected layer
    model.add(Flatten())
    # 1st Fully Connected Layer
    model.add(Dense(units=9216, activation="relu"))

    # 2nd Fully Connected Layer
    model.add(Dense(units=4096, activation="relu"))

    # 3rd Fully Connected Layer
    model.add(Dense(4096, activation="relu"))

    # Output Layer
    model.add(Dense(logits, activation="softmax"))

    opt = Adam(lr=0.001)
    model.compile(optimizer=opt,
                  loss=sparse_categorical_crossentropy,
                  metrics=['sparse_categorical_accuracy'])
    return model
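
# Assumed usage (placeholder data, not from the original): the model is
# compiled with sparse_categorical_crossentropy, so it expects integer labels.
# model = get_model(input_shape=(256, 256, 3), logits=5)
# model.fit(train_images, train_int_labels, epochs=10, batch_size=32)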
# Example 25
    def train_model(self, sym):
        print('training of dataset started')
        #normalization
        df = self.df[sym]
        columns = df.columns[8:-6]
        dfTrain = self.normalize(df)

        # Build the LSTM dataset
        dfLstmTrain = self.getLstm(dfTrain)

        # Filter the columns
        dfColsTrain = dfLstmTrain[self.columns]
        res = []
        y = []
        rr = True

        for val in dfColsTrain.index.get_level_values(0).unique():
            for val2 in dfColsTrain.loc[val].index.get_level_values(
                    0).unique():
                res.append(dfColsTrain.loc[val, val2].values)
                y.append(dfLstmTrain.loc[val, val2, val2]['target4_8'])
        resnp = np.asarray(res)
        ynp = np.asarray(y)

        #resnp=np.delete(resnp,slice(0,10),1)
        from sklearn.utils import shuffle
        X_total, y_total = shuffle(resnp, ynp)

        #X_total=tf.keras.utils.normalize(X_total,axis=1,order=1)
        x_train = X_total
        y_train = tf.keras.utils.to_categorical(y_total,
                                                num_classes=3,
                                                dtype='int16')
        x_test = X_total[-100:]
        y_test = tf.keras.utils.to_categorical(y_total[-100:],
                                               num_classes=3,
                                               dtype='int16')
        model2 = Sequential()

        model2.add(
            LSTM(128, input_shape=(x_train.shape[1:]), return_sequences=False))
        model2.add(Dropout(0.1))
        model2.add(BatchNormalization())
        model2.add(Dense(3, activation='softmax'))
        opt = tf.keras.optimizers.Adam(learning_rate=0.001)

        EPOCHS = 70  # how many passes through our data
        BATCH_SIZE = 60

        # Compile model

        def f1(y_true, y_pred):
            class_id_true = K.argmax(y_true, axis=-1)
            class_id_preds = K.argmax(y_pred, axis=-1)
            # Replace class_id_preds with class_id_true for recall here
            accuracy_mask = K.cast(K.equal(class_id_preds, 1), 'int32')
            class_acc_tensor = K.cast(K.equal(class_id_true, class_id_preds),
                                      'int32') * accuracy_mask
            class_acc = K.sum(class_acc_tensor) / K.maximum(
                K.sum(accuracy_mask), 1)
            return class_acc

        def f2(y_true, y_pred):
            class_id_true = K.argmax(y_true, axis=-1)
            class_id_preds = K.argmax(y_pred, axis=-1)
            # Replace class_id_preds with class_id_true for recall here
            accuracy_mask = K.cast(K.equal(class_id_preds, 2), 'int32')
            class_acc_tensor = K.cast(K.equal(class_id_true, class_id_preds),
                                      'int32') * accuracy_mask
            class_acc = K.sum(class_acc_tensor) / K.maximum(
                K.sum(accuracy_mask), 1)
            return class_acc

        def f0(y_true, y_pred):
            class_id_true = K.argmax(y_true, axis=-1)
            class_id_preds = K.argmax(y_pred, axis=-1)
            # Replace class_id_preds with class_id_true for recall here
            accuracy_mask = K.cast(K.equal(class_id_preds, 0), 'int32')
            class_acc_tensor = K.cast(K.equal(class_id_true, class_id_preds),
                                      'int32') * accuracy_mask
            class_acc = K.sum(class_acc_tensor) / K.maximum(
                K.sum(accuracy_mask), 1)
            return class_acc

        class_weights = class_weight.compute_class_weight(
            class_weight='balanced', classes=np.array([0, 1, 2]), y=y_total)

        import tensorflow.keras.backend as K
        from sklearn.metrics import classification_report

        model2.compile(loss='categorical_crossentropy',
                       optimizer=opt,
                       metrics=['accuracy', f1, f0, f2])

        class_w = {
            0: class_weights[0] * (1 / class_weights[1]),
            1: 1.0,
            2: class_weights[2] * (1 / class_weights[1])
        }
        #print(class_w)
        #class_w = {0:5.5,1:1.0,2:4.5}
        model2.summary()
        history = model2.fit(x_train,
                             y_train,
                             batch_size=BATCH_SIZE,
                             epochs=EPOCHS,
                             validation_data=(x_test, y_test),
                             class_weight=class_w,
                             verbose=0)

        return model2
# Example 26
import numpy as np
from tensorflow.keras.models import Sequential, model_from_json
from tensorflow.keras.layers import Dense, Embedding
from tcn import TCN, tcn_full_summary

# define input shape
max_len = 100
max_features = 50

# make model
model = Sequential(layers=[
    Embedding(max_features, 16, input_shape=(max_len, )),
    TCN(nb_filters=12, dropout_rate=0.5, kernel_size=6, dilations=[1, 2, 4]),
    Dense(units=1, activation='sigmoid')
])

# get model as json string and save to file
model_as_json = model.to_json()
with open('model.json', "w") as json_file:
    json_file.write(model_as_json)
# save weights to file (for this format, need h5py installed)
model.save_weights('weights.h5')

# Make inference.
inputs = np.ones(shape=(1, 100))
out1 = model.predict(inputs)[0, 0]
print('*' * 80)
print('Inference after creation:', out1)

# load model from file
loaded_json = open('model.json', 'r').read()
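
# Hedged continuation (mirrors the usual keras-tcn loading pattern; the rest of
# this snippet was cut off): rebuild the model from JSON with TCN registered as
# a custom layer, reload the weights, and repeat the inference check.
# reloaded_model = model_from_json(loaded_json, custom_objects={'TCN': TCN})
# reloaded_model.load_weights('weights.h5')
# out2 = reloaded_model.predict(inputs)[0, 0]
# print('Inference after loading:', out2)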
# Example 27
    def train_model(self):
        config = self.config

        num_epochs = config.num_epochs
        model_name = config.model_name
        batch = config.batch

        log.info("Model name: %s" % model_name)

        # TODO 1004: data loading
        debug = 0
        ds = {}
        phases = ["train", "validate", "test"]
        ds_path = self.ds_path
        then = time.time()

        for phase in phases:
            debug = (phase == "test")
            debug = 0
            ds[phase] = TimeSeriesDataset(root=ds_path,
                                          phase=phase,
                                          debug=debug)
        pass

        now = time.time()
        log.info("// Training data loading. Duration %d sec(s)" % (now - then))
        # // data loading

        x0 = ds["train"].x_list[0]
        y0 = ds["train"].y_list[0]

        log.info("x shape = %s, y shape = %s" % (x0.shape, y0.shape))

        # TODO 1010: build the model
        log.info("Model Setting ....")

        m_name = "_03_flat"

        model = Sequential()

        out_dim = np.prod(x0.shape)
        y_dim = len(y0)

        log.info("out_dim = %s, y_dim = %s" % (out_dim, y_dim))

        model.add(Flatten(input_shape=ds["train"].x_list.shape[1:]))

        #model.add( LSTM(out_dim, batch_input_shape=(None,out_dim), return_sequences=True, stateful=True) )

        # TODO 2020: number of hidden layers
        x_layer_cnt = out_dim
        dense_layer_cnt = out_dim
        y_layer_cnt = 4
        y_layer_cnt = y_dim

        for _ in range(dense_layer_cnt):
            model.add(Dense(out_dim))
            # model.add( Dropout( .2 ) )
        pass

        for _ in range(y_layer_cnt):
            model.add(Dense(y_dim))
        pass

        log.info("// Done. Model Setting ....")
        print(LINE)

        # -- 2. Model construction done.

        # 3. Configure the training process
        log.info("Model Compile ....")

        #optimizer = "rmsprop"
        optimizer = "adam"
        loss = "mean_absolute_error"
        #metrics = "mse"
        metrics = tf.keras.metrics.MeanSquaredLogarithmicError()
        #metrics = tf.keras.metrics.RootMeanSquaredError()

        model.compile(optimizer=optimizer, loss=loss, metrics=[metrics])

        log.info("// Deon. Model Compile ....")
        print(LINE)
        # -- training configuration done.

        # 4. Train the model
        print(LINE)
        log.info("Model Learning ....")

        # define the early-stopping callback
        early_stopping = callbacks.EarlyStopping(patience=20)

        ds_train = ds["train"]
        ds_validate = ds["validate"]

        # fix the random seed
        np.random.seed(0)

        hist = model.fit(ds_train.x_list,
                         ds_train.y_list,
                         epochs=num_epochs,
                         batch_size=batch,
                         validation_data=(ds_validate.x_list,
                                          ds_validate.y_list),
                         callbacks=[early_stopping,
                                    TrainCallback(self)])

        log.info("// Dene. Model Learinig ....")
        print(LINE)

        # 5. Evaluate the model

        print(LINE)
        log.info("Evaluating .....")

        verbose = 1
        for phase in ["train", "validate"]:
            loss = model.evaluate(ds[phase].x_list,
                                  ds[phase].y_list,
                                  verbose=verbose)
            log.info('Loss[%s] : %s' % (phase, loss))
        pass

        log.info("// Evaluating .....")

        # 6. Inspect the results
        if 1:
            ds_test = ds["validate"]
            self.test_model(ds_test, debug=0)
        pass

        if 1:
            evaluator = Evaluator()
            evaluator.evaluate()
        pass

        # -- results

        # TODO 2007: inspect the training history
        history_loss = hist.history['loss']

        history_loss = history_loss.copy()

        # remove too large values
        if 1:
            avg = np.mean(history_loss)
            std = np.std(history_loss)

            del_keys = []
            for i, v in enumerate(history_loss):
                if v > avg + std:
                    del_keys.append(i)
                pass
            pass

            for k in reversed(del_keys):  # pop from the end so earlier indices stay valid
                history_loss.pop(k)
            pass
        pass  # -- remove too large values

        max_hist_loss = max(history_loss)
        plt.plot(history_loss)
        plt.ylim(0, max_hist_loss)

        if 0 and max_hist_loss > 100_000:
            #plt.yscale('log')
            plt.semilogy()
            pass
        pass

        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train'], loc='upper left')
        #plt.show()
        log.info("min loss = %s, max_y = %s" % ((int)(min(history_loss)),
                                                (int)(ds["train"].max_y)))

        # save plot
        plt.savefig("train_%s.png" % m_name, format="png")
        #plt.savefig( "train_%s.svg" % m_name, format="svg" )

        plt.show()
# Example 28
# This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column.
def plotImages(images_arr):
    fig, axes = plt.subplots(1, 5, figsize=(20,20))
    axes = axes.flatten()
    for img, ax in zip( images_arr, axes):
        ax.imshow(img)
        ax.axis('off')
    plt.tight_layout()
    plt.show()

model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
    MaxPooling2D(),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1)
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()
history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=total_train // batch_size,
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=total_val // batch_size)
# Example 29
    os.mkdir(dir_path)
mnist_model_path = os.path.join(dir_path, "mnist_model.h5")
# Log Dir
log_dir = os.path.abspath("C:/Users/Jan/Dropbox/_Programmieren/UdemyTF/logs/")
if not os.path.exists(log_dir):
    os.mkdir(log_dir)
model_log_dir = os.path.join(log_dir, "model3")

# Model params
lr = 0.001
optimizer = Adam(lr=lr)
epochs = 10
batch_size = 256

# Define the DNN
model = Sequential()

model.add(
    Conv2D(filters=32,
           kernel_size=3,
           padding='same',
           input_shape=x_train.shape[1:]))
model.add(Activation("relu"))
model.add(Conv2D(filters=32, kernel_size=3, padding='same'))
model.add(Activation("relu"))
model.add(MaxPool2D())

model.add(Conv2D(filters=64, kernel_size=5, padding='same'))
model.add(Activation("relu"))
model.add(Conv2D(filters=64, kernel_size=5, padding='same'))
model.add(Activation("relu"))
# Example 30
def main():
    print("Number of train waves: ", len(os.listdir("train_images/")))
    print("Number of validation waves: ", len(os.listdir("test_images/")))

    train_image_generator = ImageDataGenerator(
        rescale=1. / 255)  # Generator for our training data
    validation_image_generator = ImageDataGenerator(rescale=1. / 255)

    batch_size = 100
    #batch_size = 40
    epochs = 10
    IMG_HEIGHT = 150
    IMG_WIDTH = 150

    train_data_gen = train_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory="train_images/",
        shuffle=True,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='binary')

    val_data_gen = validation_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory="test_images/",
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='binary')

    # To visualize
    sample_training_images, _ = next(train_data_gen)

    # Create model
    # model = Sequential([
    #    Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    #   MaxPooling2D(),
    #  Conv2D(32, 3, padding='same', activation='relu'),
    # MaxPooling2D(),
    # Conv2D(64, 3, padding='same', activation='relu'),
    # MaxPooling2D(),
    #Flatten(),
    #  Dense(512, activation='softmax'),
    #  Dense(10)

    #])
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))

    model.add(Flatten())
    model.add(Dense(64, activation='softmax'))
    model.add(Dense(10))

    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    model.summary()

    history = model.fit_generator(
        train_data_gen,
        #steps_per_epoch=len(os.listdir("train_images/")) // batch_size,
        steps_per_epoch=29577 // batch_size,
        epochs=epochs,
        validation_data=val_data_gen,
        validation_steps=7677 // batch_size)
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']

    loss = history.history['loss']
    val_loss = history.history['val_loss']

    epochs_range = range(epochs)

    plt.figure(figsize=(8, 8))
    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, acc, label='Training Accuracy')
    plt.plot(epochs_range, val_acc, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.title('Training and Validation Accuracy')

    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, loss, label='Training Loss')
    plt.plot(epochs_range, val_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.title('Training and Validation Loss')
    plt.savefig("trainingPlot.jpg")