Example #1
def create_classifier_model(
        input_length, hidden_layer_sizes, regularization_beta):
    
    layer_sizes = hidden_layer_sizes + [1]
    num_layers = len(layer_sizes)
    
    regularizer = keras.regularizers.l2(regularization_beta)
    
    model = Sequential()
    
    for i in range(num_layers):
        
        kwargs = {
            'activation': 'sigmoid' if i == num_layers - 1 else 'relu',
            'kernel_regularizer': regularizer
        }
        
        if i == 0:
            kwargs['input_dim'] = input_length
            
        model.add(Dense(layer_sizes[i], **kwargs))
        
    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'])
    
    return model
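# A minimal usage sketch; the argument values and X_train/y_train are
# illustrative assumptions, not part of the original snippet:
# clf = create_classifier_model(
#     input_length=20, hidden_layer_sizes=[64, 32], regularization_beta=1e-4)
# clf.fit(X_train, y_train, epochs=10, batch_size=32)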
Example #2
def model(GRU_size=5):
    adam = optimizers.Adam()
    model = Sequential()
    model.add(GRU(GRU_size))
    model.add(Dense(len(interested_words), activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=adam)
    return model
Example #3
    def model_architecture(
            self,
            input_shape,  # type: Tuple[int, int]
            output_shape  # type: Tuple[int, Optional[int]]
    ):
        # type: (...) -> tf.keras.models.Sequential
        """Build a keras model and return a compiled model."""

        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import \
            Masking, LSTM, Dense, TimeDistributed, Activation

        # Build Model
        model = Sequential()

        # the shape of the y vector of the labels,
        # determines which output from rnn will be used
        # to calculate the loss
        if len(output_shape) == 1:
            # y is (num examples, num features) so
            # only the last output from the rnn is used to
            # calculate the loss
            model.add(Masking(mask_value=-1, input_shape=input_shape))
            model.add(LSTM(self.rnn_size, dropout=0.2))
            model.add(Dense(input_dim=self.rnn_size, units=output_shape[-1]))
        elif len(output_shape) == 2:
            # y is (num examples, max_dialogue_len, num features) so
            # all the outputs from the rnn are used to
            # calculate the loss, therefore a sequence is returned and
            # time distributed layer is used

            # the first value in input_shape is max dialogue_len,
            # it is set to None, to allow dynamic_rnn creation
            # during prediction
            model.add(Masking(mask_value=-1,
                              input_shape=(None, input_shape[1])))
            model.add(LSTM(self.rnn_size, return_sequences=True, dropout=0.2))
            model.add(TimeDistributed(Dense(units=output_shape[-1])))
        else:
            raise ValueError("Cannot construct the model because "
                             "length of output_shape = {} "
                             "should be 1 or 2."
                             "".format(len(output_shape)))

        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        logger.debug(model.summary())

        return model
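    # Illustrative call patterns (a sketch; 'policy', the shapes and the
    # feature counts are assumptions): output_shape=(num_actions,) trains on
    # only the final RNN output, while output_shape=(None, num_actions)
    # trains on the full sequence via the TimeDistributed branch.
    #   model = policy.model_architecture((max_len, num_feat), (num_actions,))
    #   seq_model = policy.model_architecture((max_len, num_feat), (None, num_actions))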
Example #4
def fit_model(train_X, train_Y, window_size=1):
    EPOCHS = 10
    model = Sequential()

    model.add(LSTM(4,
                   input_shape=(1, window_size)))
    model.add(Dense(1))
    model.compile(loss="mean_squared_error",
                  optimizer="adam")
    model.fit(train_X,
              train_Y,
              epochs=EPOCHS,
              batch_size=1,
              verbose=2)

    return model

model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    MaxPooling2D(),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1)
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()
history = model.fit(  # fit_generator is deprecated; fit accepts generators
    train_data_gen,
    steps_per_epoch=total_train // batch_size,
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=total_val // batch_size
)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']
for layer in baseModel.layers:
    layer.trainable = False

for layer in baseModel.layers[-22:]:
    layer.trainable = True

model.summary()


# optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999,
#                  epsilon=None, decay=1e-6, amsgrad=False)

optimizer = SGD(learning_rate=0.001)

model.compile(optimizer=optimizer,
              loss="categorical_crossentropy",
              metrics=['accuracy', utils.CalculateF1Score])

# Set an early stopping callback
cb_early_stopper = EarlyStopping(monitor='val_loss', patience=4)

# Set a learning rate reductor
learningRateReduction = ReduceLROnPlateau(
    monitor='val_accuracy', patience=3, verbose=1, factor=0.5, min_lr=0.00001)

# Fit the model
epochs = 30
batchSize = 10

history = model.fit(train_datagen.flow(xTrain, yTrain, batch_size=batchSize),
                    epochs=epochs, validation_data=(xValidate, yValidate),
                    callbacks=[cb_early_stopper, learningRateReduction])
Example #7
def get_model(input_shape=(256, 256, 3), logits=5):
    model = Sequential()

    # 1st Convolutional Layer
    model.add(
        Conv2D(filters=96,
               input_shape=input_shape,
               kernel_size=(11, 11),
               strides=(4, 4),
               padding="valid",
               activation="relu"))

    # Max Pooling
    model.add(MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="valid"))

    # 2nd Convolutional Layer
    model.add(
        Conv2D(filters=256,
               kernel_size=(5, 5),
               strides=(1, 1),
               padding="same",
               activation="relu"))

    # Max Pooling
    model.add(MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="valid"))

    # 3rd Convolutional Layer
    model.add(
        Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding="same",
               activation="relu"))

    # 4th Convolutional Layer
    model.add(
        Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding="same",
               activation="relu"))

    # 5th Convolutional Layer
    model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding="same",
               activation="relu"))

    # Max Pooling
    model.add(MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="valid"))

    # Passing it to a Fully Connected layer
    model.add(Flatten())
    # 1st Fully Connected Layer
    model.add(Dense(units=9216, activation="relu"))

    # 2nd Fully Connected Layer
    model.add(Dense(units=4096, activation="relu"))

    # 3rd Fully Connected Layer
    model.add(Dense(4096, activation="relu"))

    # Output Layer
    model.add(Dense(logits, activation="softmax"))

    opt = Adam(learning_rate=0.001)
    model.compile(optimizer=opt,
                  loss=sparse_categorical_crossentropy,
                  metrics=['sparse_categorical_accuracy'])
    return model
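# Usage sketch: with sparse_categorical_crossentropy the labels are integer
# class ids rather than one-hot vectors ('images' and 'labels' below are
# assumed arrays of shape (n, 256, 256, 3) and (n,)):
# net = get_model(input_shape=(256, 256, 3), logits=5)
# net.fit(images, labels, epochs=5, batch_size=32)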
# print type of model
print('type(vgg16_model): ', type(vgg16_model))

# Create an empty Sequential model
model = Sequential()
# Add layers one by one from the vgg16_model to our Sequential model,
# skipping the last layer (index = -1) of vgg16_model.
for layer in vgg16_model.layers[:-1]:
    model.add(layer)
# print our new Sequential model (without the last layer)
model.summary()

# Checking the model summary, the number of trainable parameters is 134,260,544
params = count_params(model)
assert params['non_trainable_params'] == 0
assert params['trainable_params'] == 134260544

# Set the layer to non-trainable
for layer in model.layers:
    layer.trainable = False

# Add the Dense layer to the last output layer
model.add(Dense(units=2, activation='softmax'))
model.summary()

# Compiled the fine-tuned vgg16_model
model.compile(optimizer=Adam(learning_rate=0.0001), \
    loss='categorical_crossentropy', metrics=['accuracy'])
# model fit
model.fit(x=train_batches, steps_per_epoch=4, \
    validation_data=valid_batches, validation_steps=4, epochs=10, verbose=2)
Example #9
    # test_labels = labels

    label_lb = LabelBinarizer()
    label_lb.fit(tags)
    train_labels = label_lb.transform(train_labels)
    test_labels = label_lb.transform(test_labels)

    model = Sequential([
        Dense(128, input_shape=(len(train_data[0]), ), activation='relu'),
        Dense(64, activation='relu'),
        Dropout(0.5),
        Dense(len(train_labels[0]), activation='softmax')
    ])

    model.compile(optimizer='adam',
                  loss=CategoricalCrossentropy(),
                  metrics=['accuracy'])

    time_start = time.time()
    history = model.fit(train_data,
                        train_labels,
                        epochs=30,
                        batch_size=5,
                        validation_data=(test_data, test_labels),
                        verbose=2)
    time_end = time.time()
    print('Training time:', time_end - time_start)

    test_loss, test_acc = model.evaluate(test_data, test_labels, verbose=2)
    print(test_acc, test_loss)
model = Sequential()
model.add(Dense(3, input_dim=5))
model.add(Dense(7))
model.add(Dense(4))
model.add(Dense(3))
model.summary()

input1 = Input(shape=(5, ))
dense1 = Dense(3)(input1)
dense1_1 = Dense(7)(dense1)
dense2 = Dense(4)(dense1_1)
output1 = Dense(3)(dense2)

model = Model(inputs=input1, outputs=output1)
model.summary()
#3. Compile, Train
model.compile(optimizer='adam', loss='mse')
model.fit(x_train, y_train, batch_size=1, epochs=64,
          validation_data=(x_test, y_test), verbose=2)

#4. Evaluate, Predict
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
def RMSE(y_test, y_predict):
    return np.sqrt(mean_squared_error(y_test, y_predict))
import tensorflow as tf

y_predict = model.predict(x_test)
loss = model.evaluate(x_test, y_test)
print('MSE :', mean_squared_error(y_test, y_predict))
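# The RMSE helper defined above and the imported r2_score report the remaining metrics:
print('RMSE :', RMSE(y_test, y_predict))
print('R2 :', r2_score(y_test, y_predict))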
Example #11
model_v2 = tf.keras.Sequential([
    data_augmentation,
    layers.experimental.preprocessing.Rescaling(1. / 255),
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.2),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(num_classes)
])

model_v2.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])

epochs = 15
history = model_v2.fit(train_ds, validation_data=val_ds, epochs=epochs)

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(epochs)

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
Example #12
X, y = [], []  # reconstructed setup for the truncated snippet
for features, label in training_data:  # training_data assumed: list of (image, label) pairs
    X.append(features)
    y.append(label)

X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
y = np.array(y)  # Keras expects arrays, not Python lists

X = X / 255.0

model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation("relu"))

model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss="binary_crossentropy",
              optimizer="adam",
              metrics=['accuracy'])

model.fit(X, y, batch_size=2, epochs=20, validation_split=0.1)

model.save("trained.model")
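# The saved model can later be restored without rebuilding the architecture
# (a sketch, assuming the same tf.keras version is available):
# reloaded = tf.keras.models.load_model("trained.model")
# reloaded.predict(X[:1])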
#%%
# below is another way to build the model
model = Sequential()
model.add(Dense(4, activation="relu"))
model.add(Dense(2, activation="relu"))
model.add(Dense(1))

#%%
model = Sequential()
model.add(Dense(4, activation="relu"))
model.add(Dense(4, activation="relu"))
model.add(Dense(4, activation="relu"))
model.add(Dense(1))

model.compile(optimizer="rmsprop", loss="mse")

#%%
model.fit(x=X_train, y=y_train, epochs=250)
#%%
df_loss = pd.DataFrame(model.history.history)
df_loss.plot()

#%%
# the last layer determines what type of
# model is produced: in a regression problem
# we leave the last layer with an identity
# (linear) activation function; if the problem
# were a classification problem, we would use
# more output neurons with a different
# activation function such as softmax
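# For example (a sketch; the layer sizes below are arbitrary assumptions),
# the two kinds of head differ only in the final layer:
# regression_head = Dense(1)                             # identity/linear output
# classification_head = Dense(3, activation="softmax")   # one neuron per class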
Example #14
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# create model
model = Sequential()
model.add(Dense(128, input_dim=784, activation='sigmoid'))
model.add(Dense(10, activation='sigmoid'))
#compile model
model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])

mnist = input_data.read_data_sets("/tf/data/mnist/", one_hot=True)
X = mnist.train.images
Y = mnist.train.labels
# Fit the model

model.fit(X, Y, epochs=1, batch_size=10)
# evaluate the model
scores = model.evaluate(X, Y)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
Example #15
def train_nn(d, x, f, num_nodes, weights_init=None, summary=False, plot=False):

    # set hyperparameters
    lr_start = 1e-03
    lr_decay = .99954
    learning_rate = LearningRateScheduler(
        lambda epoch: lr_start * lr_decay**epoch)
    epochs = 10000
    batch_size = x.shape[0] if weights_init is not None else np.ceil(
        x.shape[0] / 100).astype(int)

    # initialize weights if A,B,C are provided
    if weights_init is not None:

        def A_init(shape, dtype=None):
            return weights_init[0].T

        def B_init(shape, dtype=None):
            return weights_init[1].reshape(-1)

        def C_init(shape, dtype=None):
            return weights_init[2]

    # define the model
    model = Sequential()
    if weights_init is not None:
        model.add(Dense(num_nodes, input_shape=(d,), \
            kernel_initializer=A_init, bias_initializer=B_init))
        model.add(Activation('relu'))
        model.add(Dense(1, \
            kernel_initializer=C_init))
    else:
        model.add(Dense(num_nodes, input_shape=(d,), \
            kernel_initializer=tf.keras.initializers.TruncatedNormal(), \
            bias_initializer=tf.keras.initializers.Zeros()))
        model.add(Activation('relu'))
        model.add(Dense(1, \
            kernel_initializer=tf.keras.initializers.TruncatedNormal(), \
            bias_initializer=tf.keras.initializers.Zeros()))

    # compile the model
    model.compile(loss='mean_squared_error',
                  optimizer=tf.keras.optimizers.Adam(learning_rate=lr_start))

    # display the model
    if summary:
        print()
        model.summary()

    # display network parameters
    if weights_init is not None:
        label = 'GSN loss'
        print('\ntraining network using GSN initialization...')
    else:
        label = 'random loss'
        print('\ntraining network using random initialization...')
    print('network parameters: epochs = {:d}, batch size = {:d}'.format(
        epochs, batch_size))

    # train the model
    history = model.fit(x, f, batch_size=batch_size, epochs=epochs, \
                        verbose=0, callbacks=[learning_rate])

    # save and plot the training process
    fig = plt.figure(figsize=(8, 3))
    plt.semilogy(history.history['loss'], label=label)
    plt.legend(loc='upper right')
    # save the figure
    name = time.strftime('%Y-%m-%d %H.%M.%S', time.localtime())
    #plt.savefig('./images/{:s}.png'.format(name), format='png')
    #plt.savefig('./images/{:s}.pdf'.format(name), format='pdf')
    # plot the figure
    if plot:
        plt.show()
    else:
        plt.close()

    # return trained weights
    A = model.layers[0].get_weights()[0].T
    B = model.layers[0].get_weights()[1].reshape((-1, 1))
    C = model.layers[-1].get_weights()[0].reshape((-1, 1))
    c = model.layers[-1].get_weights()[1]

    # evaluate the model
    model.evaluate(x, f, batch_size=x.shape[0])

    return A, B, C, c
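# A usage sketch (an assumption: x is an (n, d) sample matrix and f the targets);
# the returned weights can seed a second, GSN-initialized run:
# A, B, C, c = train_nn(d=x.shape[1], x=x, f=f, num_nodes=32)
# train_nn(d=x.shape[1], x=x, f=f, num_nodes=32, weights_init=(A, B, C))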
Example #16
print('x_test.reshape:', x_test.shape)

# 3. Model
model = Sequential()
model.add(LSTM(1, input_shape=(10, 1)))
model.add(Dense(200))
model.add(Dense(300))
model.add(Dense(500))
model.add(Dense(200))
model.add(Dense(10, activation='softmax'))

model.summary()

# 4. Train
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])

model.fit(x_train,
          y_train,
          epochs=100,
          batch_size=5,
          verbose=1,
          validation_split=0.5)

# 5. Evaluate, predict
loss, acc = model.evaluate(x_test, y_test, batch_size=32)
print("loss : ", loss)
print("acc : ", acc)

y_predict = model.predict(x_test)  # feed the evaluation data back in to produce predictions
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    # Horovod: adjust learning rate based on number of GPUs.
    opt = tf.keras.optimizers.Adadelta(1.0 * hvd.size())

    # Horovod: add Horovod Distributed Optimizer.
    opt = hvd.DistributedOptimizer(opt)

    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=opt,
                  metrics=['accuracy'])

    callbacks = [
        # Horovod: broadcast initial variable states from rank 0 to all other processes.
        # This is necessary to ensure consistent initialization of all workers when
        # training is started with random weights or restored from a checkpoint.
        hvd.callbacks.BroadcastGlobalVariablesCallback(0),
    ]

    # Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.
    if hvd.rank() == 0:
        callbacks.append(tf.keras.callbacks.ModelCheckpoint('./checkpoint-{epoch}.h5'))

    model.fit(x_train, y_train,
              batch_size=batch_size,
              callbacks=callbacks,  # callbacks/epochs follow the standard Horovod Keras example
              epochs=epochs,
              verbose=1)
Example #18
print(y_test.shape) #(89,)

#2. Build the model
from tensorflow.keras.models import Sequential
# the model class is imported from tensorflow.keras 'models'
from tensorflow.keras.layers import Dense
# the layers are imported from tensorflow.keras 'layers'

model = Sequential()
#model.add(Dense(10,input_dim=10,activation='relu'))
model.add(Dense(10, input_shape=(10, ), activation='relu'))
model.add(Dense(1))

#3. Compile, train
model.compile(loss='mse', optimizer='adam')
model.fit(x_train, y_train, epochs=100, batch_size=10, validation_split=0.2)

#4. Evaluate, predict: check the model against several different metrics

#1) compute the loss
loss = model.evaluate(x_test, y_test, batch_size=10)
print('loss : ', loss)

#2) compute the y predictions
y_predict = model.predict(x_test)  # predict y values from the test x values

#3) compute RMSE and R2 from the predictions

# RMSE (root mean squared error): declare RMSE as a function, then compute its value
from sklearn.metrics import mean_squared_error  # found in sklearn.metrics
print(model.summary())

# Parameters:
# kernel_size: the size of the feature-detector matrix
# input_shape: the size of the image used in training
# data_format='channels_last': default parameter indicating that the channel dimension is added at the end of the matrix
# kernel_regularizer: adds a penalty when the error reaches a certain value
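# A sketch of a Conv2D layer using the parameters described above (the filter
# count, kernel size and image size are illustrative assumptions):
# Conv2D(32, kernel_size=(3, 3), input_shape=(48, 48, 1),
#        data_format='channels_last',
#        kernel_regularizer=tf.keras.regularizers.l2(0.01))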
"""## Etapa 8 - Compilando o modelo
*   Parâmetros Adam:  https://arxiv.org/abs/1412.6980
*   Artigo sobre parâmetros Adam: https://machinelearningmastery.com/adam-optimization-algorithm-for-deep-learning/ 
*   Parâmetros beta: representam a taxa de decaimento exponencial (por exemplo, 0.9) que está relacionado com a taxa de aprendizagem (learning rate - lr)
"""

# Compile the model
model.compile(loss=categorical_crossentropy,
              optimizer=Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7),
              metrics=['accuracy'])
arquivo_modelo = 'modelo_02_expressoes.h5'
arquivo_modelo_json = 'modelo_02_expressoes.json'
lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                               factor=0.9,
                               patience=3,
                               verbose=1)
early_stopper = EarlyStopping(monitor='val_loss',
                              min_delta=0,
                              patience=8,
                              verbose=1,
                              mode='auto')
checkpointer = ModelCheckpoint(arquivo_modelo,
                               monitor='val_loss',
                               verbose=1,
Example #20
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.3))

regressor.add(
    LSTM(units=50)
)  # return_sequences=True is used when passing data from one recurrent layer to another; here the next layer is dense, so it is omitted
regressor.add(Dropout(0.3))

regressor.add(
    Dense(units=1, activation='linear')
)  # output layer with a linear activation; we do not want any transformation on the output

regressor.compile(
    optimizer='rmsprop',
    loss='mean_squared_error',
    metrics=['mean_absolute_error'])  # RMSprop is the most commonly recommended optimizer for recurrent networks
regressor.fit(previsores, preco_real, epochs=100, batch_size=32)

# ==== Structure of the test data ====
base_teste = pd.read_csv('petr4_teste.csv')
preco_real_teste = base_teste.iloc[:, 1:2].values  # take only the price column
base_completa = pd.concat(
    (base['Open'], base_teste['Open']),
    axis=0)  # concatenate the test set with the training set
# we only need the test set plus the 90 days before the first test day
entradas = base_completa[len(base_completa) - len(base_teste) - 90:].values
entradas = entradas.reshape(-1, 1)
Example #21
                              activation='relu'))
                    model.add(
                        Dense(75,
                              kernel_initializer='normal',
                              activation='relu'))
                    model.add(
                        Dense(100,
                              kernel_initializer='normal',
                              activation='relu'))
                    model.add(
                        Dense(1,
                              kernel_initializer='normal',
                              activation='sigmoid'))

                    model.compile(loss='binary_crossentropy',
                                  optimizer='adam',
                                  metrics=['accuracy'])

                    model.fit(Xcl_train,
                              ycl_train,
                              epochs=10,
                              batch_size=10,
                              verbose=0)

                    ##################################################
                    ##################################################

                    cluster_models[cl] = model

            tn, fp, fn, tp = 0, 0, 0, 0
Example #22
    def train(self):
        train_img_list = []
        train_label_list = []
        for file in os.listdir(self.train_folder):
            files_img_in_array = self.read_train_images(filename=file)
            train_img_list.append(files_img_in_array)  # accumulate the image list
            train_label_list.append(int(file.split('_')[0]))  # accumulate the label list
        print(train_label_list)
        train_label_list = to_categorical(train_label_list, 4)
        train_img_list = np.array(train_img_list)
        train_label_list = np.array(train_label_list)
        print(train_label_list)

        #train_label_list = to_categorical(train_label_list,4) #format into binary [0,0,0,0,1,0,0]

        train_img_list = train_img_list.astype('float32')
        train_img_list /= 255

        #-- setup Neural network CNN
        model = Sequential()
        #CNN Layer - 1
        model.add(
            Convolution2D(
                filters=32,  # output shape for the next layer: (100, 100, 32)
                kernel_size=(5, 5),  # size of each filter in pixels
                padding='same',  # padding method
                input_shape=(100, 100, 3),  # input shape, channels last (TensorFlow)
            ))
        model.add(Activation('relu'))
        model.add(
            MaxPooling2D(
                pool_size=(2, 2),  #Output for next layer (50,50,32)
                strides=(2, 2),
                padding='same',
            ))

        #CNN Layer - 2
        model.add(
            Convolution2D(
                filters=64,  #Output for next layer (50,50,64)
                kernel_size=(2, 2),
                padding='same',
            ))
        model.add(Activation('relu'))
        model.add(
            MaxPooling2D(  #Output for next layer (25,25,64)
                pool_size=(2, 2),
                strides=(2, 2),
                padding='same',
            ))

        #Fully connected Layer -1
        model.add(Flatten())
        model.add(Dense(1024))
        model.add(Activation('relu'))
        # Fully connected Layer -2
        model.add(Dense(512))
        model.add(Activation('relu'))
        # Fully connected Layer -3
        model.add(Dense(256))
        model.add(Activation('relu'))
        # Fully connected Layer -4
        model.add(Dense(self.categories))
        model.add(Activation('softmax'))
        # Define Optimizer
        adam = Adam(learning_rate=0.0001)
        #Compile the model
        model.compile(optimizer=adam,
                      loss="categorical_crossentropy",
                      metrics=['accuracy'])
        # Fire up the network
        model.fit(
            train_img_list,
            train_label_list,
            epochs=self.number_batch,
            batch_size=self.batch_size,
            verbose=1,
        )
        # save the trained model
        model.save('./cellfinder.h5')
Example #23
                                                    test_size=0.3,
                                                    random_state=101)
scaler = MinMaxScaler()

X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

model = Sequential()
model.add(Dense(19, activation='relu'))
model.add(Dense(19, activation='relu'))
model.add(Dense(19, activation='relu'))
model.add(Dense(19, activation='relu'))

model.add(Dense(1))

model.compile(optimizer='adam', loss='mse')
model.fit(x=X_train,
          y=y_train,
          validation_data=(X_test, y_test),
          batch_size=128,
          epochs=400)

losses = pd.DataFrame(model.history.history)

predictions = model.predict(X_test)

# np.sqrt(mean_squared_error(y_test, predictions))
mae = mean_absolute_error(y_test, predictions)
# print(mae)

explained_variance_score(y_test, predictions)
Example #24
def predict(figi: str):
    interval = request.args.get('interval')
    end_date = request.args.get('to')
    client = ti.SyncClient(SANDBOX_TOKEN, use_sandbox=True)
    print(datetime.now())
    # get candles for last year
    response = client.get_market_candles(figi=figi,
                                         from_=datetime.now() -
                                         timedelta(days=365),
                                         to=datetime.now(),
                                         interval=ti.CandleResolution.day)
    candles_data = response.payload.candles
    close_full_y = [float(x.c.real) for x in candles_data]
    time_full_x = [x.time for x in candles_data]
    num_shape = math.ceil(0.9 * len(close_full_y))

    df = pd.DataFrame(list(zip(close_full_y, time_full_x)),
                      columns=['Close', 'Date'])

    train = df.iloc[:num_shape, 0:1].values
    test = df.iloc[num_shape:, 0:1].values

    sc = MinMaxScaler(feature_range=(0, 1))
    train_scaled = sc.fit_transform(train)
    X_train = []
    # Price on next day
    y_train = []
    window = 30
    for i in range(window, num_shape):
        X_train_ = np.reshape(train_scaled[i - window:i, 0], (window, 1))
        X_train.append(X_train_)
        y_train.append(train_scaled[i, 0])
    X_train = np.stack(X_train)
    y_train = np.stack(y_train)
    model = Sequential()

    model.add(
        LSTM(units=50,
             return_sequences=True,
             input_shape=(X_train.shape[1], 1)))
    model.add(Dropout(0.2))

    model.add(LSTM(units=50, return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(units=50, return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(units=50))
    model.add(Dropout(0.2))

    model.add(Dense(units=1))

    model.compile(optimizer='adam', loss='mean_squared_error')

    model.fit(X_train, y_train, epochs=30, batch_size=24, verbose=0)
    df_volume = np.vstack((train, test))
    num_2 = df_volume.shape[0] - num_shape + window

    pred_ = df['Close'].iloc[-1].copy()
    prediction_full = []
    df_copy = df.iloc[:, 0:1][1:].values

    for j in range(20):
        df_ = np.vstack((df_copy, pred_))
        train_ = df_[:num_shape]
        test_ = df_[num_shape:]

        df_volume_ = np.vstack((train_, test_))

        inputs_ = df_volume_[df_volume_.shape[0] - test_.shape[0] - window:]
        inputs_ = inputs_.reshape(-1, 1)
        inputs_ = sc.transform(inputs_)

        X_test_2 = []

        for k in range(window, num_2):
            X_test_3 = np.reshape(inputs_[k - window:k, 0], (window, 1))
            X_test_2.append(X_test_3)

        X_test_ = np.stack(X_test_2)
        predict_ = model.predict(X_test_)
        pred_ = sc.inverse_transform(predict_)
        prediction_full.append(pred_[-1][0])
        df_copy = df_[j:]
    df_date = df[['Date']]

    for h in range(20):
        df_date_add = pd.to_datetime(
            df_date['Date'].iloc[-1]) + pd.DateOffset(days=1)
        df_date_add = pd.DataFrame([df_date_add.strftime("%Y-%m-%d")],
                                   columns=['Date'])
        # DataFrame.append was removed in pandas 2.0; use pd.concat instead
        df_date = pd.concat([df_date, df_date_add])
    df_date = df_date.reset_index(drop=True)

    result = []
    for ind, val in zip(df_date['Date'][len(close_full_y):], prediction_full):
        mda = dict({"pred_close": float(val), "time": ind})
        result.append(mda)
    return jsonify(result)
Example #25
model.add(BatchNormalization())

model.add(LSTM(128))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(2, activation='softmax'))

opt = tf.keras.optimizers.Adam(learning_rate=0.001, decay=1e-6)

# Compile model
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))

# unique file name that will include the epoch and the validation acc for that epoch
filepath = "RNN_Final-{epoch:02d}-{val_acc:.3f}"
# the keyword arguments belong to ModelCheckpoint, not str.format
checkpoint = ModelCheckpoint(
    "models/{}.model".format(filepath),
    monitor='val_acc',
    verbose=1,
    save_best_only=True,
    mode='max')  # saves only the best ones

# Train model (completed to mirror the identical snippet in Example #35)
history = model.fit(
    train_x, train_y,
    batch_size=BATCH_SIZE,
    epochs=EPOCHS,
    validation_data=(validation_x, validation_y),
    callbacks=[tensorboard, checkpoint])
image = np.array(image)
resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
## define one constant, the mouth aspect ratio threshold that indicates an open mouth
MOUTH_AR_THRESH = 0.5

## initialize dlib's face detector (HOG-based) and then create
## the facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

## grab the indexes of the facial landmarks for the mouth
(mStart, mEnd) = (49, 68)
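## a sketch of the mouth-aspect-ratio computation that MOUTH_AR_THRESH is
## compared against; the landmark index pairs follow the common 68-point
## recipe and are an assumption of this sketch
def mouth_aspect_ratio(mouth):
    # vertical distances between upper and lower lip landmarks
    a = np.linalg.norm(mouth[2] - mouth[10])
    b = np.linalg.norm(mouth[4] - mouth[8])
    # horizontal distance between the mouth corners
    c = np.linalg.norm(mouth[0] - mouth[6])
    return (a + b) / (2.0 * c)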


if mode == "train":
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(learning_rate=0.0001, decay=1e-6),
                  metrics=['accuracy'])
    model_info = model.fit(  # fit_generator is deprecated; fit accepts generators
            train_generator,
            steps_per_epoch=num_train // batch_size,
            epochs=num_epoch,
            validation_data=validation_generator,
            validation_steps=num_val // batch_size)
    model.save_weights('model.h5')

# emotions will be displayed on your face from the webcam feed
elif mode == "display":
    model.load_weights('model.h5')

    # prevents openCL usage and unnecessary logging messages
    cv2.ocl.setUseOpenCL(False)
Example #27
# Encoder (opening lines reconstructed; the 32-filter first layer is an
# assumption mirroring the decoder)
model = Sequential()
model.add(
    Conv2D(32, (3, 3), activation='relu', padding='same',
           input_shape=(SIZE, SIZE, 3)))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D((1, 1), padding='same'))

model.add(MaxPooling2D((2, 2), padding='same'))
#Decoder
model.add(UpSampling2D((1, 1)))
model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(3, (3, 3), activation='relu', padding='same'))

model.compile(optimizer='adam',
              loss='mean_squared_error',
              metrics=['accuracy'])
model.summary()

model.fit(
    img_array,
    img_array,
    epochs=1000,  #1000s of epochs needed for good results. Use GPU.
    shuffle=True)  #Shuffle data for each epoch

print("Output")

pred = model.predict(img_array)  #Predict model on the same input array.

#In reality, train on 1000s of input images and predict on images that the training
#algorithm never saw.
Example #28
                     mask_zero=True,
                     trainable=False)
model = Sequential()
model.add(eb_layer)
model.add(SpatialDropout1D(0.30))
model.add(
    Bidirectional(
        LSTM(lstm_out,
             dropout=0.2,
             recurrent_dropout=0.2,
             return_sequences=True)))
model.add(TimeDistributed(Dense(2, activation="softmax")))

adam = optimizers.Adam(learning_rate=0.001)
model.compile(loss="binary_crossentropy",
              optimizer=adam,
              metrics=["acc"],
              sample_weight_mode="temporal")
print(model.summary())

batch_size = 32
#class_weights = {0: 1., 1: 5.}
weighting = 4
model.fit(x_train,
          y_train,
          validation_split=0.1,
          epochs=10,
          sample_weight=sw_train * weighting + 1,
          batch_size=batch_size,
          verbose=1)

model.save("./data/models/nb_stream_10_fasttext.h5")
def cnn_classifer():
    classifer = Sequential()  # create the model
    # TODO: save the model with checkpoints, build bottleneck features if possible,
    # and find and apply an algorithm that improves the learning rate

    #1
    classifer.add(
        Conv2D(filters=20,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=(64, 64, 1),
               padding='same',
               strides=(2, 2),
               kernel_initializer='he_normal'))
    classifer.add(BatchNormalization())

    classifer.add(Activation('relu'))
    classifer.add(AveragePooling2D(
        pool_size=(2, 2)))  # changed from the original max pooling
    #classifer.add(BatchNormalization())

    classifer.add(
        Conv2D(filters=20,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               strides=(2, 2),
               kernel_initializer='he_normal'))
    #classifer.add(BatchNormalization())

    classifer.add(MaxPooling2D(pool_size=(2, 2)))
    #classifer.add(BatchNormalization())

    classifer.add(Dropout(0.25))

    classifer.add(Flatten())  # convert to 1D (must come right before the dense layers)

    # third: add a Flatten layer => converts to 1D
    # fourth: add the Dense layers
    classifer.add(
        Dense(units=32, activation='relu', kernel_initializer='he_normal'))
    classifer.add(Dropout(0.5))
    classifer.add(
        Dense(units=1,
              activation='hard_sigmoid',
              kernel_initializer='he_normal'))  # do not change the sigmoid function
    # binary classification is required
    classifer.summary()

    optimizer = keras.optimizers.Adadelta(
        rho=0.95, learning_rate=1.0, decay=0.0)  # addresses the problem of coefficients shrinking as training progresses
    # computed from an exponentially decaying average of the squared gradients
    sgd = optimizers.SGD(learning_rate=0.1, decay=1e-6, momentum=0.9,
                         nesterov=True)  # start with a large learning rate and gradually reduce it
    # fifth: compile the model
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001,
                                            rho=0.9,
                                            epsilon=1e-7,
                                            decay=0.0)
    # using adagrad as the optimizer would give an adaptive method
    #classifer.compile(loss="mse",metrics=["mae"])
    classifer.compile(
        optimizer=tf.keras.optimizers.Adadelta(rho=0.95, learning_rate=1.0, decay=0.0),
        loss='mse',
        metrics=['accuracy'])  # binary_crossentropy is also used as the loss; mse must be used here for the loss to decrease

    #classifer.compile(optimizer=SGD(learning_rate=0.1, momentum=0.9, nesterov=True),
    #                  loss='mse', metrics=['accuracy'])
    # summary of Keras optimizers:
    # sgd: stochastic gradient descent / momentum keeps the learning rate fixed and adjusts the parameters
    # adagrad: trains by adapting the learning rate
    # rmsprop, adadelta: refinements of adagrad
    # adam is the default; the sparse_* losses work without one-hot encoding; metrics evaluate model performance => loss functions can also be used

    return classifer
    MaxPooling2D(),

    Conv2D(128, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    

    Flatten(),
    Dense(256, activation='relu'),
    Dropout(0.5),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid')
])
op = Adam(learning_rate=0.0003)
model.compile(optimizer=op,
              loss='binary_crossentropy',
              metrics=['accuracy'])

history = model.fit(  # fit_generator is deprecated; fit accepts generators
    train_data_gen,
    steps_per_epoch=total_train // batch_size,
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=total_val // batch_size
)

model.summary()
# model.save(top_model_path)
# model.save_weights(top_model_weights_path)

Example #31
#A second LSTM layer with 256 units.
#A further Dropout layer.
#A Dense layer to produce a single output.
#Use MSE as loss function.


model2 = Sequential()
model2.add(LSTM(input_shape=(window_size, 1),
                units=window_size,
                return_sequences=True))
model2.add(Dropout(0.5))
model2.add(LSTM(256))
model2.add(Dropout(0.5))
model2.add(Dense(1))
model2.add(Activation("linear"))
model2.compile(loss="mse",
               optimizer="adam")
print(model2.summary())

# Fit the model.
model2.fit(train_X,
           train_Y,
           batch_size=512,
           epochs=3,
           validation_split=0.1)


pred_test = model2.predict(test_X)

# Apply inverse transformation to get back true values.
test_y_actual = scaler.inverse_transform(test_Y.values.reshape(test_Y.shape[0], 1))
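# The predictions live in the scaled space too, so the same inverse transform
# applies before comparing with test_y_actual (assuming scaler was fit on the
# target series):
pred_test_actual = scaler.inverse_transform(pred_test)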
Example #32
                     shuffle=False)

# Instantiate Model
###################
clear_session()
model6 = Sequential()
model6.add(
    Conv1D(32, 5, activation='relu', input_shape=(None, df_values.shape[-1])))
model6.add(MaxPooling1D(3))
model6.add(Conv1D(32, 5, activation='relu'))
model6.add(MaxPooling1D(3))
model6.add(Conv1D(32, 5, activation='relu'))
model6.add(GlobalMaxPooling1D())
model6.add(Dense(1))

model6.compile(optimizer=RMSprop(), loss='mae', metrics=['mae'])
print(model6.summary())

# Train
#######
m2_callbacks = [
    # interrupt training when there is no more improvement.
    # patience=2 means interrupt training when the monitored metric (mae) has
    # stopped improving for more than 2 epochs; mae MUST be listed in the
    # metrics of the compile step
    EarlyStopping(monitor='mae', patience=2),
    # saves the current weights after every epoch
    # only overwrite the model file when val_loss has improved
    ModelCheckpoint('weather__v6.h5', monitor='val_loss', save_best_only=True)
]
history6 = model6.fit(train_gen,
                      steps_per_epoch=500,
pickle_in = open("X.pickle", "rb")
X = pickle.load(pickle_in)

pickle_in = open("y.pickle", "rb")
y = pickle.load(pickle_in)

X = X / 255.0
y = np.array(y)  # Keras expects arrays, not Python lists (np assumed imported)

model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors

model.add(Dense(64))

model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X, y, batch_size=32, epochs=3, validation_split=0.3)

training = []
output_empty = [0] * len(classes)
for document in documents:
    bag = []
    word_patterns = document[0]
    word_patterns = [lemmatizer.lemmatize(word.lower()) for word in word_patterns]
    for word in words:
        bag.append(1 if word in word_patterns else 0)
    output_row = list(output_empty)
    output_row[classes.index(document[1])] = 1
    training.append([bag, output_row])

random.shuffle(training)
training = numpy.array(training, dtype=object)  # rows hold two lists of different lengths

train_x = list(training[:, 0])
train_y = list(training[:, 1])

model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))

sgd = SGD(learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

hist = model.fit(numpy.array(train_x), numpy.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbot_model.h5')  # save() takes no history argument
print("Done")
Example #35
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(LSTM(128, activation='tanh', input_shape=(train_x.shape[1:])))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(2, activation="softmax"))

opt = tf.keras.optimizers.Adam(learning_rate=0.001, decay=1e-6)

model.compile(loss='sparse_categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

tensorboard = TensorBoard(log_dir=f"logs/{NAME}")

# Save the Models
filepath = "RNN_Final-{epoch:02d}-{val_acc:.3f}"  # unique file name that will include the epoch and the validation acc for that epoch
# the keyword arguments belong to ModelCheckpoint, not str.format
checkpoint = ModelCheckpoint("models/{}.model".format(filepath), monitor='val_acc',
                             verbose=1, save_best_only=True, mode='max')  # saves only the best ones

history = model.fit(
    train_x, train_y,
    batch_size=BATCH_SIZE,
    epochs=EPOCHS,
    validation_data=(validation_x, validation_y),
    callbacks=[tensorboard, checkpoint])
Example #36
model = Sequential()
model.add(
    Conv2D(16,
           kernel_size=(5, 5),
           activation='relu',
           kernel_initializer='he_normal',
           input_shape=(28, 28, 1)))  # 28x28x1 -> 24x24x16 (opening reconstructed from the shape comments)

model.add(MaxPooling2D(pool_size=(2, 2)))  # 24x24x16 -> 12x12x16

model.add(
    Conv2D(64,
           kernel_size=(5, 5),
           activation='relu',
           kernel_initializer='he_normal'))  # 12x12x16 -> 8x8x64

model.add(MaxPooling2D(pool_size=(2, 2)))  # 8x8x64 -> 4x4x64

model.add(Flatten())  # 4x4x64-> 1024
model.add(Dense(10, activation='softmax'))  # 1024 -> 10

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])

# inspect the constructed model
# SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))

early_stopping = EarlyStopping(patience=1, verbose=1)
model.fit(x=x_train,
          y=y_train,
          batch_size=128,
          epochs=100,
          verbose=1,
          validation_data=(x_test, y_test),
          callbacks=[early_stopping])
Example #37
from tensorflow.keras.callbacks import TensorBoard

(X_train,y_train), (X_test, y_test) = mnist.load_data()

X_train = X_train.reshape(60000,28,28,1).astype('float32')
X_test = X_test.reshape(10000,28,28,1).astype('float32')

X_train /= 255
X_test /= 255

n_classes = 10
y_train = keras.utils.to_categorical(y_train, n_classes)
y_test = keras.utils.to_categorical(y_test, n_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=(28,28,1)))
model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

tensor_board = TensorBoard('./logs/LeNet-MNIST-1')

model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=1, validation_data=(X_test,y_test), callbacks=[tensor_board])

Example #38
dense1 = 70
dense2 = 25
dropout1 = 0.15
dropout2 = 0.2

model = Sequential()

model.add(Dense(dense1, activation="relu"))
model.add(Dropout(dropout1))

model.add(Dense(dense2, activation="relu"))
model.add(Dropout(dropout2))

model.add(Dense(1, activation="sigmoid"))

model.compile(Adam(learning_rate=lr), loss="binary_crossentropy", metrics=["accuracy"])

log_dir = "logs\\fit\\" + f"lr={lr} dense ({dense1}, {dense2}) drop ({dropout1}, {dropout2}) " + str(
    int(time.time()))
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
model_saver = tf.keras.callbacks.ModelCheckpoint('saved_model',
                                                 monitor='accuracy',
                                                 verbose=1,
                                                 save_best_only=True,
                                                 save_weights_only=False,
                                                 mode='auto',
                                                 save_freq='epoch')

model.fit(X, y, epochs=150, callbacks=[model_saver])

print(model.evaluate(X, y))
Example #39
from tensorflow.keras.layers import Dense, Activation


model = Sequential()


model.add(Dense(8,
                activation=keras.activations.sigmoid,
                ))
model.add(Dense(3,
                activation=keras.activations.sigmoid,
                ))

model.compile(
              # tf.train.AdamOptimizer is TF1-only; use the tf.keras optimizer
              optimizer=tf.keras.optimizers.Adam(0.001),
              # loss=keras.losses.categorical_crossentropy,
              loss=keras.losses.mse,
              metrics=[keras.metrics.binary_accuracy]
              )

# This is the process I used to train my weights
model.fit(x_train, y_train, epochs=2000)
myWeights = model.get_weights()
np.set_printoptions(suppress=True)
np.set_printoptions(precision=2)
print('myWeights =', myWeights)

# These are the weights I got, pretty-printed
# myWeights = [
# #     # first layer, 7x8
#     array([[ 1.2 , -1.16, -1.97,  2.16,  0.97,  0.86, -1.2 ,  1.12],
#        [ 1.21, -1.17, -1.97,  2.16,  0.84,  0.76, -1.19,  1.22],
Example #40
y = df_new_columns['survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

# Neural network
classifier = Sequential()
classifier.add(
    Dense(units=32,
          kernel_initializer='uniform',
          activation='relu',
          input_dim=6))
# classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
# classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
classifier.add(
    Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='rmsprop',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

# Train on the training data
classifier.fit(X_train, y_train, batch_size=128, epochs=100)

# Measure the model's accuracy
score = classifier.evaluate(X_test, y_test)
print(score)

# An alternative way to assess the model's accuracy
prediction = classifier.predict(X).tolist()
y_prediction = pd.Series(prediction)
df_new_columns['y_prediction'] = y_prediction
df_new_columns['y_prediction'] = df_new_columns['y_prediction'].str.get(0)