# Imports assumed by this snippet (not shown in the original excerpt)
from tensorflow import keras
from tensorflow.keras.layers import Input, Flatten, Dense, Reshape
from tensorflow.keras.optimizers import Adam

learningRate = 0.001
decay = 1e-6

"""# Preprocessing 
dividing all images by 255
"""

xTrain = xTrain / 255.0
xTest = xTest / 255.0

"""# Building model"""

# Encoder
encoderInput = Input(shape=(28, 28, 1))         # 784 features
x = Flatten()(encoderInput)
x = Dense(128, activation='relu')(x)            # 128 features
encoderOutput = Dense(64, activation='relu')(x) # 64 features
encoder = keras.Model(encoderInput, encoderOutput, name="encoder")     # encoder model

# Decoder
decoderInput = Dense(128, activation='relu')(encoderOutput)
x = Dense(28*28*1, activation='relu')(decoderInput)
decoderOutput = Reshape((28, 28, 1))(x)

# Autoencoder
autoencoder = keras.Model(encoderInput, decoderOutput, name="autoencoder")
autoencoder.summary()

"""# Training model"""

optimizer = Adam(learning_rate=learningRate, decay=decay)
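
# A minimal training sketch to complete the section (the original stops at the
# optimizer); the epoch count and loss choice are assumptions.
autoencoder.compile(optimizer=optimizer, loss='mse')
autoencoder.fit(xTrain, xTrain, epochs=3, batch_size=32,
                validation_data=(xTest, xTest))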
Example #2
plt.figure(figsize=(10,8))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(x_test[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[y_test[i].argmax()])
plt.show()

input_layer = Input((28, 28))

x = Flatten()(input_layer)

x = Dense(128, activation='relu')(x)

output_layer = Dense(NUM_CLASSES, activation='softmax')(x)

# model = Model(input_layer, output_layer)  # functional-API alternative, left unused in favor of the CNN below
model = keras.Sequential([
    keras.layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(28, 28, 1)),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Conv2D(64, (3, 3), activation='relu'),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Conv2D(64, (3, 3), activation='relu'),
    keras.layers.Flatten(),
    keras.layers.Dense(64, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
Example #3
# Reconstructed opening (the original snippet is truncated); the first conv
# block's filter count is an assumption.
model = Sequential()
model.add(Conv2D(16, kernel_size=(3, 3),
           activation='relu',
           input_shape=x_train[0].shape))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(96, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())  # Flattening the 2D arrays for fully connected layers

model.add(Dense(512, activation='relu'))
model.add(Dropout(0.4))

model.add(Dense(256, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(len(CATEGORIES), activation='softmax'))

opt = keras.optimizers.Adam(learning_rate=1e-4)
model.compile(optimizer=opt,
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Import the early stopping callback
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
# Define a callback to monitor val_acc
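# A hedged sketch of the promised callbacks (the original cuts off here);
# note that in TF2 the history key is 'val_accuracy' rather than 'val_acc'.
early_stop = EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True)
checkpoint = ModelCheckpoint('best_model.h5', monitor='val_accuracy', save_best_only=True)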

# Fragment fused in from a separate snippet: a Conv1D sequence classifier.
# The model object below is reconstructed (the original is truncated above).
modelcnn = Sequential()
modelcnn.add(Conv1D(
    128,
    3,
    padding='same',
))
modelcnn.add(Activation('tanh'))
modelcnn.add(Conv1D(
    512,
    3,
    padding='same',
))
modelcnn.add(Dropout(0.5))
modelcnn.add(Activation('tanh'))
modelcnn.add(Flatten())
modelcnn.add(Dense(len(pd.unique(y))))
modelcnn.add(Activation('softmax'))
opt = tensorflow.keras.optimizers.Adam(learning_rate=0.00001, decay=1e-6)

modelcnn.compile(loss='sparse_categorical_crossentropy',
                 optimizer=opt,
                 metrics=['accuracy'])

cnnhistory = modelcnn.fit(X_trainn,
                          y_train,
                          batch_size=16,
                          epochs=50,
                          validation_data=(X_vall, y_val))

# Check out our train accuracy and validation accuracy over epochs.
train_accuracy = cnnhistory.history['accuracy']
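
# Hedged follow-up: plot train vs. validation accuracy over epochs (assumes
# matplotlib is available as plt, as in the plotting fragment above).
val_accuracy = cnnhistory.history['val_accuracy']
plt.plot(train_accuracy, label='train')
plt.plot(val_accuracy, label='validation')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()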
from sklearn.preprocessing import OneHotEncoder

y_train = y_train.reshape(-1,1) # in reshape, -1 means "infer this dimension"
y_test = y_test.reshape(-1,1)

ohencoder = OneHotEncoder()
ohencoder.fit(y_train)
y_train = ohencoder.transform(y_train).toarray()
y_test = ohencoder.transform(y_test).toarray()

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM

model = Sequential()
model.add(LSTM(50, input_shape=(28, 28)))
model.add(Dense(40, activation='relu'))
model.add(Dense(30, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='softmax'))

model.summary()

# 3. Compile, train
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='loss', patience=10, mode='auto')
model.fit(x_train, y_train, batch_size=128, epochs=70, validation_split=0.2, callbacks=[early_stopping])

# 4. Evaluate, predict
loss, acc = model.evaluate(x_test, y_test, batch_size=128)
y_pred = model.predict(x_test[:-10])
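
# Hedged follow-up: report the metrics and decode the softmax outputs
print('loss:', loss, 'acc:', acc)
print('first predicted classes:', y_pred.argmax(axis=1)[:10])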
# Reconstructed call opening (the snippet is truncated); the generator and
# datagen names are assumptions.
test_generator = test_datagen.flow_from_directory(
    directory="../stanford_dataSet/data/test2/",
    target_size=(input_size, input_size),
    color_mode="rgb",
    batch_size=batch_size,
    class_mode="categorical",
    shuffle=False,
    seed=42)

y_train = train_generator.classes
y_valid = valid_generator.classes
y_train = np_utils.to_categorical(y_train)
y_valid = np_utils.to_categorical(y_valid)

model = Sequential()
model.add(GlobalAveragePooling2D(input_shape=train_inception_v3.shape[1:]))
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(120, activation='softmax'))

from keras.models import model_from_json
json_file = open('../stanford_dataSet/data/model_inception_v3.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)

loaded_model.load_weights("../stanford_dataSet/data/model_inception_v3.h5")
print("Loaded model from disk")

optimizer = RMSprop(learning_rate=0.0001, rho=0.99)
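
# Hedged sketch: a model restored from JSON + weights must be compiled before
# evaluation; the categorical loss matches the softmax output above.
loaded_model.compile(optimizer=optimizer,
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])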
Example #7
with open("X.pickle", 'rb') as file:
    X = pickle.load(file)

with open("y.pickle", 'rb') as file:
    y = pickle.load(file)

# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

lr = 4e-4
dense1 = 70
dense2 = 25
dropout1 = 0.15
dropout2 = 0.2

model = Sequential()

model.add(Dense(dense1, activation="relu"))
model.add(Dropout(dropout1))

model.add(Dense(dense2, activation="relu"))
model.add(Dropout(dropout2))

model.add(Dense(1, activation="sigmoid"))

model.compile(Adam(learning_rate=lr), loss="binary_crossentropy", metrics=["accuracy"])

log_dir = "logs\\fit\\" + f"lr={lr} dense ({dense1}, {dense2}) drop ({dropout1}, {dropout2}) " + str(
    int(time.time()))
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
model_saver = tf.keras.callbacks.ModelCheckpoint('saved_model',
                                                 monitor='accuracy',
                                                 verbose=1)


# Reconstructed function header and file loop (the original snippet is
# truncated here); the loop shape is inferred from the indented body below,
# and assumes os, text_to_word_sequence, one_hot and pad_sequences are imported.
def get_array_from_directory(path):
    m = []
    for filename in os.listdir(path):
        with open(os.path.join(path, filename), encoding='utf-8') as f:
            data = f.read()
            words = set(text_to_word_sequence(data))
            result = one_hot(data, round(len(words) * 1.3))
            m.append(result)

    m = pad_sequences(m, maxlen=2000)
    return m

posarray = get_array_from_directory('D:/AI&ML/data/aclImdb_v1/aclImdb/train/pos')
negarray = get_array_from_directory('D:/AI&ML/data/aclImdb_v1/aclImdb/train/neg')

poslabelarr = np.zeros(len(posarray))
neglabelarr = np.ones(len(negarray))

x_train = np.concatenate((posarray, negarray))

y_train = np.concatenate((poslabelarr, neglabelarr))
y_train = keras.utils.to_categorical(y_train, 2)

model = Sequential()
model.add(Embedding(2000, 128))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(2, activation='sigmoid'))  # with categorical_crossentropy below, softmax would be the conventional choice

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

print(model.summary())

model.fit(x_train, y_train, epochs=25)
model.save('model.h5')
Example #9
# Reconstructed loop header (the snippet is truncated); the per-class feature
# generation (x1_data, x2_data) and the x_data/y_data initialisation are not shown.
for class_idx in range(n_class):
    class_x_data = np.hstack((x1_data, x2_data))
    class_y_data = class_idx * np.ones((n_data, 1))

    x_data = np.vstack((x_data, class_x_data)).astype(np.float32)
    y_data = np.vstack((y_data, class_y_data)).astype(np.int32)
print('x_data.shape: {}, y_data.shape: {}'.format(x_data.shape, y_data.shape))

#input_visualization()
#plt.show()

train_ds = tf.data.Dataset.from_tensor_slices((x_data, y_data))
train_ds = train_ds.shuffle(1000).batch(8)

# Model
model = Sequential()
model.add(Dense(n_class))
model.add(Activation('softmax'))
#model.summary()

loss_object = SparseCategoricalCrossentropy()
optimizer = Adam(learning_rate=0.01)

train_loss = Mean()
train_acc = SparseCategoricalAccuracy()

EPOCHS = 10
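
# Hedged sketch of the (undefined) trainer() and training_reporter() helpers,
# assuming the usual tf.GradientTape pattern over the objects created above.
def trainer():
    for x, y in train_ds:
        with tf.GradientTape() as tape:
            predictions = model(x)
            loss = loss_object(y, predictions)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        train_loss(loss)
        train_acc(y, predictions)

def training_reporter():
    print('loss: {:.4f}  acc: {:.4f}'.format(train_loss.result(), train_acc.result()))
    train_loss.reset_states()
    train_acc.reset_states()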

## Training
for epoch in range(EPOCHS):
    trainer()
    training_reporter()
Example #10
scaled_train_samples = scaler.fit_transform(train_samples.reshape(-1, 1))

# for i in scaled_train_samples:
#     print(i)

#%% Simple tf.keras Sequential Model

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import categorical_crossentropy

#%% Set up model

model = Sequential([
    Dense(units=16, input_shape=(1, ), activation='relu'),
    # Dense(units=8, activation='relu'),
    Dense(units=32, activation='relu'),
    # Dense(units=2048, activation='relu'),
    Dense(units=2, activation='softmax')
])

model.summary()

model.compile(optimizer=Adam(learning_rate=0.0001),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

import datetime
import os
# log_dir = os.getcwd() + "\\logs\\fit\\"
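
# Hedged continuation: wire up TensorBoard and fit. The log path and the
# `train_labels` array are assumptions; the original snippet cuts off here.
from tensorflow.keras.callbacks import TensorBoard
log_dir = os.path.join(os.getcwd(), "logs", "fit",
                       datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard = TensorBoard(log_dir=log_dir)
model.fit(scaled_train_samples, train_labels, epochs=30,
          batch_size=10, validation_split=0.1, callbacks=[tensorboard])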
Example #11
# Reconstructed class header (the snippet is truncated); the class name and
# Sequential base are assumptions consistent with the self.add() calls below.
class SingleOutputModel(Sequential):
    def __init__(self):
        super().__init__()
        self.add(Flatten())
        self.add(Dense(1, activation=sigmoid))
        self.add(Reshape([1]))
Example #12
# Reconstructed grid-search loop headers (the snippet is truncated); the
# variable names follow conv_layer / layer_size / dense_layer used below.
for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            # Building the Model
            model = Sequential()

            model.add(Conv2D(layer_size, (3, 3), input_shape=X.shape[1:]))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            for l in range(conv_layer - 1):
                model.add(Conv2D(layer_size, (3, 3)))
                model.add(Activation('relu'))
                model.add(MaxPooling2D(pool_size=(2, 2)))

            model.add(Flatten())

            for l in range(dense_layer):
                model.add(Dense(512))
                model.add(Activation('relu'))
                model.add(Dropout(0.2))

            model.add(Dense(1))
            model.add(Activation('sigmoid'))

            model.compile(loss='binary_crossentropy',
                          optimizer='adam',
                          metrics=['accuracy'])

            model.fit(X,
                      y,
                      batch_size=32,
                      epochs=10,
                      validation_split=0.3)  # fit call closed; the original snippet was truncated here
# print('===================')
# print(x[:5])
# print(y[:10])

# print(np.max(x), np.min(x))
# print(dataset.feature_names)
# print(dataset.DESCR)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state= 104, shuffle=True)

#2 Model construction
from tensorflow.keras.models import Sequential,Model
from tensorflow.keras.layers import Input,Dense

input1 = Input(shape=(13,))
dense1 = Dense(120, activation='relu')(input1)
dense1 = Dense(80)(dense1)
dense1 = Dense(60)(dense1)
dense1 = Dense(30)(dense1)
dense1 = Dense(7)(dense1)
dense1 = Dense(7)(dense1)
dense1 = Dense(5)(dense1)
dense1 = Dense(4)(dense1)
output1 = Dense(1)(dense1)
model = Model(inputs = input1, outputs = output1)

#3 Compile, train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.fit(x_train, y_train, epochs=1000, batch_size=4, validation_split=0.2, verbose=1)

#4 Evaluate, predict
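# Hedged sketch for step 4 (the original snippet ends at the heading)
loss, mae = model.evaluate(x_test, y_test, batch_size=4)
y_pred = model.predict(x_test)
print('loss:', loss, 'mae:', mae)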
Example #14
    (train_data, test_data, train_labels,
     test_labels) = train_test_split(data, labels, test_size=0.2)

    # train_data = data
    # test_data = data
    # train_labels = labels
    # test_labels = labels

    label_lb = LabelBinarizer()
    label_lb.fit(tags)
    train_labels = label_lb.transform(train_labels)
    test_labels = label_lb.transform(test_labels)

    model = Sequential([
        Dense(128, input_shape=(len(train_data[0]), ), activation='relu'),
        Dense(64, activation='relu'),
        Dropout(0.5),
        Dense(len(train_labels[0]), activation='softmax')
    ])

    model.compile(optimizer='adam',
                  loss=CategoricalCrossentropy(),
                  metrics=['accuracy'])

    time_start = time.time()
    history = model.fit(train_data,
                        train_labels,
                        epochs=30,
                        batch_size=5,
                        validation_data=(test_data, test_labels))  # call closed; the original snippet was truncated here
Example #15
eb_layer = Embedding(maxwords,
                     embed_dim,
                     embeddings_initializer=Constant(embedding_matrix),
                     input_length=maxseqlen,
                     mask_zero=True,
                     trainable=False)
model = Sequential()
model.add(eb_layer)
model.add(SpatialDropout1D(0.30))
model.add(
    Bidirectional(
        LSTM(lstm_out,
             dropout=0.2,
             recurrent_dropout=0.2,
             return_sequences=True)))
model.add(TimeDistributed(Dense(2, activation="softmax")))

adam = optimizers.Adam(learning_rate=0.001)
model.compile(loss="binary_crossentropy",
              optimizer=adam,
              metrics=["acc"],
              sample_weight_mode="temporal")
print(model.summary())

batch_size = 32
#class_weights = {0: 1., 1: 5.}
weighting = 4
model.fit(x_train,
          y_train,
          validation_split=0.1,
          epochs=10)  # fit call closed; the original snippet was truncated here


# Fragment fused in from a separate snippet: an embedding wrapper used by the
# collaborative-filtering model below. The class header and __init__ are reconstructed.
class EmbeddingLayer:
    def __init__(self, n_item, n_factors):
        self.n_item = n_item
        self.n_factors = n_factors

    def __call__(self, x):
        x = Embedding(self.n_item, self.n_factors, embeddings_initializer='he_normal',
                      embeddings_regularizer=l2(1e-6))(x)
        x = Reshape((self.n_factors,))(x)
        return x


user = Input(shape=(1,))
u = EmbeddingLayer(n_users, n_factors)(user)

movie = Input(shape=(1,))
m = EmbeddingLayer(n_movies, n_factors)(movie)

x = Concatenate()([u, m])
x = Dropout(0.05)(x)

x = Dense(10, kernel_initializer='he_normal')(x)
x = Activation('relu')(x)
x = Dropout(0.5)(x)

x = Dense(1, kernel_initializer='he_normal')(x)
x = Activation('sigmoid')(x)
x = Lambda(lambda x: x * (max_rating - min_rating) + min_rating)(x)

model = Model(inputs=[user, movie], outputs=x)
opt = Adam(learning_rate=0.001)
model.compile(loss='mean_squared_error', optimizer=opt)

print(model.summary())

history = model.fit(x=X_train_array, y=y_train, batch_size=64, epochs=5, verbose=1,
                    validation_data=(X_test_array, y_test))
# Reconstructed opening (the snippet is truncated): the layer list below
# belongs to a Sequential model, as the closing bracket confirms.
model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu',
           input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    MaxPooling2D(),

    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),

    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),

    Conv2D(128, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    

    Flatten(),
    Dense(256, activation='relu'),
    Dropout(0.5),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid')
])
op = Adam(learning_rate=0.0003)
model.compile(optimizer=op,
              loss='binary_crossentropy',
              metrics=['accuracy'])

history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=total_train // batch_size,
    epochs=epochs,
    validation_data=val_data_gen)  # call closed; the original snippet was truncated here
Example #18
# Reconstructed opening (the snippet is truncated); units=100 is an assumption
regressor = Sequential()
regressor.add(LSTM(units=100, return_sequences=True,
                   input_shape=(previsores.shape[1],
                      1)))  # We use LSTM, which is a type of recurrent layer
regressor.add(Dropout(0.3))

regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.3))

regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.3))

regressor.add(
    LSTM(units=50)
)  # We set return_sequences=True when passing data from one recurrent layer to another; here the next layer is dense, so it is omitted
regressor.add(Dropout(0.3))

regressor.add(
    Dense(units=1, activation='linear')
)  # Output layer with a linear activation; we don't want any transformation on the output

regressor.compile(
    optimizer='rmsprop',
    loss='mean_squared_error',
    metrics=['mean_absolute_error'
             ])  # RMSprop is the optimizer most often recommended for recurrent networks
regressor.fit(previsores, preco_real, epochs=100, batch_size=32)

# ==== Structure of the test data ====
base_teste = pd.read_csv('petr4_teste.csv')
preco_real_teste = base_teste.iloc[:, 1:2].values  # We take only the first column
base_completa = pd.concat(
    (base['Open'], base_teste['Open']))  # call closed; the original snippet was truncated here
Example #19
class C3D_Model(tf.keras.Model):  # reconstructed class header (snippet truncated); the base class is an assumption
    def __init__(self, num_class=2, weight_decay=0.005):
        super(C3D_Model, self).__init__()
        # 64, 128, 128, 256, 256
        self.weight_decay = weight_decay
        self.conv3d_1 = Conv3D(64, (3, 3, 3),
                               strides=(1, 1, 1),
                               padding='same',
                               activation='relu',
                               kernel_regularizer=l2(self.weight_decay))
        self.maxpooling3d_1 = MaxPooling3D((2, 2, 1),
                                           strides=(2, 2, 1),
                                           padding='same')

        self.conv3d_2 = Conv3D(128, (3, 3, 3),
                               strides=(1, 1, 1),
                               padding='same',
                               activation='relu',
                               kernel_regularizer=l2(self.weight_decay))
        self.maxpooling3d_2 = MaxPooling3D((2, 2, 2),
                                           strides=(2, 2, 2),
                                           padding='same')

        self.conv3d_3 = Conv3D(128, (3, 3, 3),
                               strides=(1, 1, 1),
                               padding='same',
                               activation='relu',
                               kernel_regularizer=l2(self.weight_decay))
        self.maxpooling3d_3 = MaxPooling3D((2, 2, 2),
                                           strides=(2, 2, 2),
                                           padding='same')

        self.conv3d_4 = Conv3D(256, (3, 3, 3),
                               strides=(1, 1, 1),
                               padding='same',
                               activation='relu',
                               kernel_regularizer=l2(self.weight_decay))
        self.maxpooling3d_4 = MaxPooling3D((2, 2, 2),
                                           strides=(2, 2, 2),
                                           padding='same')

        self.conv3d_5 = Conv3D(256, (3, 3, 3),
                               strides=(1, 1, 1),
                               padding='same',
                               activation='relu',
                               kernel_regularizer=l2(self.weight_decay))
        self.maxpooling3d_5 = MaxPooling3D((2, 2, 2),
                                           strides=(2, 2, 2),
                                           padding='same')

        self.flatten = Flatten()
        self.dropout_0 = Dropout(0.5)
        self.dense_1 = Dense(1024,
                             activation='relu',
                             kernel_regularizer=l2(self.weight_decay))
        self.dropout_1 = Dropout(0.5)
        self.dense_2 = Dense(512,
                             activation='relu',
                             kernel_regularizer=l2(self.weight_decay))
        self.dropout_2 = Dropout(0.5)
        self.output_tensor = Dense(num_class,
                                   activation='softmax',
                                   kernel_regularizer=l2(self.weight_decay))
Example #20
def f(env_name):
    import os  # needed for os.makedirs below (missing from the original imports)
    import time
    # importing frameworks/libraries
    import tensorflow as tf
    from tensorflow.keras.layers import Input, Dense, Lambda
    from tensorflow.keras.models import Model
    import pandas as pd
    import matplotlib.pyplot as plt
    import gym
    import numpy as np
    from timeit import default_timer as timer

    import pybulletgym
    import pybullet_envs
    '''gpus = tf.config.experimental.list_physical_devices('GPU')
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])'''
    tf.keras.backend.set_floatx('float64')
    # declaring constants
    gamma = 0.99
    upds = [64, 256, 1024]
    lamb = 0.95
    # max_episodes = 3000
    tot_steps = 1000000
    env = gym.make(env_name)
    env.seed(15)
    np.random.seed(24)
    tf.random.set_seed(34)
    node = 64
    policy_learning_rates = [1e-1, 1e-2, 1e-3, 1e-4]
    mult_lrs = [1, 2]

    # getting the dimension of the environment
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    action_bound = env.action_space.high[0]
    # act_bound_low = env.action_space.low[0]
    # print(act_bound, act_bound_low)
    std_bound = [1e-2, 1.0]

    for upd in upds:
        for policy_lr in policy_learning_rates:
            for mult_lr in mult_lrs:
                # models
                # policy model
                state_input = Input((state_dim, ))
                dense_1 = Dense(node, activation='relu')(state_input)
                dense_2 = Dense(node, activation='relu')(dense_1)
                out_mu = Dense(action_dim, activation='tanh')(dense_2)
                mu_output = Lambda(lambda x: x * action_bound)(out_mu)
                std_output = Dense(action_dim, activation='softplus')(dense_2)
                policy_model = tf.keras.models.Model(state_input,
                                                     [mu_output, std_output])
                policy_model_optimize = tf.keras.optimizers.Adam(policy_lr)

                # value model
                value_model = tf.keras.Sequential([
                    Input((state_dim, )),
                    Dense(node, activation='relu'),
                    Dense(node, activation='relu'),
                    Dense(1, activation='linear')
                ])
                value_lr = mult_lr * policy_lr
                value_model_optimizer = tf.keras.optimizers.Adam(value_lr)

                ep_score = 0
                ep_step = 0
                episode = 0
                scores = []
                steps = []

                state_batch = []
                action_batch = []
                td_target_batch = []
                advantage_batch = []
                done = False

                state = env.reset()

                print(
                    "Starting to learn {} using A2C with policy learning rate {}, value learning rate {}, and upd {}."
                    .format(env_name, policy_lr, value_lr, upd))
                start = timer()

                for step in range(tot_steps):
                    state = np.reshape(state, [1, state_dim])
                    mu, std = policy_model.predict(state)
                    action = np.random.normal(mu[0], std[0], size=action_dim)
                    action = np.clip(action, -action_bound, action_bound)

                    next_state, reward, done, _ = env.step(action)

                    state = np.reshape(state, [1, state_dim])
                    action = np.reshape(action, [1, action_dim])
                    next_state = np.reshape(next_state, [1, state_dim])
                    reward = np.reshape(reward, [1, 1])

                    if done:
                        td_target = reward
                    else:
                        v_value = value_model.predict(
                            (np.reshape(next_state, [1, state_dim])))
                        td_target = np.reshape(reward + gamma * v_value[0],
                                               [1, 1])

                    advantage = td_target - value_model.predict(state)

                    state_batch.append(state)
                    action_batch.append(action)
                    td_target_batch.append(td_target)
                    advantage_batch.append(advantage)

                    if len(state_batch) >= upd or done:

                        states_arr = state_batch[0]
                        for elem in state_batch[1:]:
                            states_arr = np.append(states_arr, elem, axis=0)

                        actions_arr = action_batch[0]
                        for elem in action_batch[1:]:
                            actions_arr = np.append(actions_arr, elem, axis=0)

                        td_targets_arr = td_target_batch[0]
                        for elem in td_target_batch[1:]:
                            td_targets_arr = np.append(td_targets_arr,
                                                       elem,
                                                       axis=0)

                        advantages_arr = advantage_batch[0]
                        for elem in advantage_batch[1:]:
                            advantages_arr = np.append(advantages_arr,
                                                       elem,
                                                       axis=0)

                        # gradient and loss calculation
                        # policy
                        with tf.GradientTape() as policy_tape:
                            mus, stds = policy_model(states_arr, training=True)
                            stds = tf.clip_by_value(stds, std_bound[0],
                                                    std_bound[1])
                            variances = stds**2  # renamed from `vars` to avoid shadowing the builtin
                            log_policy_pdf = -0.5 * (actions_arr - mus) ** 2 / \
                                             variances - 0.5 * tf.math.log(variances * 2 * np.pi)
                            log_policy_pdf = tf.reduce_sum(log_policy_pdf,
                                                           1,
                                                           keepdims=True)

                            policy_loss = log_policy_pdf * advantages_arr
                            policy_loss = tf.reduce_sum(-policy_loss)
                        policy_grads = policy_tape.gradient(
                            policy_loss, policy_model.trainable_variables)
                        policy_model_optimize.apply_gradients(
                            zip(policy_grads,
                                policy_model.trainable_variables))

                        # value
                        with tf.GradientTape() as value_tape:
                            v_pred = value_model(states_arr, training=True)
                            assert v_pred.shape == td_targets_arr.shape
                            mse = tf.keras.losses.MeanSquaredError()
                            value_loss = mse(v_pred,
                                             tf.stop_gradient(td_targets_arr))
                        value_grads = value_tape.gradient(
                            value_loss, value_model.trainable_variables)
                        value_model_optimizer.apply_gradients(
                            zip(value_grads, value_model.trainable_variables))

                        state_batch = []
                        action_batch = []
                        td_target_batch = []
                        advantage_batch = []

                    ep_score += reward[0][0]
                    ep_step += 1
                    state = next_state[0]

                    if done:
                        scores.append(ep_score)
                        steps.append(ep_step)
                        episode += 1
                        print("Episode: {} Step: {} Reward: {}".format(
                            episode, step, ep_score))
                        ep_score = 0
                        ep_step = 0
                        state = env.reset()

                end = timer()

                print(
                    'Finished learning {} using A2C with policy learning rate {}, value learning rate {}, and upd {} after {} hours.'
                    .format(env_name, policy_lr, value_lr, upd,
                            (end - start) / 3600.0))

                log_data = {'r': scores, 'l': steps}  # renamed from `dict` to avoid shadowing the builtin
                df = pd.DataFrame(log_data)
                logdir = 'logs/a2c/envs/' + env_name + '/lrs/'
                fname = 'plr=' + str(policy_lr) + 'mult=' + str(
                    mult_lr) + 'upd=' + str(upd) + '.csv'
                os.makedirs(logdir, exist_ok=True)
                df.to_csv(logdir + fname)

    for upd in upds:
        for policy_lr in policy_learning_rates:
            for mult_lr in mult_lrs:
                df = pd.read_csv('logs/a2c/envs/' + env_name + '/lrs/plr=' +
                                 str(policy_lr) + 'mult=' + str(mult_lr) +
                                 'upd=' + str(upd) + '.csv')
                x = np.cumsum(df['l'])
                plt.scatter(x,
                            df['r'],
                            s=5,
                            label='plr=' + str(policy_lr) + 'mult=' +
                            str(mult_lr),
                            alpha=0.4)
                plt.legend()

    plt.xlabel('Steps')
    plt.ylabel('Episodic Score')
    plt.title('Experiment on ' + env_name)

    logdir = 'logs/a2c/envs/' + env_name + '/lrs/'
    fname = env_name + '_opt.png'
    plt.savefig(logdir + fname)
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

# this is the size of our encoded representations
encoding_dim = 64  # 64 floats -> compression factor of 12.25, assuming the input is 784 floats

# this is our input placeholder
input_img = Input(shape=(784,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input_img)
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)


# "decoded" is the lossy reconstruction of the input
decoded = Dense(784, activation='sigmoid')(encoded)

# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)

# create a placeholder for an encoded (64-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))

autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

from tensorflow.keras.datasets import mnist
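
# Hedged continuation (the original cuts off at the import): the canonical
# MNIST preparation and training loop for this autoencoder.
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), 784))
x_test = x_test.reshape((len(x_test), 784))
autoencoder.fit(x_train, x_train, epochs=50, batch_size=256, shuffle=True,
                validation_data=(x_test, x_test))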
Example #22
def get_age_model(DATA):

    feed_forward_size = 2048
    max_seq_len = 150
    model_dim = 256 + 256 + 64 + 32 + 8 + 16

    input_creative_id = Input(shape=(max_seq_len, ), name='creative_id')
    x1 = Embedding(
        input_dim=NUM_creative_id + 1,
        output_dim=256,
        weights=[DATA['creative_id_emb']],
        trainable=args.not_train_embedding,
        #    trainable=False,
        input_length=150,
        mask_zero=True)(input_creative_id)
    # encodings = PositionEncoding(model_dim)(x1)
    # encodings = Add()([embeddings, encodings])

    input_ad_id = Input(shape=(max_seq_len, ), name='ad_id')
    x2 = Embedding(
        input_dim=NUM_ad_id + 1,
        output_dim=256,
        weights=[DATA['ad_id_emb']],
        trainable=args.not_train_embedding,
        #    trainable=False,
        input_length=150,
        mask_zero=True)(input_ad_id)

    input_product_id = Input(shape=(max_seq_len, ), name='product_id')
    x3 = Embedding(
        input_dim=NUM_product_id + 1,
        output_dim=32,
        weights=[DATA['product_id_emb']],
        trainable=args.not_train_embedding,  #
        #    trainable=False,
        input_length=150,
        mask_zero=True)(input_product_id)

    input_advertiser_id = Input(shape=(max_seq_len, ), name='advertiser_id')
    x4 = Embedding(
        input_dim=NUM_advertiser_id + 1,
        output_dim=64,
        weights=[DATA['advertiser_id_emb']],
        trainable=args.not_train_embedding,  #
        #    trainable=False,
        input_length=150,
        mask_zero=True)(input_advertiser_id)

    input_industry = Input(shape=(max_seq_len, ), name='industry')
    x5 = Embedding(
        input_dim=NUM_industry + 1,
        output_dim=16,
        weights=[DATA['industry_emb']],
        trainable=args.not_train_embedding,
        #    trainable=False,
        input_length=150,
        mask_zero=True)(input_industry)

    input_product_category = Input(shape=(max_seq_len, ),
                                   name='product_category')
    x6 = Embedding(
        input_dim=NUM_product_category + 1,
        output_dim=8,
        weights=[DATA['product_category_emb']],
        trainable=args.not_train_embedding,
        #    trainable=False,
        input_length=150,
        mask_zero=True)(input_product_category)

    # (bs, 100, 128*2)
    encodings = layers.Concatenate(axis=2)([x1, x2, x3, x4, x5, x6])
    # (bs, 100)
    masks = tf.equal(input_creative_id, 0)

    # (bs, 100, 128*2)
    attention_out = MultiHeadAttention(
        8, 79)([encodings, encodings, encodings, masks])

    # Add & Norm
    attention_out += encodings
    attention_out = LayerNormalization()(attention_out)
    # Feed-Forward
    ff = PositionWiseFeedForward(model_dim, feed_forward_size)
    ff_out = ff(attention_out)
    # Add & Norm
    # ff_out is (bs, 100, 128), but attention_out is (bs, 100, 256)
    ff_out += attention_out
    encodings = LayerNormalization()(ff_out)
    encodings = GlobalMaxPooling1D()(encodings)
    encodings = Dropout(0.2)(encodings)

    # output_gender = Dense(2, activation='softmax', name='gender')(encodings)
    output_age = Dense(10, activation='softmax', name='age')(encodings)

    model = Model(inputs=[
        input_creative_id, input_ad_id, input_product_id, input_advertiser_id,
        input_industry, input_product_category
    ],
                  outputs=[output_age])

    model.compile(
        optimizer=optimizers.Adam(2.5e-4),
        loss={
            # 'gender': losses.CategoricalCrossentropy(from_logits=False),
            'age': losses.CategoricalCrossentropy(from_logits=False)  #
        },
        # loss_weights=[0.4, 0.6],
        metrics=['accuracy'])
    return model
x_train = x_train.reshape(x_train.shape[0], 30, 1)
x_test = x_test.reshape(x_test.shape[0], 30, 1)


# 2. Model construction
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Dense, Flatten, Dropout

model = Sequential()
model.add(Conv1D(400, 3, padding='same', input_shape=(30, 1)))
model.add(Dropout(0.2))
model.add(MaxPooling1D(2))
model.add(Conv1D(300, 3))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(200, activation='sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(120, activation='sigmoid'))
model.add(Dense(60, activation='sigmoid'))
model.add(Dense(30, activation='sigmoid'))
model.add(Dense(1, activation='sigmoid'))

# 3. Compile, train
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
early_stopping = EarlyStopping(monitor='val_loss', patience=30, mode='auto')
modelpath= '../data/modelcheckpoint/k54_conv1d_cancer_checkpoint.hdf5'
cp = ModelCheckpoint(modelpath, monitor='val_loss', save_best_only=True, mode='auto')
model.fit(x_train, y_train, epochs=400, validation_split=0.2, callbacks=[early_stopping, cp], batch_size=16)

# 4. Evaluate, predict
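# Hedged sketch for step 4 (the original snippet is truncated here)
loss, acc = model.evaluate(x_test, y_test, batch_size=16)
print('loss:', loss, 'acc:', acc)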
Example #24
# Reconstructed opening (the snippet is truncated); the first conv block's
# filter count follows the 64/128 pattern below and is an assumption.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))

model.add(Conv2D(64, (3, 3), padding='same',
                 activation='relu'))  #padding default=valid
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=2))  #pool_size default=2
model.add(Dropout(0.2))

model.add(Conv2D(128, (3, 3), padding='same',
                 activation='relu'))  #padding default=valid
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=2))  #pool_size default=2
model.add(Dropout(0.2))

model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))  # output layer

#3. Compile, train
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau

es = EarlyStopping(monitor='loss', patience=10, mode='auto')
r_lr = ReduceLROnPlateau(monitor='val_loss', patience=3, factor=0.5, verbose=1)

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# batch_size: 32 doesn't always work well...
# especially with image files... feeding more per batch might help the model learn better...
Example #25
def build_model(
    img_shape: Tuple[int, int, int],
    num_classes: int,
    optimizer: "Type[tf.keras.optimizers.Optimizer]",  # the optimizer class is passed and instantiated below
    learning_rate: float,
    filter_block1: int,
    kernel_size_block1: int,
    filter_block2: int,
    kernel_size_block2: int,
    filter_block3: int,
    kernel_size_block3: int,
    dense_layer_size: int,
    kernel_initializer: tf.keras.initializers.Initializer,
    activation_cls: tf.keras.layers.Activation,
    dropout_rate: float
) -> Model:
    input_img = Input(shape=img_shape)

    x = Conv2D(
        filters=filter_block1,
        kernel_size=kernel_size_block1,
        padding="same",
        kernel_initializer=kernel_initializer
    )(input_img)
    x = activation_cls(x)
    x = Conv2D(
        filters=filter_block1,
        kernel_size=kernel_size_block1,
        padding="same", kernel_initializer=kernel_initializer
    )(x)
    if dropout_rate:
        x = Dropout(rate=dropout_rate)(x)
    x = activation_cls(x)
    x = MaxPool2D()(x)

    x = Conv2D(
        filters=filter_block2,
        kernel_size=kernel_size_block2,
        padding="same",
        kernel_initializer=kernel_initializer
    )(x)
    x = activation_cls(x)
    x = Conv2D(
        filters=filter_block2,
        kernel_size=kernel_size_block2,
        padding="same",
        kernel_initializer=kernel_initializer
    )(x)
    if dropout_rate:
        x = Dropout(rate=dropout_rate)(x)
    x = activation_cls(x)
    x = MaxPool2D()(x)

    x = Conv2D(
        filters=filter_block3,
        kernel_size=kernel_size_block3,
        padding="same",
        kernel_initializer=kernel_initializer
    )(x)
    x = activation_cls(x)
    x = Conv2D(
        filters=filter_block3,
        kernel_size=kernel_size_block3,
        padding="same",
        kernel_initializer=kernel_initializer
    )(x)
    if dropout_rate:
        x = Dropout(rate=dropout_rate)(x)
    x = activation_cls(x)
    x = MaxPool2D()(x)

    x = Flatten()(x)
    x = Dense(
        units=dense_layer_size,
        kernel_initializer=kernel_initializer
    )(x)
    x = activation_cls(x)
    x = Dense(
        units=num_classes,
        kernel_initializer=kernel_initializer
    )(x)
    y_pred = Activation("softmax")(x)

    model = Model(
        inputs=[input_img],
        outputs=[y_pred]
    )

    opt = optimizer(learning_rate=learning_rate)

    model.compile(
        loss="categorical_crossentropy",
        optimizer=opt,
        metrics=["accuracy"]
    )

    return model
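
# Hedged usage sketch for build_model; every argument value below is an
# assumption chosen only to show the call shape.
model = build_model(
    img_shape=(32, 32, 3),
    num_classes=10,
    optimizer=tf.keras.optimizers.Adam,
    learning_rate=1e-3,
    filter_block1=32, kernel_size_block1=3,
    filter_block2=64, kernel_size_block2=3,
    filter_block3=128, kernel_size_block3=3,
    dense_layer_size=128,
    kernel_initializer="glorot_uniform",
    activation_cls=Activation("relu"),
    dropout_rate=0.25,
)
model.summary()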

# Fragment fused in from a separate, class-based snippet (the enclosing class
# is not shown in this excerpt); kept as a method, as in the original.
    def make_autoencoder_model(self, d):

        #################################################
        # Hyper-parameters
        #################################################
        encoding_dim = 2
        hidden_dim = self.hidden_dim
        architecture = self.architecture

        #activation='elu'
        activation='leaky_relu'
        optimizer=self.optimizer
        loss=self.loss        
        #optimizer=optimizers.Adam(lr=0.01)
        #loss='mean_squared_error'
        #################################################

        out_activation = 'sigmoid' if loss=='binary_crossentropy' else 'linear'
        activation = None if activation=='leaky_relu' else activation

        #################################################
        # Auto-encoder
        #################################################
        input_img = Input(shape=(d,))


        ####### Encoder #######
        encoded = input_img        
        #   Layer
        if architecture >= 1:
            encoded = Dense(hidden_dim, activation=activation)(encoded)
            if activation is None: encoded = layers.LeakyReLU()(encoded)

        #   Layer
        if architecture >= 2:        
            encoded = Dense(hidden_dim//2, activation=activation)(encoded)
            if activation is None: encoded = layers.LeakyReLU()(encoded)
            
        #   Layer
        if architecture >= 3:
            encoded = Dense(hidden_dim//4, activation=activation)(encoded)
            if activation is None: encoded = layers.LeakyReLU()(encoded)                    

        #   Layer
        encoded = Dense(encoding_dim, activation=out_activation)(encoded)


        ####### Decoder #######
        decoded = encoded
        #   Layer
        if architecture >= 3:        
            decoded = Dense(hidden_dim//4, activation=activation)(decoded)
            if activation is None: decoded = layers.LeakyReLU()(decoded)   
                    
        #   Layer
        if architecture >= 2:        
            decoded = Dense(hidden_dim//2, activation=activation)(decoded)
            if activation is None: decoded = layers.LeakyReLU()(decoded)

        #   Layer
        if architecture >= 1:
            decoded = Dense(hidden_dim, activation=activation)(decoded)
            if activation is None: decoded = layers.LeakyReLU()(decoded)         

        #   Layer
        decoded = Dense(d, activation=out_activation)(decoded)
        
        
        ####### Make model #######
        autoencoder = Model(input_img, decoded)
        encoder = Model(input_img, encoded)
        #decoder = Model(encoded, decoded)
        
        autoencoder.compile(optimizer=optimizer, loss=loss)

        return autoencoder, encoder, decoded  # note: decoded is the output tensor, not a decoder Model (see the commented line above)
NAME = "Cats-vs-dogs-{}-CNN-layers-sized{}-and-Denselayer-sized-{}-{}".format(conv_layers, layer_sizes, dense_layers, int(time.time()))  # choosing the name carefully is important for telling runs apart in TensorBoard

model = Sequential()

model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.3))

model.add(Dense(1))
model.add(Activation('sigmoid'))

# tensorboard = TensorBoard(log_dir='log',
#                           batch_size=128,
#                           write_graph=True,
#                           write_grads=True,
#                           write_images=True, 
#                           update_freq='batch') 
# tensorboard not working properly

#%% defining our extra functions to help us improve our training 
Example #28
#mY = y_train.mean()
#sY = y_train.std()
#y_train = ( y_train - mY ) / sY
#y_test = ( y_test - mY ) / sY

#%% NN
mName = 'NN'
val_loss = []
model_rec = []
history_rec = []

for i in range(10):
    model = Sequential()

    model.add(Dense(units=3, activation='relu', input_dim=X.shape[1]))
    #model.add(Dense(units=3, activation='relu', input_dim=X.shape[1]))
    #model.add(Dense(units=3, activation='relu', input_dim=X.shape[1]))
    #model.add(Dense(units=3, activation='relu', input_dim=X.shape[1]))
    model.add(Dense(units=3, activation='relu'))
    model.add(Dense(units=3, activation='relu'))
    model.add(Dense(units=3, activation='relu'))
    model.add(Dense(units=1))
    model.summary()

    model.compile(loss='mse', optimizer='adam', metrics=['mse'])
    #    model.save_weights('model.h5')

    model_rec.append(model)

    patience = 200
Example #29
x = np.array([[1,2,3],[2,3,4],[3,4,5],[4,5,6],[5,6,7],[6,7,8],[7,8,9],
              [8,9,10],[9,10,11],[10,11,12],
              [20,30,40],[30,40,50],[40,50,60]])  # leading rows reconstructed from y below; the snippet was truncated
y = np.array([4,5,6,7,8,9,10,11,12,13,50,60,70])
x_pred = np.array([50,60,70]) 

print("x.shape : ", x.shape) #(13, 3)
print("y.shape : ", y.shape) #(13,)

x = x.reshape(13, 3, 1)

#2. Model construction
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,GRU

model = Sequential()
model.add(GRU(10, activation='relu', input_shape=(3,1)))
model.add(Dense(30,activation='relu'))
model.add(Dense(20))
model.add(Dense(10))
model.add(Dense(1))
model.summary()

# 3. Compile, train
model.compile(loss='mse', optimizer='adam')
model.fit(x,y, epochs=100, batch_size=1)

# 4. Evaluate, predict
loss = model.evaluate(x, y)
print('loss : ',loss)

x_pred = x_pred.reshape(1, 3 , 1) 
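
# Hedged continuation: predict the next value for the reshaped sample
y_pred = model.predict(x_pred)
print('y_pred : ', y_pred)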
Example #30
session = tf.Session(config=config)  # TF1-style session setup; not needed under TF2 eager execution

model = Sequential()

model.add(Conv2D(512, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())

# The final Dense layer must have as many units as there are classes in the data (here 2)
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(0.2))

model.add(Dense(2))
model.add(Activation('softmax'))

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# To visualize the model in TensorBoard, open a terminal, cd to the folder
# where this script is saved, and run "tensorboard --logdir=logs\"
tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))

model.fit(X,