Example #1
File: model.py Project: arjo129/uSpeech
def model(GRU_size=5):
    adam = optimizers.Adam()
    model = Sequential()
    model.add(GRU(GRU_size))
    model.add(Dense(len(interested_words), activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=adam)
    return model
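
A usage sketch (not from the original project): it assumes `interested_words` holds 4 labels and fakes 100 sequences of 20 timesteps with 13 features each, just to show the expected shapes. The GRU layer infers its input shape on the first fit() call.

import numpy as np
X = np.random.rand(100, 20, 13).astype('float32')   # (samples, timesteps, features) -- hypothetical shapes
Y = np.eye(4)[np.random.randint(0, 4, size=100)]    # one-hot targets; assumes len(interested_words) == 4
m = model(GRU_size=8)
m.fit(X, Y, epochs=5, verbose=0)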
Example #2
def create_classifier_model(
        input_length, hidden_layer_sizes, regularization_beta):
    
    layer_sizes = hidden_layer_sizes + [1]
    num_layers = len(layer_sizes)
    
    regularizer = keras.regularizers.l2(regularization_beta)
    
    model = Sequential()
    
    for i in range(num_layers):
        
        kwargs = {
            'activation': 'sigmoid' if i == num_layers - 1 else 'relu',
            'kernel_regularizer': regularizer
        }
        
        if i == 0:
            kwargs['input_dim'] = input_length
            
        model.add(Dense(layer_sizes[i], **kwargs))
        
    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'])
    
    return model
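
A usage sketch under assumed hyperparameters: a binary classifier over 20 input features, two hidden layers, and a small L2 penalty (the numbers are illustrative, not the original caller's).

clf = create_classifier_model(
    input_length=20,
    hidden_layer_sizes=[64, 32],
    regularization_beta=1e-4)
clf.summary()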
Example #3
def fit_model(train_X, train_Y, window_size = 1):
    EPOCHS=10
    model = Sequential()

    model.add(LSTM(4,
                   input_shape = (1, window_size)))
    model.add(Dense(1))
    model.compile(loss = "mean_squared_error",
                  optimizer = "adam")
    model.fit(train_X,
              train_Y,
              epochs = EPOCHS,
              batch_size = 1,
              verbose = 2)

    return model
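
fit_model expects train_X shaped (samples, 1, window_size) and train_Y shaped (samples,). A minimal sketch, assuming a plain 1-D numpy series, of how such windows could be built (the original data pipeline is not shown):

import numpy as np

def make_windows(series, window_size=1):
    X, Y = [], []
    for i in range(len(series) - window_size):
        X.append(series[i:i + window_size])   # the window is the input...
        Y.append(series[i + window_size])     # ...and the next value is the target
    X = np.array(X).reshape(-1, 1, window_size)  # (samples, timesteps=1, features=window_size)
    return X, np.array(Y)

train_X, train_Y = make_windows(np.sin(np.linspace(0, 20, 200)), window_size=1)
lstm_model = fit_model(train_X, train_Y, window_size=1)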
Example #4
File: myNN.py Project: hooperw/aima-python
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
    [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
], dtype='float32')

# this takes a looong time to index, and
# python may crash several times before indexing is complete
import tensorflow as tf

from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation

model = Sequential()

model.add(Dense(8,
                activation=keras.activations.sigmoid,
                ))
model.add(Dense(17,
                activation=keras.activations.sigmoid,
                ))
model.compile(
    optimizer=tf.keras.optimizers.Adam(0.001),  # tf.train.AdamOptimizer is TF1-only and does not exist alongside tf.keras in TF2
    loss=keras.losses.categorical_crossentropy,
    # loss=keras.losses.mse,
    metrics=[keras.metrics.binary_accuracy]
)

# This is the process I used to train my weights
# model.fit(votingrecords, votingtarget,epochs =2000)
# myWeights= model.get_weights()
# np.set_printoptions(suppress=True)
Example #5
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,
                                                    y,
                                                    train_size=0.8,
                                                    shuffle=True)
'''
print(x_train)
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)
'''

#2. Build the model
model = Sequential()
model.add(Dense(5, input_dim=1))
model.add(Dense(1))
model.add(Dense(1))

#3. Compile and train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.fit(x_train, y_train, epochs=100, validation_split=0.2)

#4. Evaluate and predict
loss, mae = model.evaluate(x_test, y_test)
print('loss:', loss, 'mae:', mae)

y_predict = model.predict(x_test)
print(y_predict)
    def create_model(self):
        if LOAD_MODEL is not None:
            model = load_model(LOAD_MODEL)
        else:
            model = Sequential()

            model.add(Conv2D(128, (2, 2), input_shape=env.observation_space_values))  # OBSERVATION_SPACE_VALUES = (10, 10, 3) a 10x10 RGB image.
            model.add(Activation('relu'))

            model.add(Conv2D(128, (2, 2)))
            model.add(Activation('relu'))
            
            model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
            model.add(Dense(128))

            model.add(Dense(len(env.actions), activation='linear'))  # ACTION_SPACE_SIZE = how many choices (9)
            model.compile(loss="mse", optimizer=Adam(lr=0.001))
        return model
Example #7
def init():
    # [32C3-BN]*2-[32C5S2-BN]-0.4d-[64C3-BN]*2-[64C5S2-BN]-0.4d-F-256D-10D 20E
    model = Sequential()
    # input_shape is only needed on the first layer; Keras ignores it elsewhere
    model.add(Conv2D(32, 3, padding='same', activation='relu', input_shape=(28, 28, 1)))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(Conv2D(32, 3, padding='same', activation='relu'))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(Conv2D(32, 5, padding='same', strides=2, activation='relu'))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Conv2D(64, 3, padding='same', activation='relu'))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(Conv2D(64, 3, padding='same', activation='relu'))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(Conv2D(64, 5, padding='same', strides=2, activation='relu'))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.4))
    model.add(Dense(10, activation='softmax'))

    model.load_weights("./models/model_weights.h5")
    print("Loaded Model from disk")

    optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
    model.compile(loss=tf.keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])

    return model
image_size = x_train.shape[1]
input_size = image_size * image_size
# we train our network using float data
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255

# network parameters
batch_size = 128
hidden_units = 256
data_augmentation = True
epochs = 20
max_batches = len(x_train) / batch_size

# this is 3-layer MLP with ReLU after each layer
model = Sequential()
model.add(Dense(hidden_units, input_dim=input_size))
model.add(Activation('relu'))
model.add(Dense(hidden_units))
model.add(Activation('relu'))
model.add(Dense(num_labels))
# this is the output for one-hot vector
model.add(Activation('softmax'))
model.summary()

# loss function for one-hot vector
# use of sgd optimizer
# accuracy is good metric for classification tasks
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])
from typing import Tuple

def build_model(img_shape: Tuple[int, int, int],
                num_classes: int) -> Sequential:
    catdog_model = Sequential()

    catdog_model.add(
        Conv2D(filters=24,
               kernel_size=3,
               padding="same",
               input_shape=img_shape))
    catdog_model.add(Activation("relu"))
    catdog_model.add(Conv2D(filters=24, kernel_size=3, padding="same"))
    catdog_model.add(Activation("relu"))
    catdog_model.add(MaxPool2D())

    catdog_model.add(Conv2D(filters=48, kernel_size=3, padding="same"))
    catdog_model.add(Activation("relu"))
    catdog_model.add(Conv2D(filters=48, kernel_size=3, padding="same"))
    catdog_model.add(Activation("relu"))
    catdog_model.add(MaxPool2D())

    catdog_model.add(Conv2D(filters=96, kernel_size=3, padding="same"))
    catdog_model.add(Activation("relu"))
    catdog_model.add(Conv2D(filters=96, kernel_size=3, padding="same"))
    catdog_model.add(Activation("relu"))
    catdog_model.add(MaxPool2D())

    catdog_model.add(Conv2D(filters=128, kernel_size=3, padding="same"))
    catdog_model.add(Activation("relu"))
    catdog_model.add(Conv2D(filters=128, kernel_size=3, padding="same"))
    catdog_model.add(Activation("relu"))
    catdog_model.add(MaxPool2D())

    catdog_model.add(Flatten())
    catdog_model.add(Dense(128))
    catdog_model.add(Dense(units=num_classes))
    catdog_model.add(Activation("softmax"))

    catdog_model.summary()

    return catdog_model
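
build_model returns an uncompiled network, so the caller still has to compile it. A hedged usage sketch with assumed image dimensions (the compile settings below are this editor's guess, not the original project's):

model = build_model(img_shape=(128, 128, 3), num_classes=2)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])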
Example #10
File: test.py Project: cottrell/notebooks
from tensorflow.keras.callbacks import TensorBoard

(X_train,y_train), (X_test, y_test) = mnist.load_data()

X_train = X_train.reshape(60000,28,28,1).astype('float32')
X_test = X_test.reshape(10000,28,28,1).astype('float32')

X_train /= 255
X_test /= 255

n_classes = 10
y_train = keras.utils.to_categorical(y_train, n_classes)
y_test = keras.utils.to_categorical(y_test, n_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=(28,28,1)) )
model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

tensor_board = TensorBoard('./logs/LeNet-MNIST-1')

model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=1, validation_data=(X_test,y_test), callbacks=[tensor_board])

Example #11
# (assignment reconstructed; the excerpt begins mid-statement)
num_windows = int(
    (image_width - wandb.config.window_width) / wandb.config.window_stride) + 1
if num_windows < output_length:
    raise ValueError(
        f'Window width/stride need to generate >= {output_length} windows (currently {num_windows})')

# Build windowing layer
image_reshaped = Reshape((image_height, image_width, 1))(image_input)
image_patches = Lambda(
    slide_window,
    arguments={'window_width': wandb.config.window_width,
               'window_stride': wandb.config.window_stride}
)(image_reshaped)
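
slide_window is referenced above but not defined in this excerpt. A minimal sketch of what such a helper could look like, assuming it cuts fixed-width vertical strips with tf.image.extract_patches (the real project's implementation may differ):

import tensorflow as tf

def slide_window(image, window_width, window_stride):
    # image: (batch, H, W, 1) -> patches: (batch, num_windows, H, window_width, 1)
    patches = tf.image.extract_patches(image,
                                       sizes=[1, 1, window_width, 1],
                                       strides=[1, 1, window_stride, 1],
                                       rates=[1, 1, 1, 1],
                                       padding='VALID')
    # extract_patches yields (batch, H, num_windows, window_width); reorder into strips
    patches = tf.transpose(patches, [0, 2, 1, 3])
    return tf.expand_dims(patches, -1)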

# Build simple convnet
cnn = Sequential()
cnn.add(Conv2D(32, (3, 3), activation='relu'))
cnn.add(MaxPooling2D())
cnn.add(Dropout(0.4))
cnn.add(Conv2D(64, (3, 3), activation='relu'))
cnn.add(MaxPooling2D())
cnn.add(Dropout(0.4))
cnn.add(Flatten())
cnn.add(Dense(64, activation='relu'))

# TimeDistribute convnet output
cnn_out = TimeDistributed(cnn)(image_patches)

# Feed Time distributed into a GRU
gru_out = Bidirectional(gru_fn(128, return_sequences=True))(cnn_out)
gru2_out = gru_fn(128, return_sequences=True)(gru_out)
def build_ensemble_model(shape, NUM_CLASSES):
    """
    This ensemble model is based on the Deep Fingerprinting Model's flatten and dense layers
    before classification.
    :param shape: The shape of the ensembled training data
    :return: The ensemble model
    """
    initializer = tf.keras.initializers.VarianceScaling(
        scale=1.0, mode='fan_in', distribution='untruncated_normal')
    model = Sequential()
    model.add(Input(shape=shape))
    model.add(
        Dense(1024, kernel_initializer=glorot_uniform(seed=0), name='fc1'))
    model.add(Activation('relu', name='fc1_act'))
    model.add(Dropout(0.7, name='fc1_dropout'))
    model.add(
        Dense(1024, kernel_initializer=glorot_uniform(seed=0), name='fc2'))
    model.add(Activation('relu', name='fc2_act'))
    model.add(Dropout(0.7, name='fc2_dropout'))
    model.add(
        Dense(NUM_CLASSES,
              kernel_initializer=glorot_uniform(seed=0),
              name='fc3'))
    model.add(Activation('softmax', name="softmax"))

    sgd = SGD(learning_rate=0.003, nesterov=True, momentum=0.5)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    return model
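
A usage sketch with hypothetical dimensions (the real shape and class count come from the caller's ensembled data, which is not shown here):

ensemble = build_ensemble_model(shape=(5120,), NUM_CLASSES=95)
ensemble.summary()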
Example #13
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                     directory=train_dir,
                                                     target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                     class_mode='binary')

augmented_images = [train_data_gen[0][0][0] for i in range(5)]

plotImages(augmented_images)

"""In the cell below, create a model for the neural network that outputs class probabilities. It should use the Keras Sequential model. It will probably involve a stack of Conv2D and MaxPooling2D layers and then a fully connected layer on top that is activated by a ReLU activation function.

Compile the model passing the arguments to set the optimizer and loss. Also pass in `metrics=['accuracy']` to view training and validation accuracy for each training epoch.
"""

model = Sequential()
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))

model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

model.summary()
Example #14
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.regularizers import L2

(trainX, trainY), (testX, testY) = mnist.load_data()

import matplotlib.pyplot as plt
plt.imshow(trainX[0])

model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))

model.summary()

model.compile(optimizer='sgd',
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])

trainX = trainX / 255.0
testX = testX / 255.0
model.fit(trainX, trainY, epochs=10, verbose=1, validation_data=(testX, testY))

model2 = Sequential()
model2.add(Flatten(input_shape=(28, 28)))
model2.add(Dense(256, activation="relu", kernel_regularizer=L2(0.01)))
model2.add(Dense(128, activation='relu', kernel_regularizer=L2(0.01)))
speed = 10
count=0
car_shift=10

discount_factor =0.95
learning_rate =0.5
reward_val=10
crash_count =0

train_image_size = (right_max-left_max,int(H)-int(H//4),1)

X = []
y = []

model = Sequential()
model.add(Flatten())
model.add(Dense(256,activation='relu'))
model.add(Dense(126))
model.add(Dense(3))



#comment the below line if you want to train a model
model = load_model('Trained_models/model-v1.model')
model.compile(loss='mse',optimizer='adam',metrics=['accuracy'])

#print(model.summary())

l= len(get_state())
print(window_size)
def create_keras_model():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    #model.add(keras.layers.Dropout(0.5))
    #model.add(keras.layers.Flatten())
    model.add(Dense(10, activation='softmax', name ='predictions'))
    return model
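
The 32x32x3 input and 10-way softmax match CIFAR-10, so a compile-and-train sketch could look like the following (the dataset choice and training settings are assumptions, and the layer imports from the surrounding module are presumed in scope):

from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

(x_tr, y_tr), (x_te, y_te) = cifar10.load_data()
x_tr, x_te = x_tr / 255.0, x_te / 255.0                # scale pixels to [0, 1]
y_tr, y_te = to_categorical(y_tr, 10), to_categorical(y_te, 10)

model = create_keras_model()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_tr, y_tr, epochs=5, validation_data=(x_te, y_te))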
# Check the data with a plot
sns.pairplot(df, hue='species');
plt.show()

# Split the data
dataset = df.values
X = dataset[:,0:4].astype(float)
Y_obj = dataset[:,4]

# Convert strings to numbers
e = LabelEncoder()
e.fit(Y_obj)
Y = e.transform(Y_obj)
Y_encoded = tf.keras.utils.to_categorical(Y)

# Model setup
model = Sequential()
model.add(Dense(16,  input_dim=4, activation='relu'))
model.add(Dense(3, activation='softmax'))

# Compile the model
model.compile(loss='categorical_crossentropy',
            optimizer='adam',
            metrics=['accuracy'])

# Train the model
model.fit(X, Y_encoded, epochs=50, batch_size=1)

# Print the result
print("\n Accuracy: %.4f" % (model.evaluate(X, Y_encoded)[1]))
Example #18
# split into train and validation
training_df = main_df[:last_10pct]
validation_df = main_df[last_10pct:last_5pct]

# preprocess the data
train_x, train_y = preprocess_df(training_df)
validation_x, validation_y = preprocess_df(validation_df)

# some statistics
print(f"train data: {len(train_x)} validation: {len(validation_x)}")
print(f"don't buys: {train_y.count(0)}, buys: {train_y.count(1)}")
print(f"VALIDATION don't buys: {validation_y.count(0)}, buys: {validation_y.count(1)}")

model = Sequential()
model.add(LSTM(128, activation='tanh', input_shape=(train_x.shape[1:]), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(LSTM(128, activation='tanh', return_sequences=True))  # input_shape only matters on the first layer
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(LSTM(128, activation='tanh'))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(2, activation="softmax"))
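
The excerpt ends before compilation. A minimal sketch of a matching compile/fit step, assuming integer 0/1 labels for the 2-way softmax (the optimizer and hyperparameters are guesses, not the original's):

import numpy as np
import tensorflow as tf

model.compile(loss='sparse_categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
              metrics=['accuracy'])
model.fit(train_x, np.asarray(train_y),
          batch_size=64, epochs=10,
          validation_data=(validation_x, np.asarray(validation_y)))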
Example #19
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.3,
                                                    random_state=101)
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# model definition and compilation
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

model = Sequential()
model.add(Dense(19, activation='relu'))
model.add(Dense(19, activation='relu'))
model.add(Dense(19, activation='relu'))
model.add(Dense(19, activation='relu'))

model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
model.fit(x=X_train,
          y=y_train,
          validation_data=(X_test, y_test),
          batch_size=128,
          epochs=400)
losses = pd.DataFrame(model.history.history)
losses.plot()
from sklearn.metrics import mean_absolute_error, mean_squared_error, explained_variance_score
# =============================================================================
X_train = np.load(os.path.join(os.getcwd(), 'data', 'LSTM_x_Train.npy'))
y_train = np.load(os.path.join(os.getcwd(), 'data', 'LSTM_y_Train.npy'))

X_test = np.load(os.path.join(os.getcwd(), 'data', 'LSTM_x_Test.npy'))
y_test = np.load(os.path.join(os.getcwd(), 'data', 'LSTM_y_Test.npy'))

#%%
# =============================================================================
# Build classification model - GRU
# =============================================================================
from tensorflow.keras.layers import GRU, Activation

model = Sequential()
model.add(
    GRU(50,
        input_shape=(X_train.shape[1], X_train.shape[2]),
        return_sequences=True))
model.add(GRU(1, return_sequences=False))
model.add(Activation('sigmoid'))
model.summary()

adam = Adam(learning_rate=learningRate)
model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
chk = ModelCheckpoint(os.path.join(os.getcwd(), 'data', 'best_model.hdf5'),
                      monitor='val_accuracy',
                      save_best_only=True,
                      mode='max',
                      verbose=1)
history = model.fit(X_train,
                    y_train,
                    epochs=150,
                    validation_data=(X_test, y_test),  # assumed: the ModelCheckpoint above monitors val_accuracy
                    callbacks=[chk])
Example #21
File: lb1.py Project: iagsav/autumn_2020
# function that normalizes the data
def normalize_data(data):
  mean = data.mean(axis=0)
  std = data.std(axis=0)
  data -= mean
  data /= std
  return data

  
x_norm = normalize_data(x)
x_norm

x_train, x_test, y_train, y_test = train_test_split(x_norm, y, test_size=0.3, random_state = 2)

model = Sequential()
model.add(Dense(5059, activation="relu", input_shape=(x_train.shape[1],)))
model.add(Dropout(0.3))
model.add(Dense(1264, activation="relu"))
model.add(Dense(632, activation="relu"))
model.add(Dense(158, activation="relu"))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
 
model.summary()

history = model.fit(x_train, y_train, epochs=150, batch_size=500, verbose=1, validation_split=0.3)
print(history)
history = history.history
print("[DEBUG-USER] nn finish")

def graphs(history):
Example #22
print(y_test.shape)  #(456,)

from sklearn.preprocessing import MinMaxScaler  # preprocessing
scaler = MinMaxScaler()
scaler.fit(x)
x = scaler.transform(x)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
print(np.max(x), np.min(x))  # 395: the maximum of that column is 395

#2. Modeling
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout

model = Sequential()
model.add(Dense(100, activation='relu',
                input_shape=(30, )))  # any activation works for the first and hidden layers, but the last layer must be sigmoid
model.add(Dropout(0.2))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))  # having only this layer is fine too; a model with no hidden layers is also possible
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(
    1, activation='sigmoid'))  # the default activation is linear, so the output range is unbounded
# the sigmoid function confines values to (0, 1): 0 < n < 1, so any layer's output converges into that range.
# the relu function gates the weighted sums: negative values are cancelled, and only values >= 0 are passed on.
# relu keeps values >= 0 => relu performs well, typically 85%+ (sigmoid does not).
# max(0, x)
# the earliest models used the sigmoid function, bounded between 0 and 1.
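
A two-line numeric check of the activations described above (plain numpy, purely illustrative):

import numpy as np
x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
print(np.maximum(0, x))        # relu: negatives are zeroed, positives pass through
print(1 / (1 + np.exp(-x)))    # sigmoid: every value squashed into (0, 1)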
Example #23
def test_rest(scan_object):

    print('\n >>> start testing the rest... \n')

    import talos

    import random

    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense

    from tensorflow.keras import metrics, losses

    deploy_filename = 'test' + str(random.randint(1, 20000000000))

    print('\n ...Deploy()... \n')
    talos.Deploy(scan_object, deploy_filename, 'val_acc')

    print('\n ...Restore()... \n')
    restored = talos.Restore(deploy_filename + '.zip')

    x, y = talos.templates.datasets.breast_cancer()
    x = x[:50]
    y = y[:50]

    x_train, y_train, x_val, y_val = talos.utils.val_split(x, y, .2)
    x = talos.utils.rescale_meanzero(x)

    callbacks = [
        talos.utils.early_stopper(10),
        talos.utils.ExperimentLogCallback('test', {})
    ]

    metrics = [
        metrics.MeanAbsolutePercentageError(),
        metrics.MeanSquaredLogarithmicError(),
        metrics.RootMeanSquaredError(),
        metrics.Precision(),
        metrics.Recall()
    ]

    print('\n ...callbacks and metrics... \n')

    model1 = Sequential()
    model1.add(Dense(10, input_dim=x.shape[1]))
    model1.add(Dense(1))
    model1.compile('adam', metrics=metrics)
    model1.fit(x, y, callbacks=callbacks)

    print('\n ...generator... \n')

    model2 = Sequential()
    model2.add(Dense(10, input_dim=x.shape[1]))
    model2.add(Dense(1))
    model2.compile('adam')
    model2.fit_generator(talos.utils.generator(x, y, 10), 5)

    print('\n ...SequenceGenerator... \n')

    model3 = Sequential()
    model3.add(Dense(10, input_dim=x.shape[1]))
    model3.add(Dense(1))
    model3.compile('adam', 'logcosh')
    model3.fit_generator(talos.utils.SequenceGenerator(x, y, 10))

    # print('\n ...gpu_utils... \n')

    # talos.utils.gpu_utils.force_cpu()
    # talos.utils.gpu_utils.parallel_gpu_jobs()

    # print('\n ...gpu_utils... \n')

    from talos.utils.test_utils import create_param_space
    create_param_space(restored.results, 5)

    print('finished testing the rest \n')
Example #24
def model_fit(x_train, y_train, x_test, y_test, x_valid, numclasses,
              input_shape, saved_model_path):
    '''
    load data, compile and train CNN model, apply data shape transformation for ANN inputs
    Parameters
    Input: 
        x_train, y_train - train data: qrs segments and labels
        x_test, y_test - test data: qrs segments and labels
        x_valid - validation data
        numclasses - the number of classes (labels)
        input_shape - the input shape of the chosen ANN
    Output: 
        model - sequential model
        history - training history parameters
        x_valid - reshaped validation data
    '''

    x_train, x_test, x_valid = map(lambda x: get_transformed_input(x),
                                   [x_train, x_test, x_valid])

    epochs = 100

    model = Sequential()

    # Convolutional layers
    model.add(
        Convolution1D(100,
                      4,
                      1,
                      activation='tanh',
                      input_shape=input_shape,
                      kernel_regularizer=regularizers.l2(0.001)))
    model.add(MaxPooling1D(pool_size=2))
    model.add(
        Convolution1D(200,
                      2,
                      1,
                      activation='tanh',
                      kernel_regularizer=regularizers.l2(0.001)))
    model.add(MaxPooling1D(pool_size=4))
    model.add(
        Convolution1D(300,
                      1,
                      1,
                      activation='tanh',
                      kernel_regularizer=regularizers.l2(0.001)))
    model.add(MaxPooling1D(pool_size=2))
    model.add(
        Convolution1D(400,
                      1,
                      1,
                      activation='tanh',
                      kernel_regularizer=regularizers.l2(0.001)))

    model.add(Flatten())
    model.add(Dropout(0.9))
    model.add(Dense(3000, activation='tanh'))
    model.add(Dense(numclasses, activation='softmax'))

    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    reduce_lr = tensorflow.keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                             factor=0.5,
                                                             patience=50,
                                                             min_lr=0.0001)
    callbacks = [
        ModelCheckpoint(filepath=saved_model_path,
                        monitor='categorical_crossentropy'), reduce_lr
    ]

    history = model.fit(x_train,
                        y_train,
                        validation_data=(x_test, y_test),
                        epochs=epochs,
                        verbose=1,
                        callbacks=callbacks)

    return model, history, x_valid
Example #25
    axes.imshow(image, cmap=plt.cm.gray_r)
    axes.set_xticks([])
    axes.set_yticks([])
    axes.set_title(target)
plt.tight_layout()
#--------------------------------------------------------------------------------------------------#

#--------------------------------------------------------------------------------------------------#
# Creating the Neural Networks and adding Layers to the Networks.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
NN = Sequential()
#--------------Then adding a convolution layer--------------------------------#
NN.add(
    Conv2D(filters=64,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(28, 28, 1)))
#--------------Then adding a pooling layer------------------------------------#
NN.add(MaxPooling2D(pool_size=(2, 2)))
#--------------Then adding flattening the results-----------------------------#
NN.add(Flatten())
#-------Then adding a dense layer to reduce the number of features-------#
NN.add(Dense(units=128, activation='relu'))
#-------Then adding a dense layer to produce the final output-----------#
NN.add(Dense(units=10, activation='softmax'))
#-------------------Printing the Model's Summary---------------------------#
print(NN.summary())  # show all data conclusion
#output: Model: "sequential_9"
# _________________________________________________________________
# Layer (type)                 Output Shape              Param #
Example #26
# pos_feature_mat_tau_lst = []
#
# pos_feature_mat_tau = np.vstack([np.hstack((np.zeros((model_def.list_size + 1, k1)),
#             np.hstack((np.ones(k1), np.zeros(model_def.list_size - k1 + 1))).reshape(-1, 1),
#             np.zeros((model_def.list_size + 1, model_def.list_size - k1)))).flatten()
#  for k1 in range(1, model_def.list_size+1)])

pos_feature_gamma = np.eye(model_def.list_size)

var_dic = {'phi_A': item_feature_mat_A, 'gamma': pos_feature_gamma}

# Model:
model_phi_A = Sequential()
model_phi_A.add(
    Dense(1,
          input_dim=var_dic['phi_A'].shape[1],
          activation='sigmoid',
          use_bias=False))
model_phi_A.compile(loss=GCM.pos_log_loss, optimizer=RMSprop())

# Note the large output dimension and the softmax. We want multiple transition probabilities that sum up to 1
# It's the shape**2, as we flatten the square matrix.
model_gamma = Sequential()
model_gamma.add(SimpleDense(list_size + 1))
model_gamma.compile(loss=GCM.pos_log_loss, optimizer=RMSprop())

# model_tau = Sequential()
# model_tau.add(Dense(var_dic['tau'].shape[1], input_dim=var_dic['tau'].shape[1], activation=None, use_bias=False,
#                     kernel_initializer=Identity(), trainable=False))
# model_tau.compile('rmsprop', 'binary_crossentropy')  # No trainable weights, so doesn't really matter
Example #27
    def _build_generator(self):
        model = Sequential()

        model.add(
            Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        noise = Input(shape=(self.latent_dim, ))
        img = model(noise)

        return Model(noise, img)
Example #28
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

import numpy as np
import tensorflow as tf

np.random.seed(3)
tf.random.set_seed(3)

Data_set = np.loadtxt("C:/Users/HJ/Downloads/lung_cancer.csv", delimiter=",")

X = Data_set[:, 0:4]
Y = Data_set[:, 4]

model = Sequential()
model.add(Dense(30, input_dim=4, activation="relu"))
model.add(Dense(1, activation="sigmoid"))

model.compile(loss="mean_squared_error",
              optimizer="adam",
              metrics=["accuracy"])
model.fit(X, Y, epochs=100, batch_size=10)
Example #29
    def _build_discriminator(self):
        model = Sequential()

        model.add(
            Conv2D(32,
                   kernel_size=3,
                   strides=2,
                   input_shape=self.img_shape,
                   padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
Example #30
    def model_architecture(
            self,
            input_shape,  # type: Tuple[int, int]
            output_shape  # type: Tuple[int, Optional[int]]
    ):
        # type: (...) -> tf.keras.models.Sequential
        """Build a keras model and return a compiled model."""

        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import \
            Masking, LSTM, Dense, TimeDistributed, Activation

        # Build Model
        model = Sequential()

        # the shape of the y vector of the labels,
        # determines which output from rnn will be used
        # to calculate the loss
        if len(output_shape) == 1:
            # y is (num examples, num features) so
            # only the last output from the rnn is used to
            # calculate the loss
            model.add(Masking(mask_value=-1, input_shape=input_shape))
            model.add(LSTM(self.rnn_size, dropout=0.2))
            model.add(Dense(input_dim=self.rnn_size, units=output_shape[-1]))
        elif len(output_shape) == 2:
            # y is (num examples, max_dialogue_len, num features) so
            # all the outputs from the rnn are used to
            # calculate the loss, therefore a sequence is returned and
            # time distributed layer is used

            # the first value in input_shape is max dialogue_len,
            # it is set to None, to allow dynamic_rnn creation
            # during prediction
            model.add(Masking(mask_value=-1,
                              input_shape=(None, input_shape[1])))
            model.add(LSTM(self.rnn_size, return_sequences=True, dropout=0.2))
            model.add(TimeDistributed(Dense(units=output_shape[-1])))
        else:
            raise ValueError("Cannot construct the model because"
                             "length of output_shape = {} "
                             "should be 1 or 2."
                             "".format(len(output_shape)))

        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        logger.debug(model.summary())

        return model
Example #31
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, GRU
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

x = np.array([[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6]])  #(4,3)
y = np.array([4, 5, 6, 7])  #(4,)

print("x.shape: ", x.shape)

x = x.reshape(4, 3, 1)
print("x.shape: ", x.shape)

#2. Build the model

model = Sequential()
model.add(GRU(16, activation='relu', input_shape=(3, 1)))
model.add(Dense(16))
model.add(Dense(1))

model.summary()

model.compile(loss='mse', metrics=['mse'], optimizer='adam')
model.fit(x, y, batch_size=1, verbose=1, epochs=100)

x_input = np.array([5, 6, 7])
x_input = x_input.reshape(1, 3, 1)

y_predict = model.predict(x_input)
print(y_predict)
Example #32

#Build LSTM model on training data
#The model architecture used here is slightly more complex. Its elements are:

#LSTM input layer with `window_size` units.
#Dropout layer to prevent overfitting (see: http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf).
#A second LSTM layer with 256 units.
#A further Dropout layer.
#A Dense layer to produce a single output.
#Use MSE as loss function.


model2 = Sequential()
model2.add(LSTM(input_shape = (window_size, 1),
               units = window_size,
               return_sequences = True))
model2.add(Dropout(0.5))
model2.add(LSTM(256))
model2.add(Dropout(0.5))
model2.add(Dense(1))
model2.add(Activation("linear"))
model2.compile(loss = "mse",
              optimizer = "adam")
print(model2.summary())

# Fit the model.
model2.fit(train_X,
          train_Y,
          batch_size = 512,
          epochs = 3)  # call closed here; the excerpt was cut off mid-statement
Example #33
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

# BytePS: adjust learning rate based on number of GPUs.
opt = keras.optimizers.Adadelta(learning_rate=1.0 * bps.size())

# BytePS: add BytePS Distributed Optimizer.
opt = bps.DistributedOptimizer(opt)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D

import pickle

pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)

pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)

X = X/255.0

model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors

model.add(Dense(64))

model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',       # assumed: the excerpt is cut off after the loss argument
              metrics=['accuracy'])
Example #35
y_test = tf.keras.utils.to_categorical(y_test, num_classes=tag_size)

print("학습 샘플 시퀀스 형상 : ", x_train.shape)
print("학습 샘플 레이블 형상 : ", y_train.shape)
print("테스트 샘플 시퀀스 형상 : ", x_test.shape)
print("테스트 샘플 레이블 형상 : ", y_test.shape)

# Model definition (Bi-LSTM)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional
from tensorflow.keras.optimizers import Adam

model = Sequential()
model.add(
    Embedding(input_dim=vocab_size,
              output_dim=30,
              input_length=max_len,
              mask_zero=True))
model.add(
    Bidirectional(
        LSTM(200, return_sequences=True, dropout=0.50,
             recurrent_dropout=0.25)))
model.add(TimeDistributed(Dense(tag_size, activation='softmax')))
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(0.01),
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=128, epochs=10)

print("평가 결과 : ", model.evaluate(x_test, y_test)[1])
model.save('ner_model.h5')
#https://www.youtube.com/watch?v=V23DmbdzMvg
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn import preprocessing

iris = load_iris()
X = preprocessing.scale(iris['data'])
Y = to_categorical(iris['target'])

#print(X)
#print(Y)

#training data and test data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)

#model
model = Sequential()
model.add(Dense(10, input_dim=4, activation='relu'))
model.add(Dense(10,  activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

#fitting the model
model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=200, batch_size=10)
Example #37
    def build_discriminator(width, height, depth, alpha=0.2):
        # initialize the model along with the input shape to be
        # "channels last"
        model = Sequential()
        inputShape = (height, width, depth)

        # first set of CONV => RELU layers
        model.add(Conv2D(32, (5, 5), padding="same", strides=(2, 2),
                         input_shape=inputShape))
        model.add(LeakyReLU(alpha=alpha))

        # second set of CONV => RELU layers
        model.add(Conv2D(64, (5, 5), padding="same", strides=(2, 2)))
        model.add(LeakyReLU(alpha=alpha))

        # first (and only) set of FC => RELU layers
        model.add(Flatten())
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=alpha))

        # sigmoid layer outputting a single value
        model.add(Dense(1))
        model.add(Activation("sigmoid"))

        # return the discriminator model
        return model
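
A usage sketch (assumed, not from the original class): compiling the discriminator for standalone real/fake training, as is typical for DCGANs, treating the method as a staticmethod.

from tensorflow.keras.optimizers import Adam

disc = build_discriminator(width=28, height=28, depth=1)
disc.compile(loss="binary_crossentropy",
             optimizer=Adam(learning_rate=0.0002, beta_1=0.5),
             metrics=["accuracy"])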
Example #38
    def build_generator(dim, depth, channels=1, inputDim=100,
        outputDim=512):
        # initialize the model along with the input shape to be
        # "channels last" and the channels dimension itself
        model = Sequential()
        inputShape = (dim, dim, depth)
        chanDim = -1

        # first set of FC => RELU => BN layers
        model.add(Dense(input_dim=inputDim, units=outputDim))
        model.add(Activation("relu"))
        model.add(BatchNormalization())

        # second set of FC => RELU => BN layers, this time preparing
        # the number of FC nodes to be reshaped into a volume
        model.add(Dense(dim * dim * depth))
        model.add(Activation("relu"))
        model.add(BatchNormalization())

        # reshape the output of the previous layer set, upsample +
        # apply a transposed convolution, RELU, and BN
        model.add(Reshape(inputShape))
        model.add(Conv2DTranspose(32, (5, 5), strides=(2, 2),
                                  padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))

        # apply another upsample and transposed convolution, but
        # this time output the TANH activation
        model.add(Conv2DTranspose(channels, (5, 5), strides=(2, 2),
                                  padding="same"))
        model.add(Activation("tanh"))

        # return the generator model
        return model
Example #39
import numpy as np
import pickle
from scipy.stats import zscore
import datetime
import pytz

np.random.seed(seed=11)

with open('series_85177_500_stride50.pkl', 'rb') as f:
    segments = pickle.load(f)

segments = zscore(segments).astype(np.float32)  # standardize

deep_model = Sequential(name="LSTM-autoencoder")
deep_model.add(CuDNNGRU(20, input_shape=(500, 1), return_sequences=False))
#deep_model.add(CuDNNGRU(100, return_sequences=False))
deep_model.add(Dense(20, activation=None))
deep_model.add(RepeatVector(500))
#deep_model.add(CuDNNGRU(100, return_sequences=True))
deep_model.add(CuDNNGRU(20, return_sequences=True))
deep_model.add(TimeDistributed(Dense(1)))
deep_model.compile(optimizer=Adam(learning_rate=5e-3, clipnorm=1.0), loss='mse')

#deep_model.load_weights("model_weights/lstm_autoencoder_2020-01-09_18-20-21.h5")

training_time_stamp = datetime.datetime.now(
    tz=pytz.timezone('Europe/London')).strftime("%Y-%m-%d_%H-%M-%S")

CB = EarlyStopping(monitor='val_loss',
                   min_delta=1e-4)  # call closed here; the excerpt was cut off mid-statement
Example #40
File: myNN.py Project: hooperw/aima-python
    [0, 0, 1],
    [0, 0, 1],
])


# this takes a looong time to index, and
# python may crash several times before indexing is complete
import tensorflow as tf

from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation

model = Sequential()
model.add(Dense(8,
                activation=keras.activations.sigmoid,
                ))
model.add(Dense(3,
                activation=keras.activations.sigmoid,
                ))

model.compile(
              optimizer=tf.keras.optimizers.Adam(0.001),  # tf.train.AdamOptimizer is TF1-only
              # loss=keras.losses.categorical_crossentropy,
              loss=keras.losses.mse,
              metrics=[keras.metrics.binary_accuracy]
              )

# This is the process I used to train my weights
# model.fit(bin7, count3, epochs=2000)
# myWeights = model.get_weights()
def create_topology_mnist_conv_28_28_1():
    seed(1)
    tf.random.set_seed(1)
    model = Sequential()

    model.add(
        Conv2D(filters=32,
               kernel_size=(5, 5),
               padding='Same',
               activation='relu',
               input_shape=(28, 28, 1)))
    model.add(
        Conv2D(filters=32,
               kernel_size=(5, 5),
               padding='Same',
               activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               padding='Same',
               activation='relu'))
    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               padding='Same',
               activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(256, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation="softmax"))
    model.summary()
    return model
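
A hedged usage sketch: the topology comes back uncompiled, so training needs a compile step; the MNIST wiring below is an assumption consistent with the 28x28x1 input (and it presumes the module's own imports, e.g. seed and the layer classes, are in scope).

from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical

(x_tr, y_tr), _ = mnist.load_data()
x_tr = x_tr.reshape(-1, 28, 28, 1).astype('float32') / 255
y_tr = to_categorical(y_tr, 10)

model = create_topology_mnist_conv_28_28_1()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_tr, y_tr, batch_size=128, epochs=1)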
Example #42
File: myNN.py Project: hooperw/aima-python

# this takes a looong time to index, and
# python may crash several times before indexing is complete
import tensorflow as tf

from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation


model = Sequential()


model.add(Dense(8,
                activation=keras.activations.sigmoid,
                ))
model.add(Dense(3,
                activation=keras.activations.sigmoid,
                ))

model.compile(
              optimizer=tf.keras.optimizers.Adam(0.001),  # tf.train.AdamOptimizer is TF1-only
              # loss=keras.losses.categorical_crossentropy,
              loss=keras.losses.mse,
              metrics=[keras.metrics.binary_accuracy]
              )

# This is the process I used to train my weights
model.fit(x_train, y_train, epochs=2000)
myWeights = model.get_weights()
Example #43
dense_layers = [0]
layer_sizes = [128]
conv_layers = [3]

for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:

            NAME = "bz16-adam-c30-{}-conv-{}-nodes-{}-dense-{}".format(
                conv_layer, layer_size, dense_layer, int(time.time()))
            print(NAME)

            model = Sequential()

            model.add(Conv2D(layer_size, (3, 3), input_shape=X.shape[1:]))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            for l in range(conv_layer - 1):
                model.add(Conv2D(layer_size, (3, 3)))
                model.add(Activation('relu'))
                model.add(MaxPooling2D(pool_size=(2, 2)))

            model.add(Flatten())

            # these layers belong inside the conv_layer loop; the original dedent broke the nesting
            for _ in range(dense_layer):
                model.add(Dense(layer_size))
                model.add(Activation('relu'))

            model.add(Dense(1))