Example #1
def create_classifier_model(
        input_length, hidden_layer_sizes, regularization_beta):
    
    layer_sizes = hidden_layer_sizes + [1]
    num_layers = len(layer_sizes)
    
    regularizer = keras.regularizers.l2(regularization_beta)
    
    model = Sequential()
    
    for i in range(num_layers):
        
        kwargs = {
            'activation': 'sigmoid' if i == num_layers - 1 else 'relu',
            'kernel_regularizer': regularizer
        }
        
        if i == 0:
            kwargs['input_dim'] = input_length
            
        model.add(Dense(layer_sizes[i], **kwargs))
        
    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'])
    
    return model
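
# A minimal usage sketch (not part of the original snippet; shapes and
# hyperparameters are assumed, and the same keras/Sequential/Dense imports
# the function relies on must already be in scope):
import numpy as np

clf = create_classifier_model(
    input_length=20, hidden_layer_sizes=[64, 32], regularization_beta=0.01)
X = np.random.rand(100, 20)             # 100 placeholder examples
y = np.random.randint(0, 2, size=100)   # placeholder binary labels
clf.fit(X, y, epochs=5, batch_size=16, verbose=0)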
Example #2
def model(GRU_size=5):
    adam = optimizers.Adam()
    model = Sequential()
    model.add(GRU(GRU_size))
    model.add(Dense(len(interested_words), activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=adam)
    return model
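
# A hypothetical usage sketch: the snippet assumes a global `interested_words`
# vocabulary plus the usual imports (Sequential, GRU, Dense, optimizers).
# Inputs are (batch, timesteps, features) arrays; the GRU infers its input
# shape on the first call to fit().
import numpy as np

interested_words = ['yes', 'no', 'maybe']        # assumed vocabulary
m = model(GRU_size=8)
x = np.random.rand(4, 10, 3)                     # 4 sequences, 10 steps, 3 features
y = np.eye(len(interested_words))[[0, 1, 2, 0]]  # one-hot targets
m.fit(x, y, epochs=1, verbose=0)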
Example #3
def fit_model(train_X, train_Y, window_size=1):
    EPOCHS = 10
    model = Sequential()

    model.add(LSTM(4,
                   input_shape=(1, window_size)))
    model.add(Dense(1))
    model.compile(loss="mean_squared_error",
                  optimizer="adam")
    model.fit(train_X,
              train_Y,
              epochs=EPOCHS,
              batch_size=1,
              verbose=2)

    return model
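
# A usage sketch with placeholder data (assumed, not from the original):
# fit_model expects inputs shaped (samples, 1, window_size), matching the
# LSTM's input_shape above.
import numpy as np

window_size = 3
train_X = np.random.rand(50, 1, window_size)  # 50 placeholder windows
train_Y = np.random.rand(50, 1)               # placeholder targets
lstm_model = fit_model(train_X, train_Y, window_size)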
Example #4
    def model_architecture(
            self,
            input_shape,  # type: Tuple[int, int]
            output_shape  # type: Tuple[int, Optional[int]]
    ):
        # type: (...) -> tf.keras.models.Sequential
        """Build a keras model and return a compiled model."""

        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import \
            Masking, LSTM, Dense, TimeDistributed, Activation

        # Build Model
        model = Sequential()

        # the shape of the y vector of the labels,
        # determines which output from rnn will be used
        # to calculate the loss
        if len(output_shape) == 1:
            # y is (num examples, num features) so
            # only the last output from the rnn is used to
            # calculate the loss
            model.add(Masking(mask_value=-1, input_shape=input_shape))
            model.add(LSTM(self.rnn_size, dropout=0.2))
            model.add(Dense(input_dim=self.rnn_size, units=output_shape[-1]))
        elif len(output_shape) == 2:
            # y is (num examples, max_dialogue_len, num features) so
            # all the outputs from the rnn are used to
            # calculate the loss, therefore a sequence is returned and
            # time distributed layer is used

            # the first value in input_shape is max dialogue_len,
            # it is set to None, to allow dynamic_rnn creation
            # during prediction
            model.add(Masking(mask_value=-1,
                              input_shape=(None, input_shape[1])))
            model.add(LSTM(self.rnn_size, return_sequences=True, dropout=0.2))
            model.add(TimeDistributed(Dense(units=output_shape[-1])))
        else:
            raise ValueError("Cannot construct the model because"
                             "length of output_shape = {} "
                             "should be 1 or 2."
                             "".format(len(output_shape)))

        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        logger.debug(model.summary())

        return model
Example #5
    def build(numChannels, imgRows, imgCols, numClasses, activation="relu"):
        # initialize the model
        model = Sequential()
        inputShape = (imgRows, imgCols, numChannels)
        # if we are using "channels first", update the input shape
        if K.image_data_format() == "channels_first":
            inputShape = (numChannels, imgRows, imgCols)
        #  first set of CONV => ACTIVATION => POOL layers
        model.add(Conv2D(20, 5, padding="same",input_shape=inputShape))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        #  second set of CONV => ACTIVATION => POOL layers
        model.add(Conv2D(50, 5, padding="same"))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        
        # FC Layers
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation(activation))
        model.add(Dense(numClasses))
        model.add(Activation("softmax"))
        
        return model
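
# A usage sketch, assuming build() is exposed as a @staticmethod on its class
# (called LeNet here, which is an assumption) and that the Conv2D/MaxPooling2D/
# Flatten/Dense/Activation/K imports the method needs are in scope. The
# returned model still has to be compiled:
model = LeNet.build(numChannels=1, imgRows=28, imgCols=28, numClasses=10)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()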
Example #6
### The AI winter
from sklearn.svm import LinearSVC, SVC
import numpy as np
from sklearn.metrics import accuracy_score  # pull accuracy_score in separately from metrics
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

#1. Data
x_data = [[0, 0], [1, 0], [0, 1], [1, 1]]
y_data = [0, 1, 1, 0]  # XOR: the label is 1 when the two inputs differ

#2. Model
# model = LinearSVC()
# model = SVC()
model = Sequential()
model.add(Dense(1, input_dim=2, activation='sigmoid'))  # sigmoid since y is binary (0/1)
# input and output layers only (no hidden layer)

#3. Training
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
model.fit(x_data, y_data, batch_size=1, epochs=100)

#4. Evaluation and prediction
y_pred = model.predict(x_data)
print(x_data, 'predictions:', y_pred)

result = model.evaluate(x_data, y_data)  # Keras models have no .score(); evaluate returns [loss, acc]
print('accuracy:', result[1])

acc = accuracy_score(y_data, np.round(y_pred))  # round the sigmoid outputs before comparing
Example #7
def CNN3D_Model(config):
    lamda = config.Regularization_term
    filt1 = (config.Filter_shape_dim1[0], config.Filter_shape_dim2[0],
             config.Filter_shape_dim3[0])
    filt2 = (config.Filter_shape_dim1[1], config.Filter_shape_dim2[1],
             config.Filter_shape_dim3[1])
    size1 = config.hidden_size[0]
    size2 = config.hidden_size[1]
    pool_shape1 = (config.Pool_shape_dim1[0], config.Pool_shape_dim2[0],
                   config.Pool_shape_dim3[0])
    pool_shape2 = (config.Pool_shape_dim1[1], config.Pool_shape_dim2[1],
                   config.Pool_shape_dim3[1])
    input_shape = config.model_input_dim
    dense_size1 = config.Dense_size[0]
    dense_size2 = config.Dense_size[1]
    p_dropout = config.dropout

    model = Sequential()
    model.add(
        Conv3D(size1,
               kernel_size=filt1,
               activation='relu',
               bias_regularizer='l2',
               input_shape=input_shape))
    model.add(MaxPooling3D(pool_size=pool_shape1))
    model.add(
        Conv3D(size2,
               kernel_size=filt2,
               activation='relu',
               bias_regularizer='l2'))
    model.add(MaxPooling3D(pool_size=pool_shape2))
    model.add(Flatten())
    model.add(Dense(dense_size1, kernel_regularizer='l2', activation='relu'))
    model.add(Dense(dense_size2, kernel_regularizer='l2', activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    return model
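
# CNN3D_Model returns an uncompiled binary classifier (note the final sigmoid
# unit), and `lamda`/`p_dropout` are read from the config but never used. A
# minimal compile step with assumed settings, `config` being the configuration
# object described above:
model = CNN3D_Model(config)
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])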
Example #8
def create_keras_model():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    #model.add(keras.layers.Dropout(0.5))
    #model.add(keras.layers.Flatten())
    model.add(Dense(10, activation='softmax', name ='predictions'))
    return model
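
# create_keras_model() returns the network uncompiled; a sketch (assumed
# settings) that compiles it and runs one step on CIFAR-10-shaped placeholder
# data:
import numpy as np

model = create_keras_model()
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
x = np.random.rand(8, 32, 32, 3)       # placeholder 32x32 RGB images
y = np.random.randint(0, 10, size=8)   # placeholder integer labels
model.fit(x, y, epochs=1, verbose=0)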
Example #9
def build_model():
    model = Sequential()
    model.add(
        Conv2D(filters=32,
               kernel_size=(3, 3),
               input_shape=(128, 128, 3),
               activation='relu',
               padding='same'))
    model.add(
        Conv2D(filters=32,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))
    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(
        Conv2D(filters=128,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))
    model.add(
        Conv2D(filters=128,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())

    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(3, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])

    return model
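
# Usage sketch: the network expects 128x128 RGB inputs and one-hot labels over
# 3 classes (the placeholder data below is assumed, not from the original):
import numpy as np

model = build_model()
x = np.random.rand(4, 128, 128, 3)               # placeholder image batch
y = np.eye(3)[np.random.randint(0, 3, size=4)]   # one-hot labels over 3 classes
model.fit(x, y, epochs=1, verbose=0)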
Example #10
    def model(self):
        '''Build the full model'''
        optimizer = Adam(0.0002, 0.5)
        # Generator
        inputGen = Input(shape=(self.mydata.capsize,))
        g = Sequential()
        g.add(Dense(256, activation='sigmoid'))
        g.add(Dense(512, activation='sigmoid'))
        g.add(Dense(1024, activation='sigmoid'))
        g.add(Dense(self.mydata.imgsize, activation='tanh'))
        g1 = g(inputGen)
        self.Generator = Model(inputs=inputGen, outputs=g1)
        # self.Generator.compile(loss='binary_crossentropy', optimizer='adam')

        # Discriminator
        inputDis = Input(shape=(self.mydata.imgsize,))
        d = Sequential()
        d.add(Dense(512, activation='sigmoid'))
        d.add(Dense(256, activation='sigmoid'))
        d.add(Dense(1, activation='sigmoid'))
        d1 = d(inputDis)
        self.Discriminator = Model(inputs=inputDis, outputs=d1)
        self.Discriminator.trainable = True
        self.Discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])

        # Combined model: train the generator through the frozen discriminator
        Generateimg = self.Generator(inputGen)
        self.Discriminator.trainable = False
        validity = self.Discriminator(Generateimg)
        self.combined = Model(inputGen, validity)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
Example #11
import keras.models

verbose, epochs, batch_size = 0, 100, 32
n_timesteps, n_features, n_outputs = X_train.shape[1], X_train.shape[2], 5

import tensorflow as tf
"""
tf.compat.v1.disable_eager_execution()
tf.keras.backend.clear_session()


graph = tf.compat.v1.get_default_graph()
global graph
"""

regressor = Sequential()
regressor.add(
    Conv1D(filters=32,
           kernel_size=5,
           activation='relu',
           input_shape=(n_timesteps, n_features)))
regressor.add(Dropout(0.1))

regressor.add(Conv1D(filters=64, kernel_size=5, activation='relu'))
regressor.add(Dropout(0.2))

regressor.add(MaxPooling1D(pool_size=2))  # use the Keras pooling layer rather than tf.compat.v1.layers

regressor.add(Flatten())

regressor.add(Dense(100, activation='relu'))
Example #12
#1_2. Data preprocessing
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, shuffle=True, random_state=66)
x_test, x_val, y_test, y_val = train_test_split(x_test, y_test, train_size=0.8, shuffle=True, random_state=66)

from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)

#2. Modeling
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input, Dropout
model = Sequential()
model.add(Dense(100, input_dim=10, activation='relu'))  # default: activation='linear'
model.add(Dropout(0.1))
model.add(Dense(75, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(50, activation='relu'))
model.add(Dense(1))

#3. Compile and train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
Example #13
def train_data(features, labels):

    features = features / 255.0
    y_labels = np_utils.to_categorical(labels)

    dense_layers = [0, 1, 2, 3, 4, 5]
    sizes_layers = [32, 64, 128, 256]
    conv_layers = [1, 2, 3, 4]

    for dense in dense_layers:
        for size in sizes_layers:
            for conv in conv_layers:

                name_model = 'Training_Model_{}_Dense_{}_Size_{}_Conv_{}'.format(dense,size,conv,int(time.time()))

                tensorboard = TensorBoard(log_dir='logs\\{}'.format(name_model))

                model = Sequential()

                model.add(Conv2D(size, (3, 3), input_shape=features.shape[1:]))
                model.add(Activation("relu"))
                model.add(MaxPool2D(pool_size=(2, 2)))

                for layer in range(conv-1):
                    model.add(Conv2D(size, (3, 3)))
                    model.add(Activation("relu"))
                    model.add(MaxPool2D(pool_size=(2, 2)))

                model.add(Flatten())

                for layer in range(dense):
                    model.add(Dense(size))
                    model.add(Activation('relu'))

                model.add(Dense(7))
                model.add(Activation('softmax'))

                model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

                print(name_model)
                print(model.summary())
                model.fit(features, y_labels, batch_size=32, epochs=25, validation_split=0.1, callbacks=[tensorboard])

Example #14
# from sklearn.preprocessing import OneHotEncoder
# ohe = OneHotEncoder()
# y_test = y_test.reshape(-1,1)
# ohe.fit(y_test)
# y_test = ohe.transform(y_test).toarray()

# y_train = y_train.reshape(-1,1)
# ohe.fit(y_train)
# y_train = ohe.transform(y_train).toarray()

# 2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout

model = Sequential()
model.add(
    Conv2D(300, (2, 2), padding='same', strides=2, input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(200, (4, 4), padding='same', strides=2))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(100, (2, 2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Dropout(0.2))
model.add(Dense(128))
model.add(Dropout(0.2))
model.add(Dense(64))
model.add(Dense(10, activation='softmax'))
model.summary()
Example #15
def train_surv_model(input_x, input_y, l2, verbose=0, random_seed=0):
    seed(random_seed)
    tensorflow.random.set_seed(random_seed)
    model = Sequential()
    model.add(
        Dense(1,
              input_dim=input_x.shape[1],
              bias_initializer='zeros',
              kernel_regularizer=regularizers.l2(l2)))
    model.add(Dense(4))
    model.add(Activation('relu'))
    model.add(Dense(n_intervals))
    model.add(Activation('sigmoid'))
    model.compile(loss=surv_likelihood(n_intervals),
                  optimizer=optimizers.RMSprop())
    early_stopping = EarlyStopping(monitor='loss', patience=5)
    history = model.fit(input_x,
                        input_y,
                        batch_size=256,
                        epochs=100000,
                        callbacks=[early_stopping],
                        verbose=verbose)
    return model
Example #16
print(train_X.shape)
print(test_X.shape)


#Build LSTM model on training data
#The model architecture used here is slightly more complex. Its elements are:

#LSTM input layer with window_size units.
#Dropout layer to prevent overfitting (see: http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf).
#A second LSTM layer with 256 units.
#A further Dropout layer.
#A Dense layer to produce a single output.
#Use MSE as loss function.


model2 = Sequential()
model2.add(LSTM(input_shape=(window_size, 1),
                units=window_size,
                return_sequences=True))
model2.add(Dropout(0.5))
model2.add(LSTM(256))
model2.add(Dropout(0.5))
model2.add(Dense(1))
model2.add(Activation("linear"))
model2.compile(loss="mse",
               optimizer="adam")
print(model2.summary())

# Fit the model.
model2.fit(train_X,
           train_Y)
Example #17
# Import testing dataset
test_dataset = pd.read_csv('iris_test.csv', names=COLUMN_NAMES, header=0)
test_x = test_dataset.iloc[:, 0:4].values
test_y = test_dataset.iloc[:, 4].values

# Encoding training dataset
encoding_train_y = to_categorical(train_y)

# Encoding testing dataset
encoding_test_y = to_categorical(test_y)

#print(encoding_train_y)

# Creating a model
model = Sequential()
model.add(Dense(10, input_dim=4, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10,  activation='relu'))
model.add(Dense(3, activation='softmax'))

# Compiling model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Training a model
model.fit(train_x, encoding_train_y, epochs=200, batch_size=10)

# Evaluate the model
scores = model.evaluate(test_x, encoding_test_y)
print("\nAccuracy: %.2f%%" % (scores[1]*100))
Example #18
# x = scaler.transform(x)

x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8)

# model = Sequential() #r2 = 0.6
# model.add(Dense(30, activation='relu', input_shape=(10,)))
# model.add(Dense(80, activation='relu'))
# model.add(Dropout(0.3))
# model.add(Dense(300, activation='relu'))
# model.add(Dropout(0.3))
# model.add(Dense(80, activation='relu'))
# model.add(Dropout(0.3))
# model.add(Dense(30, activation='relu'))
# model.add(Dense(1))

model = Sequential()  #r2 =
model.add(Dense(10, activation='relu', input_shape=(10, )))
model.add(Dense(300, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(30, activation='relu'))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam', metrics=['mae'])
earlystopping = EarlyStopping(monitor='loss', patience=20, mode='min')

modelPath = './save/diabetes/keras49_cp_5_diabetes_dnn{epoch:02d}--{val_loss:.4f}.hdf5'
checkPoint = ModelCheckpoint(filepath=modelPath,
                             monitor='val_loss',
                             save_best_only=True)
Example #19
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # Convert class vectors to binary class matrices
    y_train = tf.keras.utils.to_categorical(y_train, num_classes)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3),
                     activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    # Horovod: adjust learning rate based on number of GPUs.
    opt = tf.keras.optimizers.Adadelta(1.0 * hvd.size())

    # Horovod: add Horovod Distributed Optimizer.
Example #20
def vgg16(input_shape, num_classes, weights_path=None, pooling='avg'):
    # Build the VGG16 model
    model = Sequential()

    # Block 1
    model.add(
        Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv1',
               input_shape=input_shape))
    model.add(
        Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv2'))
    # model.add(BatchNormalization(name='bn_1'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))

    # Block 2
    model.add(
        Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv1'))
    model.add(
        Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv2'))
    # model.add(BatchNormalization(name='bn_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))

    # Block 3
    model.add(
        Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv1'))
    model.add(
        Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv2'))
    model.add(
        Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv3'))
    # model.add(BatchNormalization(name='bn_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))

    # Block 4
    model.add(
        Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv1'))
    model.add(
        Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv2'))
    model.add(
        Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv3'))
    # model.add(BatchNormalization(name='bn_4'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))

    # Block 5
    model.add(
        Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv1'))
    model.add(
        Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv2'))
    model.add(
        Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv3'))
    # model.add(BatchNormalization(name='bn_5'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))

    if weights_path:
        model.load_weights(weights_path)

    out = model.get_layer('block5_pool').output

    if pooling is None:
        out = Flatten(name='flatten')(out)
        out = Dense(512,
                    activation='relu',
                    kernel_initializer='he_normal',
                    name='fc')(out)
        out = Dropout(0.5)(out)
        # out = Dense(512, activation='relu', kernel_initializer='he_normal', name='fc2')(out)
        # out = Dropout(0.5)(out)
    elif pooling == 'avg':
        out = GlobalAveragePooling2D(name='global_avg_pool')(out)
    elif pooling == 'max':
        out = GlobalMaxPooling2D(name='global_max_pool')(out)

    out = Dense(num_classes,
                activation='softmax',
                kernel_initializer='he_normal',
                name='predict')(out)

    model = Model(model.input, out)

    return model
Example #21
from tensorflow.keras.layers import Flatten,  MaxPooling2D, Conv2D
from tensorflow.keras.callbacks import TensorBoard

(X_train,y_train), (X_test, y_test) = mnist.load_data()

X_train = X_train.reshape(60000,28,28,1).astype('float32')
X_test = X_test.reshape(10000,28,28,1).astype('float32')

X_train /= 255
X_test /= 255

n_classes = 10
y_train = keras.utils.to_categorical(y_train, n_classes)
y_test = keras.utils.to_categorical(y_test, n_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=(28,28,1)) )
model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

tensor_board = TensorBoard('./logs/LeNet-MNIST-1')

model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=1, validation_data=(X_test,y_test), callbacks=[tensor_board])
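
# Optional follow-up (not in the original snippet): report held-out
# performance after training.
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss: %.4f, test accuracy: %.4f' % (score[0], score[1]))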
Example #22
def bi_lstm(input_len):
    model = Sequential()
    model.add(
        Bidirectional(LSTM(128, return_sequences=True),
                      input_shape=(input_len[1], input_len[2])))
    model.add(Bidirectional(LSTM(128)))
    model.add(Dropout(0.25))

    if RESEARCH_QUESTION == "q1":
        model.add(Dense(3, activation="softmax"))
    if RESEARCH_QUESTION == "q2":
        model.add(Dense(2, activation="softmax"))
    if RESEARCH_QUESTION == "q3":
        model.add(Dense(2, activation="softmax"))

    return model
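
# Usage sketch (assumptions): RESEARCH_QUESTION is a module-level flag, and
# input_len is a shape tuple such as (samples, timesteps, features); the
# model comes back uncompiled.
RESEARCH_QUESTION = "q1"       # assumed flag value
model = bi_lstm((100, 20, 8))  # 100 samples, 20 steps, 8 features
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])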
Example #23
def train_data():
    iq = login()

    #actives = ['EURUSD','GBPUSD','EURJPY','AUDUSD']

    df = get_data_needed(iq)

    df.isnull().sum().sum()  # there are no nans
    df.fillna(method="ffill", inplace=True)
    df = df.loc[~df.index.duplicated(keep='first')]

    df['future'] = df["close"].shift(
        -FUTURE_PERIOD_PREDICT)  # future prediction

    df['MA_20'] = df['close'].rolling(window=20).mean()  #moving average 20
    df['MA_50'] = df['close'].rolling(window=50).mean()  #moving average 50

    df['L14'] = df['min'].rolling(window=14).min()
    df['H14'] = df['max'].rolling(window=14).max()
    df['%K'] = 100 * ((df['close'] - df['L14']) /
                      (df['H14'] - df['L14']))  #stochastic oscilator
    df['%D'] = df['%K'].rolling(window=3).mean()

    df['EMA_20'] = df['close'].ewm(
        span=20, adjust=False).mean()  #exponential moving average
    df['EMA_50'] = df['close'].ewm(span=50, adjust=False).mean()

    rsi_period = 14
    chg = df['close'].diff(1)
    gain = chg.mask(chg < 0, 0)
    df['gain'] = gain
    loss = chg.mask(chg > 0, 0)
    df['loss'] = loss
    avg_gain = gain.ewm(com=rsi_period - 1, min_periods=rsi_period).mean()
    avg_loss = loss.ewm(com=rsi_period - 1, min_periods=rsi_period).mean()

    df['avg_gain'] = avg_gain
    df['avg_loss'] = avg_loss
    rs = abs(avg_gain / avg_loss)
    df['rsi'] = 100 - (100 / (1 + rs))  #rsi index

    df = df.drop(columns={
        'open', 'min', 'max', 'avg_gain', 'avg_loss', 'L14', 'H14', 'gain',
        'loss'
    })  #drop columns that are too correlated or are in somehow inside others

    df = df.dropna()
    dataset = df.fillna(method="ffill")
    dataset = dataset.dropna()

    dataset.sort_index(inplace=True)

    main_df = dataset

    main_df.fillna(
        method="ffill",
        inplace=True)  # if there are gaps in data, use previously known values
    main_df.dropna(inplace=True)

    main_df['target'] = list(map(classify, main_df['close'],
                                 main_df['future']))

    main_df.dropna(inplace=True)

    main_df['target'].value_counts()

    main_df.dropna(inplace=True)

    main_df = main_df.astype('float32')

    times = sorted(main_df.index.values)
    last_5pct = sorted(main_df.index.values)[-int(0.1 * len(times))]

    validation_main_df = main_df[(main_df.index >= last_5pct)]
    main_df = main_df[(main_df.index < last_5pct)]

    train_x, train_y = preprocess_df(main_df)
    validation_x, validation_y = preprocess_df(validation_main_df)

    print(f"train data: {len(train_x)} validation: {len(validation_x)}")
    print(f"sells: {train_y.count(0)}, buys: {train_y.count(1)}")
    print(
        f"VALIDATION sells: {validation_y.count(0)}, buys : {validation_y.count(1)}"
    )

    train_y = np.asarray(train_y)
    validation_y = np.asarray(validation_y)

    LEARNING_RATE = 0.001  # that's right
    EPOCHS = 40  # how many passes through our data; 20 also worked well
    BATCH_SIZE = 16  # how many batches? Try smaller batch if you're getting OOM (out of memory) errors.
    NAME = f"{LEARNING_RATE}-{SEQ_LEN}-SEQ-{FUTURE_PERIOD_PREDICT}-{EPOCHS}-{BATCH_SIZE}-PRED-{int(time.time())}"  # a unique name for the model
    print(NAME)

    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus),
                  "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)

    model = Sequential()
    model.add(LSTM(128, input_shape=(train_x.shape[1:]),
                   return_sequences=True))
    model.add(Dropout(0.2))
    model.add(
        BatchNormalization()
    )  #normalizes activation outputs, same reason you want to normalize your input data.

    model.add(LSTM(128, return_sequences=True))
    model.add(Dropout(0.1))
    model.add(BatchNormalization())

    model.add(LSTM(128))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.2))

    model.add(Dense(2, activation='softmax'))

    opt = tf.keras.optimizers.Adam(lr=LEARNING_RATE, decay=5e-5)

    # Compile model
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))

    filepath = "LSTM-best"  # unique file name that will include the epoch and the validation acc for that epoch
    checkpoint = ModelCheckpoint("models/{}.model".format(filepath),
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')  # saves only the best ones

    # Train model
    history = model.fit(
        train_x,
        train_y,
        batch_size=BATCH_SIZE,
        epochs=EPOCHS,
        validation_data=(validation_x, validation_y),
        callbacks=[tensorboard, checkpoint],
    )

    prediction = pd.DataFrame(model.predict(validation_x))

    m = np.zeros_like(prediction.values)
    m[np.arange(len(prediction)), prediction.values.argmax(1)] = 1

    prediction = pd.DataFrame(m, columns=prediction.columns).astype(int)
    prediction = prediction.drop(columns={1})
    validation_y = pd.DataFrame(validation_y)

    # rows predicted as class 0 with probability above 0.55
    high_acurate = prediction.loc[prediction[0] > 0.55]

    # indices of the predictions above the threshold
    high_index = high_acurate.index

    # convert the numpy array to a DataFrame
    validation_y_used = pd.DataFrame(validation_y)
    # select the rows at those indices
    prediction_compare = validation_y_used.loc[high_index]
    # show the counts; since we selected class 0, it should have the larger share
    prediction_compare[0].value_counts()
    len(prediction)
    len(prediction)

    from sklearn.metrics import accuracy_score

    acc = accuracy_score(validation_y, prediction)

    return filepath
Example #24
                                                  y_train,
                                                  test_size=.2,
                                                  random_state=45)

from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
x_val = scaler.transform(x_val)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout

model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(10, )))
model.add(Dropout(0.2))
model.add(Dense(8, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(8, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(8, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.fit(x_train,
          y_train,
          epochs=100,
          batch_size=1)
Example #25
def CNNModel(config):
    input_shape = config.model_input_dim
    lamda = config.Regularization_term
    p_dropout = config.dropout
    filt1 = (config.Filter_shape_dim1[0], config.Filter_shape_dim2[0])
    filt2 = (config.Filter_shape_dim1[1], config.Filter_shape_dim2[1])
    filt3 = (config.Filter_shape_dim1[2], config.Filter_shape_dim2[2])

    size1 = config.hidden_size[0]
    size2 = config.hidden_size[1]
    size3 = config.hidden_size[2]

    pool_shape1 = (config.Pool_shape_dim1[0], config.Pool_shape_dim2[0])
    pool_shape2 = (config.Pool_shape_dim1[1], config.Pool_shape_dim2[1])
    pool_shape3 = (config.Pool_shape_dim1[2], config.Pool_shape_dim2[2])

    dense_size1 = config.Dense_size[0]
    dense_size2 = config.Dense_size[1]
    dense_size3 = config.Dense_size[2]
    model = Sequential()
    model.add(
        Conv2D(size1,
               kernel_size=filt1,
               activation='relu',
               bias_regularizer=keras.regularizers.l2(lamda),
               kernel_regularizer=keras.regularizers.l2(lamda),
               input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=pool_shape1))
    if p_dropout != 0:
        model.add(Dropout(rate=p_dropout))
    model.add(
        Conv2D(size2,
               kernel_size=filt2,
               activation='relu',
               bias_regularizer=keras.regularizers.l2(lamda),
               kernel_regularizer=keras.regularizers.l2(lamda)))
    model.add(MaxPooling2D(pool_size=pool_shape2))
    if p_dropout != 0:
        model.add(Dropout(rate=p_dropout))
    model.add(
        Conv2D(size3,
               kernel_size=filt3,
               activation='relu',
               bias_regularizer=keras.regularizers.l2(lamda),
               kernel_regularizer=keras.regularizers.l2(lamda)))
    model.add(MaxPooling2D(pool_size=pool_shape3))
    if p_dropout != 0:
        model.add(Dropout(rate=p_dropout * 2))
    model.add(Flatten())
    model.add(
        Dense(dense_size1,
              kernel_regularizer=keras.regularizers.l2(lamda),
              activation='relu'))
    model.add(
        Dense(dense_size2,
              kernel_regularizer=keras.regularizers.l2(lamda),
              activation='relu'))
    model.add(
        Dense(dense_size3,
              kernel_regularizer=keras.regularizers.l2(lamda),
              activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    return model
Example #26
from tensorflow.keras import Input
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Dropout, Flatten, Dense, ZeroPadding2D, DepthwiseConv2D, GlobalAveragePooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ReduceLROnPlateau

from load_data import tensor_load

if __name__ == "__main__":
	X_train1, X_test1, y_train1, y_test1 = tensor_load()

	model = Sequential()
	model.add(Input(shape=(28,28,1)))
	model.add(ZeroPadding2D(padding=(2,2)))
	# 1_32
	model.add(Conv2D(32, kernel_size=3, padding="same", activation="relu"))
	model.add(BatchNormalization())
	# 1_64
	model.add(DepthwiseConv2D(kernel_size=3, padding="same", strides=1, activation="relu"))
	model.add(BatchNormalization())
	model.add(Conv2D(64, kernel_size=1, padding="same", activation="relu"))
	model.add(BatchNormalization())
	# 1_128
	model.add(DepthwiseConv2D(kernel_size=3, padding="same", strides=2, activation="relu"))
	model.add(BatchNormalization())
	model.add(Conv2D(128, kernel_size=1, padding="same", activation="relu"))
	model.add(BatchNormalization())
	# 2_128
	model.add(DepthwiseConv2D(kernel_size=3, padding="same", strides=1, activation="relu"))
	model.add(BatchNormalization())
Example #27
import data_utils as du
import os

from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.layers import BatchNormalization
import numpy as np

np.random.seed(1000)

x, y = du.load_data()

# (3) Create a sequential model
model = Sequential()

# 1st Convolutional Layer
model.add(Conv2D(filters=96, input_shape=(224,224,3), kernel_size=(11,11),\
 strides=(4,4), padding='valid'))
model.add(Activation('relu'))
# Pooling
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
# Batch Normalisation before passing it to the next layer
model.add(BatchNormalization())

# 2nd Convolutional Layer
model.add(
    Conv2D(filters=256, kernel_size=(11, 11), strides=(1, 1), padding='valid'))
model.add(Activation('relu'))
# Pooling
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
Example #28
    def train_model(self, labeled_jobs, params={}):
        min_sequence_length = params.get(
            'min_sequence_length', config['model']['min_sequence_length'])
        max_sequence_length = params.get(
            'max_sequence_length', config['model']['max_sequence_length'])
        learning_rate = params.get('learning_rate',
                                   config['model']['learning_rate'])
        epochs = params.get('epochs', config['model']['epochs'])
        batch_size = params.get('batch_size', config['model']['batch_size'])
        lstm_units = params.get('lstm_units', config['model']['lstm_units'])
        lstm_layers = params.get('lstm_layers', config['model']['lstm_layers'])
        use_tfidf = params.get('use_tfidf', config['model']['use_tfidf'])
        dropout = params.get('dropout', config['model']['dropout'])
        test_split = params.get('test_split', config['model']['test_split'])
        dev_split = params.get('dev_split', config['model']['dev_split'])
        synthesize_factor = params.get('synthesize_factor',
                                       config['model']['synthesize_factor'])
        use_amsgrad = params.get('use_amsgrad', config['model']['use_amsgrad'])

        embedding_keyed_vectors = self.get_embedding_vectors()
        df = pandas.DataFrame(labeled_jobs)
        processed_labeled = self.process(
            df,
            params.get('min_sequence_length',
                       config['model']['min_sequence_length']))
        processed_labeled = self.fix_skew(processed_labeled, synthesize_factor)
        x_train, x_test, y_train, y_test = train_test_split(
            numpy.array(processed_labeled['tokens']),
            numpy.array(processed_labeled[LABEL_KEY]),
            test_size=test_split)
        x_train = self.labelize_jobs(x_train, 'TRAIN')
        x_test = self.labelize_jobs(x_test, 'TEST')

        tfidf = self.build_tfdif(x_train, 10) if use_tfidf else None

        train_vecs = self.build_vector_list(x_train, embedding_keyed_vectors,
                                            tfidf, max_sequence_length)
        test_vecs = self.build_vector_list(x_test, embedding_keyed_vectors,
                                           tfidf, max_sequence_length)
        y_train = y_train.astype(float)
        y_test = y_test.astype(float)

        model = Sequential()
        model.add(
            LSTM(lstm_units,
                 input_shape=train_vecs.shape[1:],
                 return_sequences=(lstm_layers > 1)))
        model.add(Dropout(dropout))
        for _ in range(max(0, lstm_layers - 2)):
            model.add(LSTM(lstm_units, return_sequences=True))
            model.add(Dropout(dropout))
        if lstm_layers > 1:
            model.add(LSTM(lstm_units))
            model.add(Dropout(dropout))
        model.add(Dense(1, activation='sigmoid'))
        opt = Adam(learning_rate=learning_rate, amsgrad=use_amsgrad)
        model.compile(optimizer=opt,
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

        history = model.fit(train_vecs,
                            y_train,
                            epochs=epochs,
                            batch_size=batch_size,
                            validation_split=dev_split,
                            verbose=2)
        print(model.summary())
        score = model.evaluate(test_vecs,
                               y_test,
                               batch_size=batch_size,
                               verbose=2)
        print(f'Model score: {score[1]}')

        # TODO: use config to reject models below certain accuracy threshold
        model.save(MODEL_SAVE_PATH, save_format='tf')
        self.model = keras.models.load_model(MODEL_SAVE_PATH)

        # Numpy float values are not serializable by flask
        return score[1], history.history, max_sequence_length
Example #29
    (image_paths['validation'], image_labels['validation']))
validation_dataset = validation_dataset.map(load_image,
                                            num_parallel_calls=tf.data.experimental.AUTOTUNE)
validation_dataset = validation_dataset.map(process_and_not_augment_image,
                                            num_parallel_calls=tf.data.experimental.AUTOTUNE)
validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=True)


# ## Train a small CNN from scratch
#
# Similarly as with MNIST digits, we can start from scratch and train
# a CNN for the classification task.
#
# ### Initialization

model = Sequential()

model.add(Conv2D(32, (3, 3), input_shape=INPUT_IMAGE_SIZE,
                 activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(43, activation='softmax'))
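
# The fragment ends before compiling; a minimal compile step (assumed
# settings) for this 43-class classifier:
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])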
Example #30
def vgg_face():
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Convolution2D(4096, (7, 7), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Convolution2D(4096, (1, 1), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Convolution2D(2622, (1, 1)))

    model.add(Flatten())

    model.load_weights('D:\\Downloads\\vgg_face_weights.h5')

    return model
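
# Usage sketch: vgg_face() requires the pretrained weights file at the
# hard-coded path above. The loaded network maps a 224x224 RGB face image to
# 2622 identity scores (the input below is a placeholder):
import numpy as np

model = vgg_face()
face = np.random.rand(1, 224, 224, 3)  # placeholder image batch
scores = model.predict(face)           # shape (1, 2622)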
Example #31
    [0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
    [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
    [0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
    [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
], dtype='float32')

# this takes a looong time to index, and
# python may crash several times before indexing is complete
import tensorflow as tf

from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation

model = Sequential()

model.add(Dense(8,
                activation=keras.activations.sigmoid,
                ))
model.add(Dense(17,
                activation=keras.activations.sigmoid,
                ))
model.compile(
    optimizer=tf.train.AdamOptimizer(0.001),
    loss=keras.losses.categorical_crossentropy,
    # loss=keras.losses.mse,
    metrics=[keras.metrics.binary_accuracy]
)

# This is the process I used to train my weights
Example #32
import pickle
import time
import numpy as np

# NAME ="Elephant-vs-Cat-cnn-64x2-{}".format(int(time.time()))
# tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))

# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

X = pickle.load(open("x.pickle", "rb"))
y = pickle.load(open("y.pickle", "rb"))
X = np.array(X)
X = X / 255.0

model = Sequential()

model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))

model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())

model.add(Dense(64))
model.add(Activation("relu"))
Example #33
#!/usr/bin/python3

# import tensorflow as tf
import numpy as np

from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Network architecture
modele = Sequential()

# Neuron layers
modele.add(Dense(2, input_dim=1, activation='relu'))
modele.add(Dense(1, activation='relu'))

# Layer 0 - set the weights by hand
coeff = np.array([[1., -0.5]])
biais = np.array([-1, 1])
poids = [coeff, biais]
modele.layers[0].set_weights(poids)

# Check
verif_poids = modele.layers[0].get_weights()
print(verif_poids)

# Layer 1 - set the weights by hand
coeff = np.array([[1.0], [1.0]])
biais = np.array([0])
poids = [coeff, biais]
modele.layers[1].set_weights(poids)
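
# A quick numeric check of the hand-set weights: for input x = 2, layer 0
# computes relu([2*1 - 1, 2*(-0.5) + 1]) = [1, 0], and layer 1 computes
# relu(1*1 + 0*1 + 0) = 1, so the network should print [[1.]].
x = np.array([[2.0]])
print(modele.predict(x))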
Example #34
                             str(row[0]))
    ids = list(range(len(labels)))
    labels = LabelBinarizer().fit_transform(labels)
    return DataGenerator(ids, labels, filepaths)


train_generator = createDataGenerator(
    "/home/samygarg/hanuman/20bn-jester-v1/annotations/jester-v1-train.csv")
test_generator = createDataGenerator(
    "/home/samygarg/hanuman/20bn-jester-v1/annotations/jester-v1-validation.csv"
)

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
NUM_CLASSES = 27

preprocess = Sequential()
preprocess.add(Masking(mask_value=0.))
preprocess.add(
    Conv2D(64, kernel_size=(4, 4), strides=(1, 1), activation='relu'))
preprocess.add(
    Conv2D(64, kernel_size=(4, 4), strides=(2, 2), activation='relu'))
preprocess.add(Dropout(0.5))
preprocess.add(
    Conv2D(64, kernel_size=(4, 4), strides=(1, 1), activation='relu'))
preprocess.add(
    Conv2D(64, kernel_size=(4, 4), strides=(2, 2), activation='relu'))
preprocess.add(Dropout(0.5))
preprocess.add(
    Conv2D(64, kernel_size=(4, 4), strides=(1, 1), activation='relu'))
preprocess.add(
    Conv2D(64, kernel_size=(4, 4), strides=(2, 2), activation='relu'))
Example #35
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D

import pickle

pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)

pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)

X = X/255.0

model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors

model.add(Dense(64))

model.add(Dense(1))
model.add(Activation('sigmoid'))
Example #36
    x_col='splimage_path',
    y_col='is_oval',
    class_mode='binary',
    batch_size=28,
    shuffle=False,
    target_size=Img_size,
)

model = Sequential([
    layers.Conv2D(16, (3, 3),
                  padding='same',
                  activation='relu',
                  input_shape=(256, 256, 3)),
    layers.MaxPool2D(pool_size=(2, 2)),
    layers.Conv2D(32, (3, 3), padding='same', activation='relu'),
    layers.MaxPool2D(pool_size=(2, 2)),
    layers.Conv2D(64, (3, 3), padding='same', activation='relu'),
    layers.MaxPool2D(pool_size=(2, 2)),
    layers.Conv2D(128, (3, 3), padding='same', activation='relu'),
    layers.MaxPool2D(pool_size=(2, 2)),
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(1, activation='sigmoid')
])

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()

history = model.fit(train_gen)
Example #37
#https://www.youtube.com/watch?v=V23DmbdzMvg
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn import preprocessing

iris = load_iris()
X = preprocessing.scale(iris['data'])
Y = to_categorical(iris['target'])

#print(X)
#print(Y)

#training data and test data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)

#model
model = Sequential()
model.add(Dense(10, input_dim=4, activation='relu'))
model.add(Dense(10,  activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

#fitting the model
model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=200, batch_size=10)
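
# Follow-up evaluation sketch (not in the original): accuracy on the held-out
# 20% split.
loss, acc = model.evaluate(X_test, Y_test, verbose=0)
print('test accuracy: %.3f' % acc)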
Example #38
plt.grid()
plt.show()


import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
# sc_X = StandardScaler()
# X2_train = sc_X.fit_transform(X_train)
# X2_test = sc_X.transform(X_test)
# test2 = sc_X.transform(test)

model = Sequential()

model.add(Dense(7, activation='relu'))
# model.add(Dropout(0.3))

model.add(Dense(5, activation='relu'))
# model.add(Dropout(0.2))

model.add(Dense(2, activation='relu'))
# model.add(Dropout(0.2))

# model.add(Dense(3, activation = 'relu'))
# model.add(Dropout(0.2))

model.add(Dense(1, activation='relu'))
Example #39
main_df = scaling(main_df)

# split into train and validation
training_df = main_df[:last_10pct]
validation_df = main_df[last_10pct:last_5pct]

# preprocess the data
train_x, train_y = preprocess_df(training_df)
validation_x, validation_y = preprocess_df(validation_df)

# some statistics
print(f"train data: {len(train_x)} validation: {len(validation_x)}")
print(f"don't buys: {train_y.count(0)}, buys: {train_y.count(1)}")
print(f"VALIDATION don't buys: {validation_y.count(0)}, buys: {validation_y.count(1)}")

model = Sequential()
model.add(LSTM(128, activation='tanh', input_shape=(train_x.shape[1:]), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(LSTM(128, activation='tanh', return_sequences=True))  # input_shape is only needed on the first layer
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(LSTM(128, activation='tanh'))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
Example #40
# update the internal vocabulary based on the list of messages(patterns)
# index dictionary is created for every word, and they get a unique integer
# based on frequency.
tokenizer.fit_on_texts(training_patterns)
# get the dictionary of words and their unique integer
word_index = tokenizer.word_index
# each word in the text gets replaced with the corresponding unique integer
# value
sequences = tokenizer.texts_to_sequences(training_patterns)
# takes the sequences and transforms it into a 2D Numpy array where each row
# is the message(pattern). Max length for each sequence is 20, and messages
# bigger gets values removed at the end.
padded_sequences = pad_sequences(sequences, truncating='post', maxlen=20)

# set up and define the neural network model
model = Sequential()
# add the embedding layer used to set up and update weights for each of the
# integer vectors(words). Weights can be retrieved by multiplying the one-hot
# vector assigned to each integer vector to the embedding matrix.
model.add(Embedding(1000, 16, input_length=20))
# add an average global pooling layer used to reduce the total number of
# parameters in the neural network model in order minimize overfitting
model.add(GlobalAveragePooling1D())
# a neural network layer is added to the model that creates a weights matrix.
# This layer implements the output = activation(dot(input, kernel) + bias
# operation where the kernel is the weights matrix and the activation is the
# rectified linear activation function.
model.add(Dense(16, activation='relu'))
# another neural network layer is added same as before
model.add(Dense(16, activation='relu'))
# a final layer is added with the softmax activation function so the outputs
# form a probability distribution over the classes; `num_classes` is assumed
# to be defined elsewhere as the number of intent labels
model.add(Dense(num_classes, activation='softmax'))
Example #41
    [0, 0, 1],
    [0, 0, 1],
    [0, 0, 1],
    [0, 0, 1],
])


# this takes a looong time to index, and
# python may crash several times before indexing is complete
import tensorflow as tf

from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation

model = Sequential()
model.add(Dense(8,
                activation=keras.activations.sigmoid,
                ))
model.add(Dense(3,
                activation=keras.activations.sigmoid,
                ))

model.compile(
              optimizer=tf.train.AdamOptimizer(0.001),
              # loss=keras.losses.categorical_crossentropy,
              loss=keras.losses.mse,
              metrics=[keras.metrics.binary_accuracy]
              )

# This is the process I used to train my weights
Example #42
def rand_sample(data, samples=10):
    ind = np.arange(len(data))
    np.random.shuffle(ind)
    return data[ind[:samples]]


equal = load_data('equal')
pure = load_data('pure')
pytha = load_data('pytha')

# model parameters
DROP_RATE = 0.8
L1 = 1e-2

model = Sequential()
model.add(Dropout(DROP_RATE, input_shape=(1025, 469, 1)))
model.add(AveragePooling2D((2, 50)))
model.add(MaxPooling2D((1, 9)))
model.add(Flatten())
model.add(Dense(3, activation='softmax', kernel_regularizer=regularizers.l1(L1)))

Adam = optimizers.Adam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(loss='categorical_crossentropy', optimizer=Adam, metrics=['acc'])
model.summary()
model.save_weights('initial.hdf5')

# `SAMPLE` songs for each temperament,
# `TRAIN_NUM` of which are for training, the rest for validation
SAMPLE = 10
TRAIN_NUM = 7
Example #44
tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))


def unpickle_this(filename):
    with open(filename, 'rb') as file:
        unpickled = pickle.load(file)
    return unpickled


y = np.array(unpickle_this('/storage/y.pickle'))

X = unpickle_this('/storage/X.pickle')
X = X / 255.0
type(X)

model = Sequential()

model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())

model.add(Dense(64))
model.add(Activation('relu'))

model.add(Dense(1))
Example #46
def perform_solvability_training(initializing,
                                 netname,
                                 numlayers=6,
                                 epochs=3,
                                 training_sets=2,
                                 batch_size=32,
                                 learning_rate=.001):
    """

    Parameters
    ----------
    initializing : boolean
        Is True if the net already exists and we want to continue
        training and False if we want to make a new net.
    netname : string
        The name of the network in the file system.
    numlayers: int, optional
        Number of layers to use in the network. The default is 6.
    epochs: int, optional
        Number of epochs to do per training set. The default is 3.
    training_sets: int, optional
        Number of training sets to sample from all possible data
        points. The default is 2.
    batch_size: int, optional
        Batch size used when fitting the model. The default is 32.
    learning_rate: float, optional
        Learning rate of the Adam optimizer. Default is .001.

    Returns
    -------
    The trained model

    """

    # Set up training and test data.  Inputs are positions,
    # outputs are (x,y,direction) tuples encoded to integers
    # and then to one-hot vectors, representing
    # either a push or a win.
    x_test, y_test = utils.load_solvability_data(constants.TEST_LEVELS)

    # This line implicitly assumes that all levels have the same size.
    # Therefore, small levels are padded with unmovables.
    img_x, img_y, img_z = x_test[0].shape

    input_shape = (img_x, img_y, img_z)

    x_test = x_test.astype('float32')
    print(x_test.shape[0], 'test samples')

    dconst = 0.3  # Dropout between hidden layers

    model = None  # To give the variable global scope
    if initializing:
        # Create a convolutional network with numlayers layers of 3 by 3
        # convolutions and a dense layer at the end.
        # Use batch normalization and regularization.
        model = Sequential()
        model.add(BatchNormalization())
        model.add(
            Conv2D(
                64,
                (3, 3),
                activation='relu',
                input_shape=input_shape,
                #padding = 'same'))
                kernel_regularizer=regularizers.l2(.5),
                padding='same'))
        model.add(Dropout(dconst))

        for i in range(numlayers - 1):
            model.add(BatchNormalization())
            model.add(
                Conv2D(
                    64,
                    (3, 3),
                    activation='relu',
                    #padding = 'same'))
                    kernel_regularizer=regularizers.l2(.5),
                    padding='same'))
            model.add(Dropout(dconst))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))
    else:
        # Load the model and its weights
        json_file = open("networks/policy_" + netname + ".json", "r")
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        model.load_weights("networks/policy_" + netname + ".h5")
        print("Loaded model from disk")

    model.compile(loss=tensorflow.keras.losses.binary_crossentropy,
                  optimizer=tensorflow.keras.optimizers.Adam(
                      learning_rate=learning_rate),
                  metrics=['accuracy'])

    # Keep track of the model's accuracy
    class AccuracyHistory(tensorflow.keras.callbacks.Callback):
        def on_train_begin(self, logs={}):
            self.acc = []

        def on_epoch_end(self, epoch, logs={}):
            self.acc.append(logs.get('accuracy'))

    history = AccuracyHistory()

    # Use different training datasets by getting different random
    # samples from the shifts of the input data
    for i in range(training_sets):
        print("training set", i)
        levels_to_train = constants.TRAIN_LEVELS
        x_train, y_train = utils.load_solvability_data(levels_to_train,
                                                       shifts=True)
        utils.shuffle_in_unison(x_train, y_train)
        x_train = x_train.astype('float32')

        # Train the network
        track = model.fit(x_train,
                          y_train,
                          batch_size=batch_size,
                          epochs=epochs,
                          verbose=1,
                          validation_data=(x_test, y_test),
                          callbacks=[history])

    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    plt.plot(range(1, epochs + 1), track.history['val_accuracy'])
    plt.plot(range(1, epochs + 1), track.history['accuracy'])
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.show()

    # Save the trained network
    model_json = model.to_json()
    directory = os.getcwd() + '/networks'
    if not os.path.exists(directory):
        os.mkdir(directory)
    with open("networks/solvability_" + netname + ".json", "w") as json_file:
        json_file.write(model_json)

    model.save_weights("networks/solvability_" + netname + ".h5")
    print("Saved model to disk")

    return model
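
# A hypothetical invocation (assumes the utils/constants modules referenced
# above are importable and that the level data they point to exists on disk;
# 'demo' is a placeholder network name):
trained_model = perform_solvability_training(initializing=True,
                                             netname='demo',
                                             epochs=3,
                                             training_sets=2)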