Example #1
def sonar_model():
    model = Sequential()
    model.add(Dense(60, input_shape=(60,), activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(30, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))

    # Use the Binary Cross Entropy loss function for a Binary Classifier.
    # https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#compile
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    return model
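
# A minimal usage sketch (not part of the original): the sonar data itself is not
# shown, so random placeholders stand in for 60-feature samples with 0/1 labels.
# Assumes the Sequential/Dense/Dropout imports used by sonar_model() are in scope.
import numpy as np

X_demo = np.random.rand(208, 60).astype('float32')   # placeholder features
y_demo = np.random.randint(0, 2, size=(208,))          # placeholder binary labels

demo_model = sonar_model()
demo_model.fit(X_demo, y_demo, epochs=10, batch_size=16, validation_split=0.2, verbose=0)
print(demo_model.evaluate(X_demo, y_demo, verbose=0))   # [loss, accuracy]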
Example #2
class DeepLearningModel:
    def __init__(self, FREQUENCY, TARGET_FREQUENCY):
        print(tf.__version__)

        self.FREQUENCY = FREQUENCY
        self.TARGET_FREQUENCY = TARGET_FREQUENCY
        self.DOWNSAMPLE = int(FREQUENCY/TARGET_FREQUENCY)
        print(self.DOWNSAMPLE)

        self.scale = [1024, 1024, 1024]
        self.mean = [0, 0, 0]

        self.Fs = self.TARGET_FREQUENCY #Samples per second
        self.Seconds = 2 # Seconds the frame covers
        self.frame_size = self.Fs*self.Seconds
        self.hop_size = 10 # Step between consecutive frames; a step smaller than frame_size gives overlapping frames

    def create_save_and_ready_dataset(self, files):
        self.create_dataset(files)
        self.save_dataset()
        self.ready_datasets()

    # takes a list of file paths to training data
    def create_dataset(self, files):
        self.datasets = self.preprocess_files(files)

        self.X, self.Y = self.create_frames(self.datasets[0], self.frame_size, self.hop_size)

        for i in range(1,len(self.datasets)):
            temp_x, temp_y = self.create_frames(self.datasets[i], self.frame_size, self.hop_size)
            self.X = np.append(self.X, temp_x, axis=0)
            self.Y = np.append(self.Y, temp_y)

    def save_dataset(self):
        np.save('serial_dataset_x.npy', self.X)

        np.save('serial_dataset_y.npy', self.Y)

        print('shapes: ', self.X.shape, self.Y.shape)   

    def load_dataset(self):
        self.Y = np.load('serial_dataset_y.npy')

        self.X = np.load('serial_dataset_x.npy')        

        print('X and Y loaded from files, shapes: ', self.X.shape, self.Y.shape)

    def load_and_ready_datasets(self):
        self.load_dataset()
        self.ready_datasets()

    def ready_datasets(self):
        # Split the data with train_test_split() from sklearn (already imported).
        # The first split keeps 5% of the frames for training and 95% for validation/testing;
        # the second split divides that 95% into validation (20%) and test (80%).
        # random_state controls the shuffling applied before the split; stratify=self.Y keeps
        # the class distribution the same in every subset.
        self.X_train, self.X_val, self.Y_train, self.Y_val = train_test_split(self.X, self.Y, test_size = 0.95, random_state = 0, stratify = self.Y)
        self.X_val, self.X_test, self.Y_val, self.Y_test = train_test_split(self.X_val, self.Y_val, test_size = 0.8, random_state = 0, stratify = self.Y_val)

        print(self.X_train.shape) # Prints train dataset size
        print(self.X_val.shape) # Prints validation dataset size

        # The CNN expects 3-dimensional samples, so we reshape() the data by adding a trailing dimension of size 1.
        self.X_train = self.X_train.reshape(self.X_train.shape + (1, ))
        self.X_val = self.X_val.reshape(self.X_val.shape + (1, ))
        self.X_test = self.X_test.reshape(self.X_test.shape + (1, ))

    def create_model(self):
        n_activities = len(np.unique(self.Y))

        ## Create the model
        self.model = Sequential()
        self.model.add(Conv2D(4, (3, 1), activation = 'relu', input_shape = (100, 3, 1))) 
        self.model.add(Dropout(0.1))

        self.model.add(Conv2D(4, (2, 2), activation='relu'))
        self.model.add(Dropout(0.2))

        self.model.add(Flatten())
        self.model.add(Dense(8, activation = 'relu'))
        self.model.add(Dropout(0.5))

        self.model.add(Dense(n_activities, activation='softmax'))

    def train(self, epochs, plot):
        #Here we are compiling the model and fitting it to the training data. We will use 200 epochs to train the model.
        #An epoch is an iteration over the entire data provided. validation_data is the data on which to evaluate the loss and any model metrics at the end of each epoch.
        #The model will not be trained on this data. As metrics = ['accuracy'] the model will be evaluated based on the accuracy.

        self.epochs = epochs

        lr_schedule = keras.optimizers.schedules.ExponentialDecay(initial_learning_rate=1e-4, decay_steps=10000, decay_rate=0.9)
        self.model.compile(optimizer=Adam(learning_rate = lr_schedule), loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])
        self.history = self.model.fit(self.X_train, self.Y_train, epochs = self.epochs, validation_data=(self.X_val, self.Y_val), verbose=1)

        if plot:
            # Plot training & validation accuracy values
            epoch_range = range(1, self.epochs+1)
            plt.plot(epoch_range, self.history.history['accuracy'])
            plt.plot(epoch_range, self.history.history['val_accuracy'])
            plt.title('Model accuracy')
            plt.ylabel('Accuracy')
            plt.xlabel('Epoch')
            plt.legend(['Train', 'Val'], loc='upper left')
            plt.show()

            # Plot training & validation loss values
            plt.plot(epoch_range, self.history.history['loss'])
            plt.plot(epoch_range, self.history.history['val_loss'])
            plt.title('Model loss')
            plt.ylabel('Loss')
            plt.xlabel('Epoch')
            plt.legend(['Train', 'Val'], loc='upper left')
            plt.show()

    def save_model(self, name: str):
        self.model.save(name)

    def load_model(self, name: str):
        self.model = tf.keras.models.load_model(name)

    def compile_tflite_model(self, name: str):
        MODELS_DIR = name+ '/'
        if not os.path.exists(MODELS_DIR):
            os.mkdir(MODELS_DIR)
        MODEL_TF = name
        MODEL_NO_QUANT_TFLITE = MODELS_DIR + name + '_no_quant.tflite'
        MODEL_TFLITE = MODELS_DIR + name + '.tflite'
        MODEL_TFLITE_MICRO = MODELS_DIR + name + '.cc'

        # Convert the model to the TensorFlow Lite format without quantization
        converter = tf.lite.TFLiteConverter.from_saved_model(name)
        converter.optimizations = []
        model_no_quant_tflite = converter.convert()

        # Save the model to disk
        open(MODEL_NO_QUANT_TFLITE, "wb").write(model_no_quant_tflite)

        #size_tf = os.path.getsize(MODEL_TF)
        #print(size_tf)
        size_no_quant_tflite = os.path.getsize(MODEL_NO_QUANT_TFLITE)
        print('Size of file ' + str(size_no_quant_tflite) + ' bytes')

        c_model_name = name
        with open(c_model_name + '.h', 'w') as file:
            file.write(self.hex_to_c_array(model_no_quant_tflite, c_model_name))

    def hex_to_c_array(self, hex_data, var_name):
        c_str = ''

        # Create header guard
        c_str += '#ifndef ' + var_name.upper() + '_H\n'
        c_str += '#define ' + var_name.upper() + '_H\n\n'

        # Add array length at top of file
        c_str += '\nunsigned int ' + var_name + '_len = ' + str(len(hex_data)) + ';\n'

        # Declare C variable
        c_str += 'unsigned char ' + var_name + '[] = {'
        hex_array = []
        for i, val in enumerate(hex_data) :

            # Construct string from hex
            hex_str = format(val, '#04x')

            # Add formatting so each line stays within 80 characters
            if (i + 1) < len(hex_data):
                hex_str += ','
            if (i + 1) % 12 == 0:
                hex_str += '\n '
            hex_array.append(hex_str)

        # Add closing brace
        c_str += '\n ' + format(' '.join(hex_array)) + '\n};\n\n'

        # Close out header guard
        c_str += '#endif //' + var_name.upper() + '_H'

        return c_str

    def create_train_and_save_model(self, epochs, name: str, plot=False):
        self.create_model()
        self.train(epochs, plot)
        self.save_model(name)

    def confusion_matrix(self):
        # predict_classes() was removed from Sequential in newer TF; use predict() + argmax.
        Y_pred = np.argmax(self.model.predict(self.X_test), axis=1)
        array = ['Walking', 'Running']
        mat = confusion_matrix(self.Y_test, Y_pred)
        plot_confusion_matrix(conf_mat=mat, class_names= array, show_normed=True, figsize=(3,3))
        plt.show()

    def predict(self, inp):
        print('predicting with shape', inp.shape)
        return self.model.predict(inp)

    def preprocess_files(self, filenames):
        datasets = []

        for f in filenames:
            datasets.extend(self.preprocess_file(f))

        return datasets


    def preprocess_file(self, filename):
        x = []
        y = []
        z = []
        t = []

        datasets = []

        print(filename)

        f = open(filename, "r")
        for i,line in enumerate(f):
            try:
                split = line.split(',')
                if (len(split) >= 4) and i > self.FREQUENCY*10:
                    x.append((int(split[0]) - self.mean[0])/self.scale[0])
                    y.append((int(split[1]) - self.mean[1])/self.scale[1])
                    z.append((int(split[2]) - self.mean[2])/self.scale[2])
                    t.append(int(split[3]))
                else:
                    # split data
                    df = pd.DataFrame()
                    df["type"] = t
                    df["x"] = x
                    df["y"] = y
                    df["z"] = z
                    x = []
                    y = []
                    z = []
                    t = []

                    # downsample the dataframe
                    if len(df)/self.FREQUENCY > 5:
                        for j in range(self.DOWNSAMPLE):
                            downsampled = df.iloc[j::self.DOWNSAMPLE, :]
                            datasets.append(downsampled)
            except:
                print('error')
        f.close()

        return datasets

    def create_frames(self, df, frame_size, hop_size):
        N_FEATURES = 3 # Number of input features: x, y and z
        
        frames = []
        labels = []
        for i in range(0, len(df) - frame_size, hop_size):
            x = df['x'].values[i: i + frame_size]
            y = df['y'].values[i: i + frame_size]
            z = df['z'].values[i: i + frame_size]
            
            # Retrieve the most often used label in this segment
            label = stats.mode(df['type'][i: i + frame_size])[0][0]

            if len(x)==frame_size:
                frames.append([x, y, z])
                labels.append(label)

        # Bring the segments into a better shape
        frames = np.asarray(frames).transpose((0, 2, 1))
        labels = np.asarray(labels)

        return frames, labels
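
# A hedged usage sketch of the workflow defined by the class above (the sample
# rates, file names and model name are placeholders/assumptions, not values from
# the original):
if __name__ == '__main__':
    dl = DeepLearningModel(FREQUENCY=500, TARGET_FREQUENCY=50)
    dl.create_dataset(['capture_walking.txt', 'capture_running.txt'])
    dl.save_dataset()
    dl.ready_datasets()
    dl.create_train_and_save_model(epochs=50, name='activity_model', plot=True)
    dl.compile_tflite_model('activity_model')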

Example #3
# Create the model
model=Sequential()
model.add(LSTM(units=50,activation="relu",return_sequences=True,input_shape=(window_size, data_size)))
model.add(Dropout(0.2))
model.add(LSTM(units=60,activation="relu",return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(units=80,activation="relu",return_sequences=True))
model.add(Dropout(0.4))
model.add(LSTM(units=120,activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(units=1))
model.summary()

model.compile(optimizer='adam', loss='mean_squared_error',metrics=['accuracy'])
model.fit(train_x, train_y, epochs=200, batch_size=30)
pred_y = model.predict(test_x)


# print('##### Test Result #####')
# print('loss : ',str(loss_and_metrics[0]))
# print('Accuracy : ',str(loss_and_metrics[1]))

from sklearn.metrics import mean_squared_error
rmse = np.sqrt(mean_squared_error(test_y, pred_y))
print(f"RMSE 값 : {rmse}")
# sklearn은 mse만 지원하기 때문에 rmse와 rmsle는 직접 만들어야 합니다.
# y_preds는 예측값들이 담긴 데이터, y_test는 실제값 데이터들입니다.
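
# A minimal RMSLE sketch to go with the comment above (an illustration, not from
# the original): root of the mean squared difference of log1p-transformed values.
def rmsle(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2))

# e.g. print(f"RMSLE value: {rmsle(test_y, pred_y)}")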

Example #4
model = Sequential([
    baseModel,
    AveragePooling2D(pool_size=(7, 7)),
    Flatten(name="flatten"),
    Dense(128, activation="relu"),
    Dropout(0.5),
    Dense(2, activation="softmax")
])

model.summary()

# compilation
print("[INFO] compiling model...")
optimizer = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy",
              optimizer=optimizer,
              metrics=["accuracy"])

# training
print("[INFO] training head...")
H = model.fit_generator(train.flow(trainX, trainY, batch_size=BATCH_SIZE),
                        steps_per_epoch=len(trainX) // BATCH_SIZE,
                        validation_data=(testX, testY),
                        validation_steps=len(testX) // BATCH_SIZE,
                        epochs=EPOCHS)
# save the model
print("[INFO] saving mask detector model...")
model.save("MaskDetector.model", save_format="h5")

N = EPOCHS
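
# `N = EPOCHS` hints that a training-history plot follows; a hedged sketch of what
# that step might look like (assumes matplotlib.pyplot as plt and numpy as np are
# imported, and TF 2.x history keys 'accuracy'/'val_accuracy'):
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch")
plt.ylabel("Loss / Accuracy")
plt.legend(loc="lower left")
plt.savefig("training_plot.png")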
Example #5
train_univariate = Dataset.from_tensor_slices((datasets_train, labels_train))
train_univariate = train_univariate.shuffle(BUFFER_SIZE).batch(
    BATCH_SIZE).repeat()

val_univariate = Dataset.from_tensor_slices((datasets_test, labels_test))
val_univariate = val_univariate.batch(BATCH_SIZE).repeat()

model = Sequential()

model.add(layers.Dense(input_interval, input_shape=(input_interval, )))
model.add(layers.Dense(input_interval))
model.add(layers.Dense(int(input_interval / 2)))
model.add(layers.Dense(1))

model.compile(optimizer='SGD', loss='mean_squared_error')

model.fit(train_univariate,
          epochs=400,
          steps_per_epoch=10,
          validation_data=val_univariate,
          validation_steps=10)

train_predictions = np.array(
    [value[0] for value in model.predict(datasets_train)])
train_labels_ = np.array([value[0] for value in labels_train])
train_x_arr = x_arr[input_interval + output_offset:train_length]

test_predictions = np.array(
    [value[0] for value in model.predict(datasets_test)])
test_labels_ = np.array([value[0] for value in labels_test])
Example #6
smooth = 1.
def dice_coef(y_true, y_pred):
    y_true_f = flatten(y_true)
    y_pred_f = flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
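
# A companion loss sketch (an assumption, not in the original): segmentation
# models are often trained on 1 - dice_coef so that maximising overlap minimises
# the loss.
def dice_coef_loss(y_true, y_pred):
    return 1. - dice_coef(y_true, y_pred)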

from tensorflow.keras.layers import Conv2D
from tensorflow.keras import Sequential

model = Sequential()
model.add( Conv2D(16, 3, activation='relu', padding='same', input_shape=(320, 480, 9)) )
model.add( Conv2D(32, 3, activation='relu', padding='same') )
model.add( Conv2D(1, 5, activation='sigmoid', padding='same') )

model.compile(optimizer = 'Adam', loss = BinaryCrossentropy(), metrics=['accuracy',dice_coef])
print(model.summary())

# Include the epoch in the file name (uses `str.format`)
checkpoint_path = "G:/Github/Caravan_challenge/training_tf/naive_org/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

# Create a callback that saves the model's weights every 5 epochs
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_path, 
    verbose=1, 
    save_weights_only=False,
    save_freq='epoch')

model.fit(X_train, y_train, epochs=15, validation_data=(X_val, y_val), batch_size=5, verbose=2)
sample = next(iter(db))
print(sample[0].shape, sample[1].shape)

network = Sequential([
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(64, activation='relu'),
    layers.Dense(32, activation='relu'),
    layers.Dense(10)
])
network.build(input_shape=(None, 28 * 28))
network.summary()

network.compile(optimizer=optimizers.Adam(lr=0.01),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])

network.fit(db, epochs=3, validation_data=ds_val, validation_freq=2)

network.evaluate(ds_val)

# Save the entire model
network.save('model.h5')
print('saved total model.')
del network  # delete the model

network = tf.keras.models.load_model('model.h5', compile=False)
print('loaded model from file.')
network.compile(optimizer=optimizers.Adam(lr=0.01),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])

print(x.shape, y.shape)

db = tf.data.Dataset.from_tensor_slices((x, y))
db = db.map(preprocess).shuffle(10000).batch(256)

db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.map(preprocess).shuffle(10000).batch(256)

db_iter = iter(db)
sample = next(db_iter)

print('batch:', sample[0].shape, sample[1].shape)

network = Sequential([
    layers.Dense(256, activation=tf.nn.relu),  #[b, 784] => [b, 256]
    layers.Dense(128, activation=tf.nn.relu),  # [b, 256] => [b, 128]
    layers.Dense(64, activation=tf.nn.relu),  # [b, 128] => [b, 64]
    layers.Dense(32, activation=tf.nn.relu),  # [b, 64] => [b, 32]
    # layers.Dense(16, activation=tf.nn.relu),  # [b, 32] => [b, 16] # try adding an extra layer
    layers.Dense(10, activation=tf.nn.relu),  # [b, 32] => [b, 10]
])

network.build(input_shape=[None, 28 * 28])
network.summary()

network.compile(optimizer=optimizers.Adam(lr=1e-3),
                loss=tf.losses.MSE,
                metrics=['accuracy'])

network.fit(db, epochs=10, validation_data=db_test, validation_freq=2)
network.save('./model.h5')
Example #9
from tensorflow.keras.datasets import imdb
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Embedding, Flatten
from tensorflow.keras import preprocessing
import tensorflow as tf

max_features = 10000
max_len = 20
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = preprocessing.sequence.pad_sequences(x_train, maxlen=max_len)
x_test = preprocessing.sequence.pad_sequences(x_test, maxlen=max_len)

model = Sequential()
model.add(Embedding(10000, 8, input_length=max_len))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()
tf_callback = tf.keras.callbacks.TensorBoard(log_dir='logs')
history = model.fit(x_train,
                    y_train,
                    epochs=10,
                    batch_size=32,
                    validation_split=0.2,
                    callbacks=[tf_callback])
Example #10
    BatchNormalization(),
    ReLU(),
    Dropout(0.1),
    Dense(64, use_bias=False),
    BatchNormalization(),
    ReLU(),
    Dropout(0.1),
    Dense(32, use_bias=False),
    BatchNormalization(),
    ReLU(),
    Dropout(0.1),
    Dense(Dout)
])

# compile model w/ MAE loss, adam optimizer
model.compile(tf.keras.optimizers.Adam(lr=L_RATE), loss='mean_absolute_error')

# Fit the model
history = model.fit(X,
                    y,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_split=VAL_SPLIT,
                    verbose=1)

# validation error over training
plt.plot(history.history['loss'], label='Train')
plt.plot(history.history['val_loss'], label='Val')
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('MAE')
        # z accumulates the shifted contributions; there is no pre-existing zero
        # tensor to add into, so we just initialize by treating j==0 separately
        for j in range(self.kernel_size):
            if j==0:
                z=self.w[j]*tf.roll(inputs,shift=j-j0,axis=1)
            else:
                z+=self.w[j]*tf.roll(inputs,shift=j-j0,axis=1)
        return z
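
# The PeriodicConvolution class above is truncated; a hedged sketch of what the
# full custom layer might look like (an assumption reconstructed from the visible
# call() fragment, not the author's code):
import tensorflow as tf
from tensorflow.keras.layers import Layer

class PeriodicConvolutionSketch(Layer):
    """1D convolution with periodic (wrap-around) boundaries via tf.roll."""
    def __init__(self, kernel_size=3, **kwargs):
        super().__init__(**kwargs)
        self.kernel_size = kernel_size

    def build(self, input_shape):
        # one trainable weight per kernel tap
        self.w = self.add_weight(name='w', shape=(self.kernel_size,),
                                 initializer='random_normal', trainable=True)

    def call(self, inputs):
        j0 = self.kernel_size // 2  # centre of the kernel
        z = None
        for j in range(self.kernel_size):
            term = self.w[j] * tf.roll(inputs, shift=j - j0, axis=1)
            z = term if z is None else z + term
        return z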




Net=Sequential()
Net.add(PeriodicConvolution(kernel_size=3))

Net.compile(loss='mean_squared_error', optimizer='adam')




y_in=np.array([[0.,0.,3.,0.,0.]])




y_out=Net.predict_on_batch(y_in)
print(y_out)

model.add(
    CuDNNLSTM(128, input_shape=(train_x.shape[1:]), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(
    CuDNNLSTM(128, input_shape=(train_x.shape[1:]), return_sequences=True))
model.add(Dropout(0.1))
model.add(BatchNormalization())

model.add(CuDNNLSTM(128, input_shape=(train_x.shape[1:])))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(2, activation='softmax'))

opt = tf.keras.optimizers.Adam(lr=0.001, decay=1e-6)

model.compile(loss='sparse_categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

history = model.fit(train_x,
                    train_y,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=(validation_x, validation_y))
Example #13
class CNNModel:
    def __init__(self):
        self.x_train = self.x_test = self.y_train = self.y_test = None
        self.history = None
        self.model = None
        self.model_dir = 'models/'

    def load_data(self):
        IMG = Image_Loader()
        [self.x_train, self.y_train, self.x_test,
         self.y_test] = IMG.load_images()
        print('Training and testing data loaded.')

    def build_model(self):
        x_shape = self.x_train[0].shape
        self.model = Sequential()
        self.model.add(
            layers.Conv2D(32, (3, 3),
                          activation='relu',
                          kernel_initializer='he_uniform',
                          padding='same',
                          input_shape=x_shape))
        self.model.add(layers.MaxPooling2D(2, 2))
        self.model.add(layers.Dropout(0.2))
        self.model.add(
            layers.Conv2D(64, (3, 3),
                          activation='relu',
                          kernel_initializer='he_uniform',
                          padding='same',
                          input_shape=x_shape))
        self.model.add(layers.MaxPooling2D(2, 2))
        self.model.add(layers.Dropout(0.2))
        self.model.add(
            layers.Conv2D(128, (3, 3),
                          activation='relu',
                          kernel_initializer='he_uniform',
                          padding='same',
                          input_shape=x_shape))
        self.model.add(layers.MaxPooling2D(2, 2))
        self.model.add(layers.Dropout(0.2))
        self.model.add(layers.Flatten())
        self.model.add(
            layers.Dense(128,
                         activation='relu',
                         kernel_initializer='he_uniform'))
        self.model.add(layers.Dropout(0.2))
        self.model.add(layers.Dense(10, activation='softmax'))
        self.model.compile(loss='categorical_crossentropy',
                           metrics=['accuracy'],
                           optimizer='adam')
        self.model.summary()

    def train(self):
        num_epochs = 100
        num_batches = 64
        self.history = self.model.fit(self.x_train,
                                      self.y_train,
                                      batch_size=num_batches,
                                      epochs=num_epochs,
                                      validation_data=(self.x_test,
                                                       self.y_test),
                                      callbacks=[TqdmCallback()])

    def eval_model(self):
        try:
            score = self.model.evaluate(self.x_train, self.y_train)
            print("Training Loss: ", score[0])
            print("Training Accuracy: ", score[1])
            score = self.model.evaluate(self.x_test, self.y_test)
            print("Testing Loss: ", score[0])
            print("Testing Accuracy: ", score[1])

            if (self.history):
                plt.subplot(1, 2, 1)
                plt.plot(self.history.history['accuracy'], label='accuracy')
                plt.plot(self.history.history['val_accuracy'],
                         label='val_accuracy')
                plt.title('Training and Validation Accuracy')
                plt.xlabel('Epoch')
                plt.ylabel('Accuracy')
                plt.ylim([0.7, 1])
                plt.legend(loc='lower right')

                plt.subplot(1, 2, 2)
                plt.plot(self.history.history['loss'], label='loss')
                plt.plot(self.history.history['val_loss'], label='val_loss')
                plt.title('Training and Validation Loss')
                plt.xlabel('Epoch')
                plt.ylabel('Loss')
                plt.ylim([0.7, 1])
                plt.legend(loc='upper right')
        except:
            if (self.x_train is None):
                print(
                    'Train and test data not loaded. Run CNNModel.load_data().'
                )
            else:
                print(
                    'Please make sure train and test data are loaded correctly.'
                )

    def load_image(self, image, url=0):
        # set url to 1 if image is from internet
        if url:
            resp = get(image)
            img_bytes = BytesIO(resp.content)
            img = load_img(img_bytes, target_size=(32, 32))
        else:
            img = load_img(image, target_size=(32, 32))
        img_pix = asarray(img)
        img_pix = img_pix.reshape(1, 32, 32, 3)
        img_pix = img_pix.astype('float32')
        img_pix = img_pix / 255.0
        print('Image loaded.')
        return img_pix

    def predict(self, img_pix):
        # predict_classes()/predict_proba() were removed from Sequential in newer TF;
        # use predict() and take the arg-max instead.
        probs = self.model.predict(img_pix)
        y_pred = int(probs[0].argmax())
        y_prob = float(probs[0][y_pred])
        pred_dict = {'prediction': str(y_pred), 'confidence': str(y_prob)}
        return pred_dict

    def serialize(self, filename):
        file = self.model_dir + filename
        self.model.save(file, save_format='h5')
        print('Model saved.')

    def deserialize(self, filename):
        file = self.model_dir + filename
        model = tf.keras.models.load_model(file)
        self.model = model
        print('Model loaded.')
model
])

plt.figure(figsize=(10, 10))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    aug_img = model_(tf.expand_dims(x_train[i], 0))
    plt.imshow(aug_img[0, ...].numpy())
    plt.grid(False)
    plt.axis("off")
    plt.title(class_names[y_train[i].item()])
plt.show()

model_.compile(
    optimizer="Adam",
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)

train_dataset = train_dataset.batch(16)

model_.fit(train_dataset, epochs = 1)




# Layers available under tensorflow.keras.layers.experimental.preprocessing:
# - Normalization
# - RandomFlip("horizontal")
# - RandomRotation(0.1)
# - RandomZoom(0.1)
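
# The `model_` definition earlier in this example is truncated; based on the notes
# above, a hedged sketch of an augmentation pipeline built from these preprocessing
# layers (layer order and values are assumptions, valid for TF versions that still
# ship the experimental preprocessing module):
from tensorflow.keras import Sequential
from tensorflow.keras.layers.experimental.preprocessing import (
    Normalization, RandomFlip, RandomRotation, RandomZoom)

augmentation_sketch = Sequential([
    Normalization(),
    RandomFlip("horizontal"),
    RandomRotation(0.1),
    RandomZoom(0.1),
])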
Example #15
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
    print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
    # determine the number of input features
    n_features = X_train.shape[1]

    # define model
    model = Sequential()
    model.add(
        Dense(20,
              activation='relu',
              kernel_initializer='he_normal',
              input_shape=(n_features, )))
    model.add(Dense(16, activation='relu', kernel_initializer='he_normal'))
    model.add(Dense(1))
    # compile the model
    model.compile(optimizer='adam', loss='mse')

    # fit the model
    history = model.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)

    # evaluate the model
    error = model.evaluate(X_test, y_test, verbose=0)
    print('MSE: %.3f, RMSE: %.3f' % (error, sqrt(error)))

    # make a prediction
    row = [
        100000.0, 85.0, 39.0, 652.0,
        le_occupancy_status.transform(['I'])[0],
        le_property_state.transform(['NJ'])[0]
    ]
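
    # The example stops before using `row`; a minimal continuation sketch (an
    # assumption, not part of the original):
    from numpy import asarray
    yhat = model.predict(asarray([row], dtype='float32'))
    print('Predicted: %.3f' % yhat[0][0])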
Example #16
    test_input[:, :, i] = col_test_input
    test_target[:, i] = col_test_target

print(f"Train input:target shape = {train_input.shape}:{train_target.shape}")
print(f"Test input:target shape = {test_input.shape}:{test_target.shape}")

# Input has to be in form [samples, time steps, sel_features]
#   - Samples. One sequence is one sample. A batch is comprised of one or more samples.
#   - Time Steps. One time step is one point of observation in the sample.
#   - Features. One feature is one observation at a time step.

# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(LOOK_BACK, n_features)))
model.add(Dense(n_features))
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()
model.fit(train_input,
          train_target,
          epochs=EPOCHS,
          batch_size=BATCH_SIZE,
          verbose=VERBOSE)

# make predictions
train_predict = model.predict(train_input)
test_predict = model.predict(test_input)
# invert predictions
train_predict = scaler.inverse_transform(train_predict)
train_target = scaler.inverse_transform(train_target)
test_predict = scaler.inverse_transform(test_predict)
test_target = scaler.inverse_transform(test_target)
regressior.add(LSTM(units=60, return_sequences=True))
regressior.add(Dropout(0.2))

# Adding a third LSTM layer and some Dropout regularisation
regressior.add(LSTM(units=80, return_sequences=True))
regressior.add(Dropout(0.2))

# Adding a fourth LSTM layer and some Dropout regularisation
regressior.add(LSTM(units=120))
regressior.add(Dropout(0.2))

# Adding the output layer
regressior.add(Dense(units=1))

# Compiling the RNN
regressior.compile(optimizer='nadam', loss='mean_squared_error')

# Fitting the RNN to the Training set
history = regressior.fit(X_train, y_train, epochs=120, batch_size=39)

# Part 3 - Making the predictions and visualising the results

# Getting the real traffic values of Cell_000231
dataset_test = pd.read_csv('Datasets/Cell_000231/test_Cell000231.csv')
date_only = dataset_test.drop(['Traffic'], axis=1)
past_24_hours = dataset.tail(24)
df = past_24_hours.append(dataset_test, ignore_index=True)
df = df.drop(['DateTime'], axis=1)

# Getting the predicted traffic values of Cell_000231
inputs = scaler.fit_transform(df)
Example #18
           padding='same'),
    MaxPooling2D(),
    GaussianNoise(.2),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(),
    # Conv2D(128, (3, 3), activation='relu'),
    # MaxPooling2D(),
    # Conv2D(256, (3, 3), activation='relu'),
    # MaxPooling2D(),
    # Dropout(0.2),
    Flatten(),
    Dense(size, activation='softmax'),
])

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
print("\nNetwork:")
print(model.summary())

# x_train, y_train, x_test, y_test = seed.load_data('data.npz')
x_train, y_train = seed.create_data(50000)
x_test, y_test = seed.create_data(10000)

print("\nData example:")
print(x_train[0])

x_train = x_train.reshape((-1, 28, 28, 1)).astype('float32')
x_test = x_test.reshape((-1, 28, 28, 1)).astype('float32')

x_train /= 255.0
Example #19
    Conv2D(64,
           kernel_size=(5, 5),
           strides=(1, 1),
           activation='relu',
           input_shape=(223, 217, 3)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(128, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(7, activation='softmax'))

model.compile(loss=keras.losses.sparse_categorical_crossentropy,
              optimizer=keras.optimizers.SGD(lr=0.01),
              metrics=['accuracy'])

# model.compile(loss=keras.losses.sparse_categorical_crossentropy,
#               optimizer='adam',
#               metrics=['accuracy'])

model.summary()


class AccuracyHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self.acc = []

    def on_epoch_end(self, batch, logs={}):
        self.acc.append(logs.get('acc'))
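
# A hedged usage sketch for the callback above (variable names are placeholders,
# not from the original): attach it to fit() and read the collected accuracies.
acc_history = AccuracyHistory()
# model.fit(x_train, y_train, batch_size=32, epochs=10, callbacks=[acc_history])
# plt.plot(range(1, len(acc_history.acc) + 1), acc_history.acc)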
Example #20
X, y = make_circles(n_samples=1000, noise=0.1, random_state=1)
# change y from {0,1} to {-1,1}
y[where(y == 0)] = -1

# split into train and test
n_train = 500
trainX, testX = X[:n_train, :], X[n_train:, :]
trainy, testy = y[:n_train], y[n_train:]

# define model - use tanh for activation, use hingeloss
model = Sequential()
model.add(
    Dense(50, input_dim=2, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(1, activation='tanh'))
opt = SGD(lr=0.01, momentum=0.9)
model.compile(loss='hinge', optimizer=opt, metrics=['accuracy'])
# fit model
history = model.fit(trainX,
                    trainy,
                    validation_data=(testX, testy),
                    epochs=200,
                    verbose=0)
# evaluate the model
_, train_acc = model.evaluate(trainX, trainy, verbose=0)
_, test_acc = model.evaluate(testX, testy, verbose=0)
print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
# plot loss during training
pyplot.subplot(211)
pyplot.title('Loss')
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
Example #21
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)

model = Sequential()
model.add(
    tf.keras.applications.ResNet50(include_top=False,
                                   input_shape=(96, 96, 3),
                                   weights='imagenet',
                                   input_tensor=None,
                                   pooling='avg',
                                   classes=2))
model.add(tf.keras.layers.Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
model.layers[0].trainable = False
model.compile(optimizer="sgd",
              loss='binary_crossentropy',
              metrics=['accuracy', f1_m, precision_m, recall_m])
model.summary()
train_generator = data_generator.flow_from_directory(
    r'.\dataset\transferlearningdata\train',
    target_size=(96, 96),
    color_mode='rgb',
    batch_size=240,
    class_mode='binary')
validation_generator = data_generator.flow_from_directory(
    r'.\dataset\transferlearningdata\valid',
    target_size=(96, 96),
    color_mode='rgb',
    batch_size=200,
    class_mode='binary')
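
# The snippet defines the generators but stops before training; a hedged sketch of
# the fit step (the epoch count is an assumption, not from the original):
model.fit(train_generator,
          steps_per_epoch=train_generator.samples // train_generator.batch_size,
          validation_data=validation_generator,
          validation_steps=validation_generator.samples // validation_generator.batch_size,
          epochs=10)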
def bert_tensorflow_test(X_train, X_test, Y_train, Y_test):
    # Model
    model = Sequential()
    model.add(Masking(mask_value=0., input_shape=(MAX_SEQUENCE_LEN,VECTOR_DIM)))
    #forward_layer = LSTM(200, return_sequences=True)
    forward_layer = GRU(10, return_sequences=False, dropout=0.5)
    #backward_layer = LSTM(200, activation='relu', return_sequences=True,
    backward_layer = GRU(10, return_sequences=False, dropout=0.5,
                       go_backwards=True)
    model.add(Bidirectional(forward_layer, backward_layer=backward_layer,
                         input_shape=(MAX_SEQUENCE_LEN,VECTOR_DIM)))
    #model.add(TimeDistributed(Dense(NUM_CLASSES)))
    # Remove TimeDistributed() so that predictions are now made for the entire sentence
    model.add(Dense(NUM_CLASSES))
    model.add(Activation('softmax'))

    #print('preds shape', model.predict(X_train[:3]).shape)
    #print('Y_train shape', Y_train[:3].shape)
    #print(list(Y_train[:3]))
    classes = []
    for y in Y_train:
        cls = np.argmax(y)
        classes.append(cls)
    print(Counter(classes))

    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    print('compiled model')
    model.fit(X_train, Y_train, batch_size=8, epochs=10)#, validation_split=0.1)
    print('fit model')
    eval = model.evaluate(X_test, Y_test, batch_size=8)
    #print('X_test[0]')
    #print(X_test[0])
    #print(X_train[0])
    preds = model.predict_proba(X_test, verbose=1, batch_size=8)
    print(preds)
    num_correct = 0
    num_incorrect = 0
    TP = 0
    TN = 0
    FP = 0
    FN = 0
    # idiomatic = 2, non-idiomatic = 3
    with open('preds_out_temp.txt', 'w') as tempoutf:
        for pred, y in zip(preds, Y_test):
            if np.argmax(y) == 2 or np.argmax(y) == 3:
                if np.argmax(y) == np.argmax(pred):
                    num_correct += 1
                else:
                    num_incorrect += 1
            if np.argmax(pred) == 2 and np.argmax(y) == 2:
                TP += 1
            if np.argmax(pred) == 3 and np.argmax(y) == 3:
                TN += 1
            if np.argmax(pred) == 2 and np.argmax(y) == 3:
                FP += 1
            if np.argmax(pred) == 3 and np.argmax(y) == 2:
                FN += 1
    custom_accuracy = num_correct/(num_correct+num_incorrect)
    print('custom accuracy is', num_correct/(num_correct+num_incorrect))
    for y in Y_test:
        cls = np.argmax(y)
        classes.append(cls)
    class_nums = Counter(classes)
    print(class_nums)
    default_acc = class_nums[2] / (class_nums[2] + class_nums[3])
    print('default accuracy is', default_acc, 'or', 1 - default_acc)
    return eval, custom_accuracy, default_acc, [TP, TN, FP, FN]
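
# A hedged follow-up sketch (not in the original): turning the returned
# [TP, TN, FP, FN] counts into precision, recall and F1.
def prf_from_counts(TP, TN, FP, FN):
    precision = TP / (TP + FP) if (TP + FP) else 0.0
    recall = TP / (TP + FN) if (TP + FN) else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1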
Example #23
test_id = test['Id']
test_inputs = test[inputs.columns]

### Model ###

model = Sequential()

model.add(Dense(10, activation='relu'))
model.add(Dense(300, activation='relu'))
model.add(Dense(500, activation='relu'))
model.add(Dense(100, activation='relu'))

model.add(Dense(1, activation='linear'))

model.compile(loss='MSE',
              optimizer=Adam(learning_rate=LEARN_RATE),
              metrics=['accuracy'])

### Training ##

history = model.fit(inputs, targets, validation_split=VAL_SPLIT, epochs=EPOCHS)

plt.figure()
plt.grid()
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.yscale('log')
plt.xlabel('Epochs')
plt.legend(fontsize=13)
plt.savefig('losses.png', dpi=10000)
Example #24
class BiLSTM(BaseModel):
    _model_name = 'BiLSTMClf'

    def __init__(self,
                 output_dim=None,
                 input_length=None,
                 run_time=None,
                 X_train=None,
                 y_train=None,
                 save_path=None,
                 epochs=None,
                 batch_size=None,
                 validation_data=None,
                 validation_split=None,
                 verbose=1,
                 embedding_path=None,
                 n_words=10000):
        super().__init__()
        self._output_dim = output_dim
        self._input_length = input_length
        self._run_time = run_time
        self._X_train = X_train
        self._y_train = y_train
        self._save_path = save_path
        self._epochs = epochs
        self._batch_size = batch_size
        self._validation_data = validation_data
        self._validation_split = validation_split
        self._verbose = verbose
        self._embedding_path = embedding_path
        self._n_words = n_words
        self._input_dim = ''
        self._weights = ''
        self._tokenizer = ''
        self._model = None
        self._history = None

    def _create_model(self):
        self._model = Sequential([
            Embedding(
                input_dim=self._input_dim,  # vocab_size
                output_dim=self._output_dim,
                weights=[self._weights],  # embedding_matrix
                input_length=self._input_length,
                name='embeddings')
        ])  # max_len

        self._model.add(Bidirectional(LSTM(64, return_sequences=True)))
        self._model.add(GlobalMaxPooling1D())
        self._model.add(Dense(16, activation='relu'))
        self._model.add(Dropout(0.30))
        self._model.add(Dense(6, activation='sigmoid'))

        self._model.compile(loss='binary_crossentropy',
                            optimizer='adam',
                            metrics=[
                                'accuracy'
                            ])  # TODO: change to correct metrics once all ok

    def _preprocess_data(self):
        self._tokenizer = Tokenizer(num_words=self._n_words, oov_token='<oov>')
        self._tokenizer.fit_on_texts(self._X_train)

        self._input_length = max(
            [len(row) for row in self._X_train]
        ) if self._input_length is None or self._input_length == 'None' else int(
            self._input_length)
        self._X_train = self._tokenize_and_pad(self._X_train, self._tokenizer,
                                               self._input_length)
        self._input_dim = len(self._tokenizer.word_index) + 1

    def _tokenize_and_pad(self,
                          data,
                          tokenizer,
                          maxlen,
                          padding='post',
                          truncating='post'):
        print('tokenizing data')
        data = tokenizer.texts_to_sequences(data)
        print('padding tokens')
        return pad_sequences(data,
                             maxlen=maxlen,
                             padding=padding,
                             truncating=truncating)

    def _get_embeddings(self):
        embeddings_index = {}
        print('reading pre-trained embeddings')
        glove = open(self._embedding_path, 'r', encoding='utf-8')
        for line in tqdm(glove):
            values = line.split(" ")
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
        glove.close()
        print('Found %s word vectors.' % len(embeddings_index))

        # creating embedding matrix for words dataset
        print('creating embedding matrix')
        self._weights = np.zeros(
            (len(self._tokenizer.word_index) + 1, self._output_dim))
        for word, index in tqdm(self._tokenizer.word_index.items()):
            embedding_vector = embeddings_index.get(word)
            if embedding_vector is not None:
                self._weights[index] = embedding_vector

    def get_summary(self):
        return self._model.summary()

    def train(self):

        # preprocess data
        print('preprocessing data')
        self._preprocess_data()

        # get embeddings
        print('getting embeddings weights')
        self._get_embeddings()

        # create model architecture
        self._create_model()
        summary = self.get_summary()
        print('model summary:', summary)

        self._save_path = os.path.join(self._save_path, 'checkpoints',
                                       f'{self._model_name}_{self._run_time}')
        create_folder(self._save_path)

        cp_callback = ModelCheckpoint(
            filepath=self._save_path,
            save_weights_only=False,
            # verbose=verbose,
            save_best_only=True,
            monitor='val_loss',
            mode='min')

        self._history = self._model.fit(
            self._X_train,
            self._y_train,
            epochs=self._epochs,
            validation_data=self._validation_data,
            validation_split=self._validation_split,
            batch_size=self._batch_size,
            callbacks=[cp_callback])

    def predict(self, X, threshold=0.5):
        pred = self._model.predict(X)
        return (pred > threshold).astype(int)

    def evaluate(self, X, y):
        X = self._tokenize_and_pad(X, self._tokenizer, self._input_length)
        return self._model.evaluate(X, y)

    def load_model(self, path):
        self._model = load_model(path)

    def save_model(self, mlflow, path):
        path = os.path.join(path, 'saved_models',
                            f'{self._model_name}_{self._run_time}')
        create_folder(path)
        self._save_tokenizer(path)
        self._save_model(mlflow, path)
        self._save_embeddings(path)

    def _save_embeddings(self, path):
        embeddings = {}
        model_embeddings = self._model.get_layer('embeddings').get_weights()[0]
        for word, index in self._tokenizer.word_index.items():
            embeddings[word] = model_embeddings[index]
        with open(os.path.join(path, f'embeddings.pkl'), 'wb') as handle:
            pickle.dump(embeddings, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def _save_tokenizer(self, path):
        tokenizer_json = self._tokenizer.to_json()
        with io.open(os.path.join(path, f'tokenizer.json'),
                     'w',
                     encoding='utf-8') as f:
            f.write(json.dumps(tokenizer_json, ensure_ascii=False))

    def _save_model(self, mlflow, path):
        mlflow.keras.save_model(self._model, os.path.join(path, f'model'))
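
# A hedged usage sketch for the class above (all arguments shown are placeholders,
# not values from the original):
# bilstm = BiLSTM(output_dim=100, run_time='run_001', X_train=train_texts,
#                 y_train=train_labels, save_path='./artifacts', epochs=5,
#                 batch_size=32, validation_split=0.1,
#                 embedding_path='glove.6B.100d.txt')
# bilstm.train()
# bilstm.save_model(mlflow, './artifacts')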
Example #25
def train():

    #Pre-processing

    df = pd.read_csv(DATA_PATH)
    df.drop('Unnamed: 0', axis=1, inplace=True)
    df.dropna(axis=0, inplace=True)
    # df.head(10)

    df['Party'] = pd.Categorical(df.Party)
    df['Party'] = pd.get_dummies(df['Party'], drop_first=True)

    X = df['Tweet']
    Y = df['Party']

    x, y = pre_process(X, Y=Y)

    X_train, X_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=420)

    #Creating a tokenizer
    t = Tokenizer(oov_token="UNK")
    t.fit_on_texts(x)

    vocab_size = len(t.word_index) + 1
    print("Vocabulary size: {}".format(vocab_size))
    max_sent_len = len(max(x, key=len).split()) + 1
    print("Maximum sentence length: {}".format(max_sent_len))
    emb_dim = 75
    print("Embedding Dimensions: {}".format(emb_dim))

    padded_X_train = encode_and_pad(X_train, t, max_sent_len)

    x_train, x_val, Y_train, y_val = train_test_split(padded_X_train,
                                                      y_train,
                                                      test_size=0.1,
                                                      random_state=420)

    checkpoint_path = TRAINING_PATH
    cp_callback = ModelCheckpoint(checkpoint_path,
                                  verbose=1,
                                  save_weights_only=True,
                                  period=20)

    model = Sequential([
        Embedding(input_dim=vocab_size,
                  output_dim=emb_dim,
                  input_length=max_sent_len,
                  trainable=True),
        Bidirectional(CuDNNLSTM(64, return_sequences=False)),
        Dropout(0.5),
        Dense(2, activation='softmax')
    ])

    model.save_weights(checkpoint_path.format(epoch=0))
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    model.fit(x_train,
              Y_train,
              epochs=200,
              batch_size=300,
              shuffle=True,
              callbacks=[cp_callback],
              validation_data=(x_val, y_val))
    model.save_weights(WEIGHTS_PATH)

    dic = {
        'Vocab Size': vocab_size,
        'Max Sent Length': max_sent_len,
        'Emb Dim': emb_dim
    }
    meta_df = pd.DataFrame(dic, index=['Model 1'])
    meta_df.to_csv(META_PATH)

    with open(TOKENIZER_PATH, 'wb') as handle:
        pickle.dump(t, handle, protocol=pickle.HIGHEST_PROTOCOL)

    print("Training done.")
Example #26
model= Sequential([
                Dense(512,input_shape=(2,),activation='relu'),
                Dense(256,activation='relu'),
                LeakyReLU(alpha=0.01),
                Dense(128,activation='relu'),
                Dense(64,activation='relu'),
                Dropout(0.1),
                
                
                Dense(2,activation='sigmoid')
                ])


model.compile(optimizer='adam',
            loss=SCC(from_logits=True),
            metrics=['accuracy'])

inputs=np.column_stack((train_df['x'].values,train_df['y'].values))



before_training_predictions = model.predict(inputs_test)


evolution = model.fit(inputs,train_df['color'].values, batch_size=32,epochs=epochs)



predictions = model.predict(inputs_test)
Example #27
wenben_long_term_train = DM.wenben_long_term_train_data(wenben_norm_train_df)
wenben_short_term_train = DM.wenben_short_term_train_data(wenben_norm_train_df)
wenben_long_term_test = DM.wenben_long_term_test_data(wenben_norm_test_df)
wenben_short_term_test = DM.wenben_short_term_test_data(wenben_norm_test_df)

print('text data', wenben_long_term_train)
print('trading data', daily_train)

model = Sequential()
model.add(LSTM(100, input_shape=(daily_train.shape[1], daily_train.shape[2])))

model.add(Dropout(0.02))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=keras.optimizers.Adam(lr=1e-3),
              metrics=['acc'])
# fit network
history = model.fit(daily_train,
                    target_train,
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_split=validation_split,
                    shuffle=False)

loss, accuracy = model.evaluate(daily_test, y=target_test)

print(loss, accuracy)

y_predict = model.predict(daily_test).reshape(test_num - wenben_back).tolist()
                        name="Input_Conv_Layer"))

    cnn_lstm_model.add(
        TimeDistributed(MaxPool3D(pool_size=(2, 2, 2),
                                  strides=(2, 2, 2),
                                  padding='valid'),
                        name="Pool_Layer_1"))

    cnn_lstm_model.add(TimeDistributed(Flatten(), name="Flatten_Layer"))

with tf.device('/cpu:0'):

    cnn_lstm_model.add(
        LSTM(10, dropout=0.3, recurrent_dropout=0.3, name="LSTM_Layer"))

with tf.device('/gpu:0'):

    cnn_lstm_model.add(
        Dense(1, activation='sigmoid', name="Output_Dense_Layer"))

    cnn_lstm_model.compile(optimizer=optimizers.Adam(lr=0.0001),
                           loss='binary_crossentropy',
                           metrics=['accuracy'])

cnn_lstm_model.fit_generator(generator=training_generator,
                             steps_per_epoch=train_steps_per_epoch,
                             verbose=1,
                             callbacks=callbacks,
                             validation_data=validation_generator,
                             validation_steps=validate_steps_per_epoch,
                             epochs=epochs)
Example #29
import numpy as np
import matplotlib.pyplot as plt

import tensorflow as tf

from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

from sklearn.model_selection import train_test_split

model = Sequential()
model.add(Dense(100, activation='sigmoid', input_shape=(784, )))
model.add(Dense(10, activation='softmax'))

model.compile(optimizer='sgd',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
(x_train_all,
 y_train_all), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()

x_train, x_val, y_train, y_val = train_test_split(x_train_all,
                                                  y_train_all,
                                                  stratify=y_train_all,
                                                  test_size=0.2,
                                                  random_state=42)
x_train = x_train / 255
x_val = x_val / 255

x_train = x_train.reshape(-1, 784)
x_val = x_val.reshape(-1, 784)
def train_a_model(trainfile):
    '''
    :param trainfile: DataFrame with an 'emotion' label column and flattened pixel columns
    :return: None; the trained model is saved to disk as 'model'
    '''
    x = trainfile.iloc[:,1:].values
    y = trainfile['emotion'].values
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, stratify=y,random_state=42)
    generate_images_folderwise(x_train,y_train,x_test,y_test)
    path = os.getcwd()

    TRAINING_DIR = path+"/images/train/"
    training_datagen = ImageDataGenerator(
        rescale = 1./255,
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')

    VALIDATION_DIR = path+"/images/test/"
    validation_datagen = ImageDataGenerator(rescale = 1./255)

    train_generator = training_datagen.flow_from_directory(
        TRAINING_DIR,
        color_mode='grayscale',
        target_size = (48,48),
        batch_size = 64,
        class_mode='categorical',
        shuffle=True)

    validation_generator = validation_datagen.flow_from_directory(
        VALIDATION_DIR,
        color_mode = 'grayscale',
        target_size = (48,48),
        batch_size = 64,
        class_mode='categorical',
        shuffle=False)

    print(train_generator.class_indices)


    num_classes = 3
    epochs = 100

    # Creating the model
    model = Sequential()

    model.add(Conv2D(32, kernel_size=(5, 5), input_shape=(48,48,1),activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(32, kernel_size=(5, 5),activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2),strides=2))
    model.add(Dropout(0.25))


    model.add(Conv2D(64, kernel_size=(3, 3),activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(64, kernel_size=(3, 3),activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(3, 3),strides=2))
    model.add(Dropout(0.25))

    model.add(Conv2D(128, kernel_size=(3, 3),activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(128, kernel_size=(3, 3),activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(1, 1),strides=2))
    model.add(Dropout(0.25))



    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(3, activation='softmax'))

    model.compile(
    loss = 'categorical_crossentropy', 
    optimizer=Adam(), 
    metrics=['accuracy'])

    steps_per_epoch = train_generator.n//train_generator.batch_size
    validation_steps = validation_generator.n//validation_generator.batch_size

    history = model.fit(
        x=train_generator,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        validation_data = validation_generator,
        validation_steps = validation_steps,
    )

    model.save('model')

    pass
Example #31
def create_model(MLP_C_layer,
                 MLP_m_layer,
                 low_C,
                 up_C,
                 low_m,
                 up_m,
                 F,
                 a0RNN,
                 batch_input_shape,
                 selectaux,
                 selectdk,
                 myDtype,
                 return_sequences=False,
                 unroll=False):

    batch_adjusted_shape = (batch_input_shape[2] + 1, )  #Adding state
    placeHolder = Input(shape=(batch_input_shape[2] + 1, ))  #Adding state

    filterLayer = inputsSelection(batch_adjusted_shape, selectaux)(placeHolder)

    filterdkLayer = inputsSelection(batch_adjusted_shape,
                                    selectdk)(placeHolder)

    MLP_C_min = low_C
    MLP_C_range = up_C - low_C

    MLP_C_layer = MLP_C_layer(filterLayer)
    C_layer = Lambda(lambda x: ((x * MLP_C_range) + MLP_C_min))(MLP_C_layer)

    MLP_m_min = low_m
    MLP_m_range = up_m - low_m

    MLP_m_layer = MLP_m_layer(filterLayer)
    MLP_scaled_m_layer = Lambda(lambda x: ((x * MLP_m_range) + MLP_m_min))(
        MLP_m_layer)

    dk_input_shape = filterdkLayer.get_shape()

    dkLayer = StressIntensityRange(input_shape=dk_input_shape,
                                   dtype=myDtype,
                                   trainable=False)
    dkLayer.build(input_shape=dk_input_shape)
    dkLayer.set_weights([np.asarray([F], dtype=dkLayer.dtype)])
    dkLayer = dkLayer(filterdkLayer)

    ldK_layer = Lambda(lambda x: tf.math.log(x) /
                       (tf.math.log(tf.constant(10.))))(dkLayer)

    dKm_layer = Multiply()([MLP_scaled_m_layer, ldK_layer])

    aux_layer = Add()([C_layer, dKm_layer])

    da_layer = Lambda(lambda x: 10**(x))(aux_layer)

    functionalModel = Model(inputs=[placeHolder], outputs=[da_layer])
    "-------------------------------------------------------------------------"
    CDMCellHybrid = CumulativeDamageCell(model=functionalModel,
                                         batch_input_shape=batch_input_shape,
                                         dtype=myDtype,
                                         initial_damage=a0RNN)

    CDMRNNhybrid = RNN(cell=CDMCellHybrid,
                       return_sequences=return_sequences,
                       return_state=False,
                       batch_input_shape=batch_input_shape,
                       unroll=unroll)

    model = Sequential()
    model.add(CDMRNNhybrid)
    model.compile(loss='mse',
                  optimizer=RMSprop(learning_rate=1e-6),
                  metrics=['mae'])
    return model