Example #1
model.add(Conv2D(32, (2, 2), activation='relu', padding='same'))
model.add(Dropout(0.2))
#model.add(Conv2D(64, (2, 2), activation='relu', padding='same'))
#model.add(Dropout(0.2))
model.add(Flatten())
#model.add(Dense(128, activation = 'relu'))
#model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(12, activation='softmax'))

# Model summary
model.summary()

# Defining the optimizer
model.compile(optimizer=Adam(learning_rate=0.001),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Fitting the model
history = model.fit(X_train,
                    y_train,
                    epochs=50,
                    validation_data=(X_val, y_val),
                    verbose=1)

test_loss, test_accuracy = model.evaluate(X_test, y_test)
print("Test accuracy: {}".format(test_accuracy))

prediction = model.predict(X_test)
prediction = np.argmax(prediction, axis=1)
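A quick sanity check on the predicted classes (a minimal sketch, assuming y_test holds integer labels, as the sparse categorical loss implies):

from sklearn.metrics import accuracy_score, confusion_matrix

print("Prediction accuracy:", accuracy_score(y_test, prediction))
print(confusion_matrix(y_test, prediction))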
Example #2
network = Sequential([
    layers.Dense(256, activation="relu"),
    layers.Dense(128, activation="relu"),
    layers.Dense(64, activation="relu"),
    layers.Dense(32, activation="relu"),
    layers.Dense(10),
])
network.build(input_shape=(None, 28 * 28))
network.summary()

network.compile(
    optimizer=optimizers.Adam(learning_rate=0.01),
    loss=tf.losses.CategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)

network.fit(db, epochs=5, validation_data=ds_val, validation_steps=2)

network.evaluate(ds_val)

sample = next(iter(ds_val))
x = sample[0]
y = sample[1]  # one-hot
pred = network.predict(x)  # [b, 10]
# convert one-hot labels and predicted probabilities back to class indices
y = tf.argmax(y, axis=1)
pred = tf.argmax(pred, axis=1)

print(pred)
print(y)
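To collapse the two printed tensors into a single score, average the per-sample matches (a small follow-up sketch reusing the tensors computed above):

acc = tf.reduce_mean(tf.cast(tf.equal(pred, y), tf.float32))
print("batch accuracy:", float(acc))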
Example #3
# determine the number of input features
n_features = X_train.shape[1]
# define model
model = Sequential()
model.add(Dense(10, activation='relu', kernel_initializer='he_normal', input_shape=(n_features,)))
model.add(Dense(8, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(1))
# compile the model
model.compile(optimizer='adam', loss='mse')
# fit the model
model.fit(X_train, y_train, epochs=150, batch_size=32, verbose=0)
# evaluate the model
error = model.evaluate(X_test, y_test, verbose=0)
print('MSE: %.3f' % (error))

yhat = model.predict(X_test)

# svr
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
svr_rbf = SVR(kernel='rbf', C=100, gamma=0.1, epsilon=.1)
svr_rbf.fit(X_train, y_train)
y_pred = svr_rbf.predict(X_test)
err = mean_squared_error(y_test, y_pred)
print('MSE: %.3f' % (err))

# LR
from sklearn.linear_model import LinearRegression
reg = LinearRegression().fit(X_train, y_train)
y_pred2 = reg.predict(X_test)
err = mean_squared_error(y_test, y_pred2)
print('MSE: %.3f' % (err))
Example #4
# In[5]:

# there are 2 ways to load the model; if the first one succeeds, you can go straight to the prediction function

MODEL_PATH = 'models/project face gemastik/model.h5'
model = load_model(MODEL_PATH, compile=False)

# In[47]:

# read image
im = Image.open(
    'Dataset/test_image/1lvl-1-surgical-mask-white-41805-front-copy.jpg')
X = preprocess(im, input_size)
X = reshape([X])
y = model.predict(X)

print(labels[np.argmax(y)], np.max(y))

# In[48]:

y

# In[49]:

print(labels[np.argmax(y)], np.max(y))

# In[7]:

import cv2
import numpy as np
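Example #4 relies on helpers defined elsewhere in the notebook (labels, input_size, preprocess, reshape). A hypothetical sketch of what they might look like for a 224x224 RGB classifier; the names, input size, and class list here are assumptions, not the notebook's actual code:

input_size = (224, 224)  # assumed input size of the face-mask model
labels = ['with_mask', 'without_mask']  # assumed class names


def preprocess(im, input_size):
    # resize the PIL image and scale pixel values to [0, 1] (assumed preprocessing)
    im = im.resize(input_size)
    return np.asarray(im, dtype='float32') / 255.0


def reshape(images):
    # stack a list of images into a single (n, h, w, c) batch
    return np.stack(images, axis=0)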
Example #5
class Model:
    labels_file_name = 'labels.json'
    freq_file_name = 'freq.json'

    # noinspection PyShadowingNames
    def __init__(self,
                 layers: Optional[List[Layer]] = None,
                 labels: Optional[List[str]] = None,
                 model: Optional[_Model] = None,
                 optimizer='adam',
                 loss='sparse_categorical_crossentropy',
                 metrics=('accuracy', ),
                 low_freq: int = AudioSegment.default_low_freq,
                 high_freq: int = AudioSegment.default_high_freq):
        assert layers or model
        self.label_names = labels
        self.cutoff_frequencies = (int(low_freq), int(high_freq))

        if layers:
            self._model = Sequential(layers)
            self._model.compile(optimizer=optimizer,
                                loss=loss,
                                metrics=list(metrics))
        else:
            self._model = model

    def fit(self, dataset: Dataset, *args, **kwargs):
        return self._model.fit(dataset.train_samples, dataset.train_classes,
                               *args, **kwargs)

    def evaluate(self, dataset: Dataset, *args, **kwargs):
        return self._model.evaluate(dataset.validation_samples,
                                    dataset.validation_classes, *args,
                                    **kwargs)

    def predict(self, audio: AudioSegment):
        spectrum = audio.spectrum(low_freq=self.cutoff_frequencies[0],
                                  high_freq=self.cutoff_frequencies[1])
        output = self._model.predict(np.array([spectrum]))
        prediction = int(np.argmax(output))
        return self.label_names[prediction] if self.label_names else prediction

    def save(self, path: str, *args, **kwargs):
        path = os.path.abspath(os.path.expanduser(path))
        is_file = path.endswith('.h5') or path.endswith('.pb')
        if is_file:
            model_dir = str(pathlib.Path(path).parent)
        else:
            model_dir = path

        pathlib.Path(model_dir).mkdir(parents=True, exist_ok=True)
        self._model.save(path, *args, **kwargs)
        if self.label_names:
            labels_file = os.path.join(model_dir, self.labels_file_name)
            with open(labels_file, 'w') as f:
                json.dump(self.label_names, f)

        if self.cutoff_frequencies:
            freq_file = os.path.join(model_dir, self.freq_file_name)
            with open(freq_file, 'w') as f:
                json.dump(self.cutoff_frequencies, f)

    @classmethod
    def load(cls, path: str, *args, **kwargs):
        path = os.path.abspath(os.path.expanduser(path))
        is_file = path.endswith('.h5') or path.endswith('.pb')
        if is_file:
            model_dir = str(pathlib.Path(path).parent)
        else:
            model_dir = path

        model = load_model(path, *args, **kwargs)
        labels_file = os.path.join(model_dir, cls.labels_file_name)
        freq_file = os.path.join(model_dir, cls.freq_file_name)
        label_names = []
        frequencies = []

        if os.path.isfile(labels_file):
            with open(labels_file, 'r') as f:
                label_names = json.load(f)

        if os.path.isfile(freq_file):
            with open(freq_file, 'r') as f:
                frequencies = json.load(f)

        return cls(model=model,
                   labels=label_names,
                   low_freq=frequencies[0],
                   high_freq=frequencies[1])
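A usage sketch for the class above; the layer sizes, label names, and the dataset and clip objects are placeholders rather than part of the original module:

model = Model(layers=[Dense(64, activation='relu'),
                      Dense(2, activation='softmax')],
              labels=['silence', 'speech'])
model.fit(dataset, epochs=10)        # dataset: a Dataset with train/validation splits
model.save('~/models/sound-detect')  # also writes labels.json and freq.json

restored = Model.load('~/models/sound-detect')
print(restored.predict(clip))        # clip: an AudioSegment; prints a label name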
Example #6
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# https://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tensorflow as tf
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

l0 = Dense(units=1, input_shape=[1])
model = Sequential([l0])
model.compile(optimizer='sgd', loss='mean_squared_error')

xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
ys = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)

model.fit(xs, ys, epochs=500)

print(model.predict(np.array([[10.0]])))  # shape (1, 1): one sample with one feature
print("Here is what I learned: {}".format(l0.get_weights()))
Example #7
        self.fc3 = MyDense(128, 64)
        self.fc4 = MyDense(64, 32)
        self.fc5 = MyDense(32, 10)

    def call(self, inputs, training=None):
        x = self.fc1(inputs)
        x = tf.nn.relu(x)
        x = self.fc2(x)
        x = tf.nn.relu(x)
        x = self.fc3(x)
        x = tf.nn.relu(x)
        x = self.fc4(x)
        x = tf.nn.relu(x)
        x = self.fc5(x)
        return x


network = MyModel()
network.compile(optimizer=optimizers.Adam(learning_rate=0.05),
                # note: this differs from tf.losses.categorical_crossentropy()
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
network.fit(db, epochs=10, validation_data=ds_val, validation_freq=2)
network.evaluate(ds_val)
sample = next(iter(ds_val))
x = sample[0]
y = sample[1]
pred = network.predict(x)
y = tf.argmax(y, axis=1)
pred = tf.argmax(pred, axis=1)
Example #8
class SparseModel(BaseModel):
    def __init__(self):
        self.embedding_dimension = 64
        self.vocab_size = 2000
        self.max_length = 100
        self.oov_tok = '<OOV>'
        self.truncate_type = 'post'
        self.padding_type = 'post'
        self.tokenizer = Tokenizer(num_words=self.vocab_size,
                                   oov_token=self.oov_tok)
        self.le = LabelEncoder()
        self.le.fit_transform(['neu', 'neg', 'pos'])

    def train(self, X_train, Y_train):
        Y_train = self.le.transform(Y_train)
        self.tokenizer.fit_on_texts(X_train)
        X_train = self.tokenizer.texts_to_sequences(X_train)
        word_index = self.tokenizer.word_index
        X_train = pad_sequences(X_train,
                                maxlen=self.max_length,
                                padding=self.padding_type,
                                truncating=self.truncate_type)
        self.model = Sequential([
            Embedding(input_dim=len(word_index) + 1,
                      output_dim=self.embedding_dimension),
            SpatialDropout1D(0.3),
            Bidirectional(
                LSTM(self.embedding_dimension,
                     dropout=0.3,
                     recurrent_dropout=0.3)),
            Dense(self.embedding_dimension, activation='relu'),
            Dropout(0.8),
            Dense(3, activation='softmax')
        ])
        self.model.compile(loss='sparse_categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        self.history = self.model.fit(x=X_train, y=Y_train, epochs=8)

        # Save
        self.model.save(f'{self.ASSETS_DIR}/sparse.model')
        tokenizer_json = self.tokenizer.to_json()
        with io.open(f'{self.ASSETS_DIR}/sparse.tokenizer.json',
                     'w',
                     encoding='utf-8') as f:
            f.write(json.dumps(tokenizer_json, ensure_ascii=False))

    def analyze(self, X_test, Y_test):
        self.model.summary()
        Y_test = self.le.transform(Y_test)
        X_test = self.tokenizer.texts_to_sequences(X_test)
        X_test = pad_sequences(X_test,
                               maxlen=self.max_length,
                               padding=self.padding_type,
                               truncating=self.truncate_type)
        test_loss, test_acc = self.model.evaluate(x=X_test, y=Y_test)

        print(
            f'{self.__class__.__name__} Accuracy: {test_acc} Loss: {test_loss}'
        )

    def predict(self, texts):
        texts = [text_cleaner(text) for text in texts]
        texts = self.tokenizer.texts_to_sequences(texts)
        texts = pad_sequences(texts,
                              maxlen=self.max_length,
                              padding=self.padding_type,
                              truncating=self.truncate_type)
        p = self.model.predict(texts)
        y_classes = [np.argmax(y, axis=None, out=None) for y in p]

        return self.le.inverse_transform(y_classes)

    def load(self):
        self.model = load_model(f'{self.ASSETS_DIR}/sparse.model')
        with open(f'{self.ASSETS_DIR}/sparse.tokenizer.json') as f:
            data = json.load(f)
            self.tokenizer = tokenizer_from_json(data)
Example #9
class HAN:
    def __init__(self, embedding_layer, sentence_width, model_width,
                 input_shape, output_width):
        """
        initialize a new hierarchical network
        Args:
            embedding_layer (tf.keras.layers.Embedding): Pre-trained embedding layer
            sentence_width (int): Width of internal layers in word-level attention network
            model_width (int): Width of internal layers in sentence-level attention network
            input_shape (tuple): Input dimension shape tuple
            output_width (int): Output dimension (i.e. number of categories)
        """
        self.sentence_network = Sequential()
        self.sentence_network.add(embedding_layer)
        self.sentence_network.add(
            Bidirectional(GRU(sentence_width, return_sequences=True)))
        self.sentence_network.add(AttLayer(sentence_width))

        self.model = Sequential()
        self.model.add(
            TimeDistributed(self.sentence_network,
                            input_shape=(input_shape[0], input_shape[1])))
        self.model.add(Bidirectional(GRU(model_width, return_sequences=True)))
        self.model.add(AttLayer(model_width))
        self.model.add(Dense(output_width, activation='sigmoid'))

        self.model.compile(loss='binary_crossentropy',
                           optimizer='rmsprop',
                           metrics=['acc'])

    def summary(self):
        """
        Print summary of network
        """
        self.sentence_network.summary()
        self.model.summary()

    def fit(self, x_train, y_train, x_val, y_val, epochs, batch_size):
        """
        Train the network
        Args:
            x_train (np.array): Training inputs
            y_train (np.array): Training target labels
            x_val (np.array): Validation inputs
            y_val (np.array): Validation target labels
            epochs (int): Number of training epochs
            batch_size (int): Training batch size
        """
        self.model.fit(x_train,
                       y_train,
                       validation_data=(x_val, y_val),
                       epochs=epochs,
                       batch_size=batch_size)
        self.hidden_word_output = Model(self.sentence_network.input,
                                        self.sentence_network.layers[1].output)
        self.word_ctx_0 = self.sentence_network.layers[-1].get_weights()[0]
        self.word_ctx_1 = self.sentence_network.layers[-1].get_weights()[1]
        self.word_ctx_2 = self.sentence_network.layers[-1].get_weights()[2]
        self.hidden_sent_output = Model(self.model.input,
                                        self.model.layers[-3].output)
        self.sent_ctx_0 = self.model.layers[-2].get_weights()[0]
        self.sent_ctx_1 = self.model.layers[-2].get_weights()[1]
        self.sent_ctx_2 = self.model.layers[-2].get_weights()[2]

    def predict(self, x):
        """
        Predict category of sample(s)
        Args:
            x (np.array): Input samples(s)

        Returns:
            (np.array): Category predictions array
        """
        return self.model.predict(x)

    def attention_matrix(self, x):
        """
        Get per-word attention matrix
        Args:
            x (np.array): Input sample

        Returns:
            (np.array): Per word attention values, normalized over all words
        """
        word_att = self.hidden_word_output.predict(x)
        u_watt = np.exp(
            np.dot(
                np.tanh(np.dot(word_att, self.word_ctx_0) + self.word_ctx_1),
                self.word_ctx_2)[:, :, 0])
        u_watt = normalize(u_watt, axis=1, norm='l2')

        sent_att = self.hidden_sent_output.predict(np.expand_dims(x, axis=0))
        u_satt = np.exp(
            np.dot(
                np.tanh(np.dot(sent_att, self.sent_ctx_0) + self.sent_ctx_1),
                self.sent_ctx_2)[:, :, 0])
        u_satt = normalize(u_satt, axis=1, norm='l2')[0]

        return (u_satt * u_watt.T).T
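For reference, the code above implements the standard HAN attention score: each hidden state h is scored as exp(u_ctx . tanh(W h + b)), where W, b, and u_ctx are the three weight tensors pulled from AttLayer after training; the word-level and sentence-level scores are then normalized and multiplied into a per-word matrix.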
Example #10
    def build_model(self, idx):
        # idx denotes which dataset to build the model on

        common_words = [
            'i', 'you', 'they', 'has', 'have', 'are', 'is', 'a', 's', 'the',
            'there', 'of', 'was', 'were', 'to', 'and', 'it', 'we', 're'
        ]

        for i in range(len(self.df_text[idx])):
            j = 0
            while j < len(self.df_text[idx][i]):
                word = self.df_text[idx][i][j]
                if word in common_words:
                    self.df_text[idx][i].pop(j)
                else:
                    j += 1

        max_length = max([len(s) for s in self.df_text[idx]])
        num_reviews = len(self.df_text[idx])
        print(max_length)

        text = np.zeros((num_reviews, max_length, self.embedding_dim))
        for i in range(len(self.df_text[idx])):
            train_idx = 0
            for j in range(len(self.df_text[idx][i])):
                # get jth word of ith review
                word = self.df_text[idx][i][j]
                #print("word: ", word)
                if word in self.word2vec_models[idx].wv.vocab:
                    vec = self.word2vec_models[idx][word]
                    text[i, train_idx, :] = vec
                    train_idx += 1
                #else:
                #    print("review[{0:d}], word[{1:d}]: not in vocab".format(i, j))

        print("Finished creating text... ")
        print("text size: ", text.shape)
        batch_size = 24
        rnn_dim = 100
        # model
        model = Sequential()
        model.add(
            Dense(batch_size, input_shape=(max_length, self.embedding_dim)))
        model.add(Dense(300, activation='relu'))
        model.add(Dense(150, activation='relu'))
        model.add(Dense(50, activation='relu'))
        model.add(Bidirectional(LSTM(rnn_dim, return_sequences=False)))
        model.add(Dense(1, activation='relu'))
        model.summary()

        model.compile(loss='mse',
                      optimizer=tf.keras.optimizers.Adam(0.003),
                      metrics=['acc'])

        labels = self.df[idx]['useful'].values
        train_text, test_text, train_labels, test_labels = train_test_split(
            text, labels, test_size=0.1)

        print("train data: ", train_text.shape, train_labels.shape)
        print("test data: ", test_text.shape, test_labels.shape)

        i = num_reviews // batch_size
        i *= batch_size
        train_text = train_text[:i, :, :]
        train_labels = train_labels[:i]

        model.fit(train_text,
                  train_labels,
                  batch_size=batch_size,
                  shuffle=True,
                  epochs=10)

        predicted = model.predict(test_text)

        print("predicted: ", predicted[:30])
        print("real: ", test_labels[:30])

        loss, acc = model.evaluate(test_text, test_labels)
        print("loss: ", loss, " accuracy; ", acc)
Example #11
def main(args):
    '''A TensorFlow/Keras implementation similar to the LSTM model from the paper:
        "Real-Time Guitar Amplifier Emulation with Deep Learning"
        https://www.mdpi.com/2076-3417/10/3/766/htm

        Uses a stack of two 1-D Convolutional layers, followed by LSTM, followed by 
        a Dense (fully connected) layer. Three preset training modes are available, 
        with further customization by editing the code. A Sequential tf.keras model 
        is implemented here.

        Note: RAM may be a limiting factor for the parameter "input_size". The wav data
          is preprocessed and stored in RAM, which improves training speed but quickly
          exhausts memory when "input_size" is large. Reduce it if you are experiencing
          RAM issues.
        
        --training_mode=0   Speed training (default)
        --training_mode=1   Accuracy training
        --training_mode=2   Extended training (set max_epochs as desired, for example 50+)
    '''

    name = args.name
    if not os.path.exists('models/' + name):
        os.makedirs('models/' + name)
    else:
        print(
            "A model folder with the same name already exists. Please choose a new name."
        )
        return

    train_mode = args.training_mode  # 0 = speed training,
    # 1 = accuracy training
    # 2 = extended training
    batch_size = args.batch_size
    test_size = 0.2
    epochs = args.max_epochs
    input_size = args.input_size

    # TRAINING MODE
    if train_mode == 0:  # Speed Training
        learning_rate = 0.01
        conv1d_strides = 12
        conv1d_filters = 16
        hidden_units = 36
    elif train_mode == 1:  # Accuracy Training (~10x longer than Speed Training)
        learning_rate = 0.01
        conv1d_strides = 4
        conv1d_filters = 36
        hidden_units = 64
    else:  # Extended Training (~60x longer than Accuracy Training)
        learning_rate = 0.0005
        conv1d_strides = 3
        conv1d_filters = 36
        hidden_units = 96

    # Load and Preprocess Data ###########################################
    in_rate, in_data = wavfile.read(args.in_file)
    out_rate, out_data = wavfile.read(args.out_file)

    X = in_data.astype(np.float32).flatten()
    X = normalize(X).reshape(len(X), 1)
    y = out_data.astype(np.float32).flatten()
    y = normalize(y).reshape(len(y), 1)

    y_ordered = y[input_size - 1:]

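    # Broadcasting builds an index matrix of shape (len(X) - input_size + 1, input_size):
    # row i holds [i, i+1, ..., i + input_size - 1], so tf.gather below slices the
    # signal into overlapping windows, each aligned with its target sample in y_ordered.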
    indices = np.arange(input_size) + np.arange(len(X) - input_size +
                                                1)[:, np.newaxis]
    X_ordered = tf.gather(X, indices)

    shuffled_indices = np.random.permutation(len(X_ordered))
    X_random = tf.gather(X_ordered, shuffled_indices)
    y_random = tf.gather(y_ordered, shuffled_indices)

    # Create Sequential Model ###########################################
    clear_session()
    model = Sequential()
    model.add(
        Conv1D(conv1d_filters,
               12,
               strides=conv1d_strides,
               activation=None,
               padding='same',
               input_shape=(input_size, 1)))
    model.add(
        Conv1D(conv1d_filters,
               12,
               strides=conv1d_strides,
               activation=None,
               padding='same'))
    model.add(LSTM(hidden_units))
    model.add(Dense(1, activation=None))
    model.compile(optimizer=Adam(learning_rate=learning_rate),
                  loss=error_to_signal,
                  metrics=[error_to_signal])
    print(model.summary())

    # Train Model ###################################################
    model.fit(X_random,
              y_random,
              epochs=epochs,
              batch_size=batch_size,
              validation_split=test_size)

    model.save('models/' + name + '/' + name + '.h5')

    # Run Prediction #################################################
    print("Running prediction..")
    y_the_rest, y_last_part = np.split(y_ordered, [int(len(y_ordered) * .8)])
    x_the_rest, x_last_part = np.split(X, [int(len(X) * .8)])

    x_the_rest, x_ordered_last_part = np.split(X_ordered,
                                               [int(len(X_ordered) * .8)])
    prediction = model.predict(x_ordered_last_part, batch_size=batch_size)

    save_wav('models/' + name + '/y_pred.wav', prediction)
    save_wav('models/' + name + '/x_test.wav', x_last_part)
    save_wav('models/' + name + '/y_test.wav', y_last_part)

    # Add additional data to the saved model (like input_size)
    filename = 'models/' + name + '/' + name + '.h5'
    f = h5py.File(filename, 'a')
    grp = f.create_group("info")
    dset = grp.create_dataset("input_size", (1, ), dtype='int16')
    dset[0] = input_size
    f.close()

    # Create Analysis Plots ###########################################
    if args.create_plots == 1:
        print("Plotting results..")
        import plot

        plot.analyze_pred_vs_actual({
            'output_wav': 'models/' + name + '/y_test.wav',
            'pred_wav': 'models/' + name + '/y_pred.wav',
            'input_wav': 'models/' + name + '/x_test.wav',
            'model_name': name,
            'show_plots': 1,
            'path': 'models/' + name
        })
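The custom loss error_to_signal is defined elsewhere in this repository. The paper's error-to-signal ratio (ESR) divides the energy of the residual by the energy of the target signal, so a minimal sketch (omitting the pre-emphasis filter the paper applies first) could look like:

def error_to_signal(y_true, y_pred):
    # ESR: residual energy relative to target energy; epsilon avoids division by zero
    return tf.reduce_sum(tf.square(y_true - y_pred)) / (
        tf.reduce_sum(tf.square(y_true)) + 1e-10)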
Example #12
    def build_classification_model(self, idx):
        # idx denotes which dataset to build the model on

        common_words = [
            'i', 'you', 'they', 'has', 'have', 'are', 'is', 'a', 's', 'the',
            'there', 'of', 'was', 'were', 'to', 'and', 'it', 'we', 're'
        ]

        for i in range(len(self.df_text[idx])):
            j = 0
            while j < len(self.df_text[idx][i]):
                word = self.df_text[idx][i][j]
                if word in common_words:
                    self.df_text[idx][i].pop(j)
                else:
                    j += 1

        max_length = max([len(s) for s in self.df_text[idx]])
        num_reviews = len(self.df_text[idx])
        print(max_length)

        text = np.zeros((num_reviews, max_length, self.embedding_dim))
        for i in range(len(self.df_text[idx])):
            train_idx = 0
            for j in range(len(self.df_text[idx][i])):
                # get jth word of ith review
                word = self.df_text[idx][i][j]
                #print("word: ", word)
                if word in self.word2vec_models[idx].wv.vocab:
                    vec = self.word2vec_models[idx][word]
                    text[i, train_idx, :] = vec
                    train_idx += 1
                #else:
                #    print("review[{0:d}], word[{1:d}]: not in vocab".format(i, j))

        print("Finished creating text... ")
        print("text size: ", text.shape)

        batch_size = 24
        rnn_dim = 100
        # model
        model = Sequential()
        model.add(
            Dense(batch_size, input_shape=(max_length, self.embedding_dim)))
        model.add(Dense(300, activation='relu'))
        model.add(Dense(150, activation='relu'))
        model.add(Bidirectional(LSTM(rnn_dim, return_sequences=False)))
        model.add(Dense(3, activation='softmax'))
        model.summary()

        model.compile(loss='categorical_crossentropy',
                      optimizer=tf.keras.optimizers.Adam(0.05),
                      metrics=['acc'])

        class_labels = []
        labels = self.df[idx]['useful'].values
        for l in labels:
            if l < 0.33:
                class_labels.append('not useful')
            elif l >= 0.33 and l < 0.66:
                class_labels.append('quite useful')
            elif l >= 0.66 and l <= 1.0:
                class_labels.append('very useful')

        print("class_labels shape: ", len(class_labels))
        # encode class values as integers
        encoder = LabelEncoder()
        encoder.fit(class_labels)
        encoded_labels = encoder.transform(class_labels)
        # convert integers to dummy variables (i.e. one hot encoded)
        one_hot_labels = np_utils.to_categorical(encoded_labels)

        print("one_hot_labels shape: ", one_hot_labels.shape)

        train_text, test_text, train_labels, test_labels = train_test_split(
            text, one_hot_labels, test_size=0.1)

        print("train data: ", train_text.shape, train_labels.shape)
        print("test data: ", test_text.shape, test_labels.shape)

        i = num_reviews // batch_size
        i *= batch_size
        train_text = train_text[:i, :, :]
        train_labels = train_labels[:i]

        model.fit(train_text,
                  train_labels,
                  batch_size=batch_size,
                  shuffle=True,
                  epochs=10)

        predicted = model.predict(test_text)

        print("predicted: ", predicted[:30])
        print("real: ", test_labels[:30])

        loss, acc = model.evaluate(test_text, test_labels)
        print("loss: ", loss, " accuracy; ", acc)
Example #13
from tensorflow.keras.preprocessing.image import *
from tensorflow.keras.utils import get_file

classifier_url = ('https://tfhub.dev/google/imagenet/'
                  'resnet_v2_152/classification/4')

model = Sequential([
    hub.KerasLayer(classifier_url, input_shape=(224, 224, 3))
])

image = load_img('beetle.jpg', target_size=(224, 224))
image = img_to_array(image)
image = image / 255.0
image = np.expand_dims(image, axis=0)

predictions = model.predict(image)

predicted_index = np.argmax(predictions[0], axis=-1)

file_name = 'ImageNetLabels.txt'
file_url = ('https://storage.googleapis.com/'
            'download.tensorflow.org/data/ImageNetLabels.txt')
labels_path = get_file(file_name, file_url)

with open(labels_path) as f:
    imagenet_labels = np.array(f.read().splitlines())

predicted_class = imagenet_labels[predicted_index]
print(predicted_class)
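Note that ImageNetLabels.txt holds 1001 entries with 'background' at index 0, matching the 1001-way output of this TF-Hub classifier, so the argmax index maps straight onto the label array without an offset.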

plt.figure()
Example #14
#%% load model
model = tf.keras.models.load_model(
    os.path.join(r'C:\Users\boehm\Google_Drive\Masterarbeit\Part4_ANN_versions\ANNs\Models\03_FFNN',
                 'model_epochs12'))
    
#%% prediction 
X_pred1 = X_test[0].reshape(1,78)
y_sim1 = y_test[0].reshape(1,361315)

X_pred2 = X_test[1].reshape(1,78)
y_sim2 = y_test[1].reshape(1,361315)

X_pred3 = X_test[2].reshape(1,78)
y_sim3 = y_test[2].reshape(1,361315)

# make a prediction
yhat1 = model.predict(X_pred1)#
yhat2 = model.predict(X_pred2)
yhat3 = model.predict(X_pred3)

rmse1  = np.sqrt(metrics.mean_squared_error(yhat1, y_sim1))
rmse2  = np.sqrt(metrics.mean_squared_error(yhat2, y_sim2))
rmse3  = np.sqrt(metrics.mean_squared_error(yhat3, y_sim3))

# index_yhat = np.where(yhat>0.01,yhat,0)
# index_y_event = np.where(y_sim>0.01,y_sim,0)
# pred_RMSE = metrics.mean_squared_error(index_yhat, index_y_event) #wrong because rmse dependent on n cells

# denormalization
# yhat1 = yhat1 * y_std + y_mean
# y_sim1 = y_sim1 * y_std + y_mean
Example #15
model_univar.add(
    LSTM(units=64,
         return_sequences=True,
         input_shape=(n_past, dataset_train.shape[1])))  # 1st layer
model_univar.add(LSTM(units=30, return_sequences=False))  # 2nd layer
model_univar.add(Dropout(0.55))  # Dropout layer
model_univar.add(Dense(units=1, activation='linear'))  # Output Layer

model_univar.compile(optimizer=Adam(learning_rate=0.0001),
                     loss='mean_squared_error')  # compile model

# LOAD DYNAMIC
model_univar.load_weights(model_to_load)  # load_weights restores weights in place; it does not return a model

# PREDICTIONS MADE ON PAST VALUES
predictions_past = model_univar.predict(X[n_past:])

# CONSTRUCT A DF TO HOLD RESULTS
univar_pst_result_df = dataset_train[2 * n_past + n_future -
                                     1:].copy()  # now reference the true set
univar_pst_result_df[
    'DynPredCottHeight'] = predictions_past  # add prediction column to trueset
#univar_pst_result_df = univar_pst_result_df.rename(columns={"CottHeight": "ActlRottHeight"})

# FINAL Dataframe
_S2D_Final_df = waves_fcst.drop(waves_fcst.columns.difference(['CottHeight']),
                                axis=1)
S2D_Final_df = _S2D_Final_df.join(univar_pst_result_df)
S2D_Final_df['DateTime'] = S2D_Final_df.index  # add DateTime index back in

# ---------------------------------------------------------------------------------------------------
Example #16
label2 = real_temperature['speed_diff']

y_real.append(label2.iloc[:n_future])

for i in range(len(y_real)):
    y_real[i] = np.array(y_real[i])

for i in range(len(y_real)):
    y_real[i] = np.reshape(y_real[i], (y_real[0].shape[0]))

for i in range(17):
    x_test[:, i] = scalers[i].transform(x_test[:, i])  # transform only: refitting on test data would leak test statistics

print(y_real)

print('############################')

#testing = sc.transform(testdataset)
#testing = np.reshape(x_test,(testing.shape[1],testing.shape[0]))

predicted_temperature = regressor.predict(x_test)
predicted_temperature = sc1.inverse_transform(predicted_temperature)
predicted_temperature = np.reshape(
    predicted_temperature,
    (predicted_temperature.shape[1], predicted_temperature.shape[0]))

for i in range(len(predicted_temperature)):
    print('Actual value at hour:', i, y_real[0][i], 'Predicted value:',
          predicted_temperature[i][0])
Example #17

import numpy as np
import tensorflow
import matplotlib.pyplot as plt
from PIL import Image


def convert_image(file):
    return np.array(Image.open(file).convert('L'))


image = convert_image(r'C:\WAT\house-small.jpg')
plt.imshow(image, cmap='gray')
plt.show()
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D
model = Sequential(
    Conv2D(filters=1, kernel_size=(3, 3), input_shape=(302, 403, 1)))
image4Conv = tensorflow.expand_dims(image, 0)
image4Conv = tensorflow.expand_dims(image4Conv, -1)
result = model.predict(image4Conv)
result = tensorflow.squeeze(result)
plt.imshow(result, cmap='gray')
plt.show()
from tensorflow.keras import backend as K


def my_filter(shape, dtype=None):
    # Set the filter to detect vertical and horizontal edges
    f = np.array([[[[-1]], [[-1]], [[-1]]], [[[-1]], [[8]], [[-1]]],
                  [[[-1]], [[-1]], [[-1]]]])
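    # f has shape (3, 3, 1, 1): a 3x3 Laplacian-style edge kernel for a single
    # input channel and a single output filter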
    return K.variable(f, dtype='float32')


model_edge = Sequential(
    Conv2D(filters=1,
Example #18
class LSTM_model:
    '''LSTM model'''
    def __init__(self, config, datasource):
        self.config = config
        self.opt = None
        self.checkpoint = None
        self.tensorboard = None
        self.early_stopping = None
        self.x_train = None
        self.x_test = None
        self.y_train = None
        self.y_test = None
        self.weights = None
        self.train_data_gen = None
        self.test_data_gen = None
        self.datasource = datasource
        self.set_params()

    def set_params(self):
        '''prepares the hyperparameters to be used by the model'''

        if self.config == 1:
            self.model_params = {
                'optimizer': Adam,
                'epochs': 150,
                'learning_rate': 0.001,
                'decay': 1e-6,
                'dropout': 0.5,
                'SEQ_LEN': 90,
                'NAME': f"LSTM-{self.config}-\
                {time.strftime('%Y-%m-%d %H-%M-%S')}",
                'patience': 100,
                'lstm_neurons': [256, 256, 128],
                'shuffle': True,
                'batch_size': 32,
                'steps_per_epoch': 50
                }
        elif self.config == 2:
            self.model_params = {
                'optimizer': Adam,
                'epochs': 150,
                'learning_rate': 0.001,
                'decay': 1e-6,
                'dropout': 0.5,
                'SEQ_LEN': 10,
                'NAME': f"LSTM-{self.config}-\
                {time.strftime('%Y-%m-%d %H-%M-%S')}",
                'patience': 100,
                'lstm_neurons': [256, 256, 128],
                'shuffle': True,
                'batch_size': 32,
                'steps_per_epoch': 50
                }
        else:
            assert 0, "Bad Config creation: " + str(self.config)

    @staticmethod
    def get_weights(dependent_var):
        '''calculate classification weights'''
        classes, cnt = np.unique(dependent_var, return_counts=True,
                                 axis=0)
        classes = classes.astype(int)
        weights = 1/(cnt/cnt.sum())
        weights = weights/weights.sum()
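        # e.g. counts {0: 900, 1: 100} give raw weights [1.11, 10.0], which
        # normalize to {0: 0.1, 1: 0.9}, so the rare class weighs 9x more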
        return dict(zip(classes, weights))

    def add_optimizer(self):
        '''add optimiser to be used by LSTM'''
        self.opt = self.model_params['optimizer'](
            lr=self.model_params['learning_rate'],
            decay=self.model_params['decay'], clipnorm=1.)

    def create_model(self):
        '''create LSTM model'''
        self.model = Sequential()
        self.model.add(LSTM(units=self.model_params['lstm_neurons'][0],
                            return_sequences=True,
                            input_shape=(self.model_params['SEQ_LEN'],
                                         self.train_generator.df.shape[1]-3)))
        self.model.add(Dropout(self.model_params['dropout']))
        self.model.add(BatchNormalization())

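        # Stack the remaining LSTM layers; only the last one is created with
        # return_sequences=False so that a single output vector feeds the Dense
        # head (the check compares each layer's unit count to the last entry).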
        if len(self.model_params['lstm_neurons']) > 1:
            for i in self.model_params['lstm_neurons'][1:]:
                self.model.add(LSTM(units=i, return_sequences=True if i !=
                                    self.model_params['lstm_neurons'][-1]
                                    else False))
                self.model.add(Dropout(self.model_params['dropout']))
                self.model.add(BatchNormalization())

        self.model.add(Dense(32, activation='relu'))
        self.model.add(Dropout(self.model_params['dropout']))

        self.model.add(Flatten())
        self.model.add(Dense(3, activation='softmax'))

        self.add_optimizer()

        self.model.compile(loss='sparse_categorical_crossentropy',
                           optimizer=self.opt, metrics=['accuracy'])
        self.model.summary()
        '''
        self.tensorboard = TensorBoard(log_dir=f"./LSTM_Models/LSTM_logs/\
                                       {self.model_params['NAME']}")'''
        # to follow training, run TensorBoard from the console with:
        # tensorboard --logdir=LSTM_Models/LSTM_logs/

        self.checkpoint = ModelCheckpoint('./LSTM_Models/Models/LSTM_T1-Best-'
                                          + self.datasource,
                                          monitor='val_accuracy', verbose=1,
                                          save_weights_only=True,
                                          save_best_only=True, mode='max')
        self.early_stopping =\
            EarlyStopping(monitor='val_loss',
                          patience=self.model_params['patience'])

    def load_model(self):
        '''loading a trained model'''
        self.create_model()

        self.model.load_weights('./LSTM_Models/Models/LSTM_T1-Best-'
                                + self.datasource)

        self.model.compile(loss='sparse_categorical_crossentropy',
                           optimizer=self.opt, metrics=['accuracy'])

    def input_data(self):
        '''timeseries data creation'''
        self.weights = self.get_weights(self.train_generator.df.iloc[:, -1])

        self.ttl_batches = int((len(self.train_generator.df) -
                                self.model_params['SEQ_LEN']) /
                               self.model_params['batch_size'])

    def train(self, train_gen, validation_gen):
        '''train on the dataset provided'''
        self.train_generator = train_gen
        self.validation_generator = validation_gen
        self.input_data()
        self.create_model()

        history = \
            self.model.fit(
                x=self.train_generator,
                epochs=self.model_params['epochs'],
                shuffle=self.model_params['shuffle'],
                validation_data=self.validation_generator,
                steps_per_epoch=self.model_params['steps_per_epoch'],
                validation_steps=self.model_params['steps_per_epoch'],
                class_weight=self.weights,
                callbacks=[# self.tensorboard,
                           self.checkpoint,
                           self.early_stopping])
        print(history)

    def test(self, start_date=dt.datetime.today() -
             dt.timedelta(days=15), end_date=dt.datetime.today()):
        '''test and return confusion_matrix and classification_report'''

        self.set_params()
        data = dataset(self.datasource,
                       max_date=end_date)  # TEST
        data.prepare_test_dataset()
        self.test_data_gen = datagen(data.sdf,
                                     gen_length=self.model_params['SEQ_LEN'],
                                     start_date=start_date)
        self.x_train =\
            self.test_data_gen.df.loc[self.test_data_gen.df.date.between(
                start_date, end_date)].iloc[:, :-1]
        self.y_train =\
            self.test_data_gen.df.loc[self.test_data_gen.df.date.between(
                start_date, end_date)].iloc[:, -1]
        self.load_model()
        y_lstm_pred =\
            self.model.predict(self.test_data_gen,  # predict_generator is deprecated; predict accepts generators
                               steps=len(self.test_data_gen))
        y_lstm_pred = np.argmax(y_lstm_pred, axis=1)
        self.test_data_gen.result = self.test_data_gen.result[:-10]
        print("LSTM: Predictions have finished")
        cm_lstm = confusion_matrix(self.test_data_gen.result,
                                   y_lstm_pred)
        o_acc = np.around(np.sum(np.diag(cm_lstm)) / np.sum(cm_lstm)*100, 1)
        plt.title(f'Confusion Matrix \n Accuracy={o_acc}%', size=18)
        sn.heatmap(cm_lstm, fmt=".0f", annot=True, cbar=False,
                   annot_kws={"size": 15}, xticklabels=['Sell', 'Hold', 'Buy'],
                   yticklabels=['Sell', 'Hold', 'Buy'])
        plt.xlabel('Predicted Label', size=15)
        plt.ylabel('True Label', size=15)
        print(np.diag(cm_lstm).sum())
        cr_lstm = classification_report(self.test_data_gen.result,
                                        y_lstm_pred)
        print(cr_lstm)
        return cm_lstm, cr_lstm

    def predict(self, start_date=dt.datetime.today().date(),
                end_date=dt.datetime.today().date()):
        self.set_params()
        data = dataset(self.datasource,
                       min_date=start_date, max_date=end_date)  # TEST
        print("The dataset is", len(data), "datapoints long")
        data.prepare_test_dataset()
        self.test_data_gen = datagen(data.sdf,
                                     gen_length=self.model_params['SEQ_LEN'],
                                     start_date=start_date)
        print(self.test_data_gen.df.head())
        self.x_train =\
            self.test_data_gen.df.loc[self.test_data_gen.df.date.between(
                start_date, end_date)].iloc[:, :-1]
        print(self.x_train.columns)
        self.y_train =\
            self.test_data_gen.df.loc[self.test_data_gen.df.date.between(
                start_date, end_date)].iloc[:, -1]
        self.load_model()
        y_lstm_pred =\
            self.model.predict(self.test_data_gen,
                               steps=len(self.test_data_gen))
        return self.test_data_gen.ticks, y_lstm_pred

    def predict2(self, start_date=dt.datetime.today().date() -
                 dt.timedelta(days=120),
                 end_date=dt.datetime.today().date()):
        self.set_params()
        data = dataset(self.datasource,
                       min_date=start_date, max_date=end_date)
        print("The dataset is", len(data), "datapoints long")
        data.prepare_test_dataset()
        self.x_train = data.sdf.drop(['target'], axis=1)
        self.x_train = np.array(self.x_train.iloc[-90:, :])
        self.load_model()
        predictions = pd.DataFrame(columns=['sell', 'hold', 'buy'])
        results = pd.DataFrame(columns=['sym', 'date', 'target'])
        for sym in data.sdf.symbol.unique():
            temp = data.sdf.loc[data.sdf.symbol == sym]
            for date in temp.date[90:]:
                results =\
                    results.append({'sym': sym, 'date': date,
                                    'target':
                                    temp.loc[temp.date == date,
                                             ['adjusted_close']].values[0][0]},
                                   ignore_index=True)
                self.x_train = temp.loc[temp.date <= date].iloc[-90:, :]\
                    .drop(['symbol', 'date', 'target'], axis=1)
                self.x_train = np.array(self.x_train)
                self.x_train =\
                    np.reshape(self.x_train,
                               (1, self.x_train.shape[0],
                                self.x_train.shape[1])).astype('float32')
                predictions = predictions.append(pd.DataFrame(
                    self.model.predict(self.x_train),
                    columns=['sell', 'hold', 'buy']), ignore_index=True)
        results = pd.concat([results, predictions], axis=1)
        return results
Example #19
    print("Model trained in %e s." % elapsedTime)

    # turn the given validation set into a testing set
    test_AP_features = scale(
        np.asarray(test_df.iloc[:, 0:520]).astype(float),
        axis=1)  # convert integer to float and scale jointly (axis=1)
    x_test_utm = np.asarray(test_df['LONGITUDE'])
    y_test_utm = np.asarray(test_df['LATITUDE'])
    blds = blds_all[len_train:]
    flrs = flrs_all[len_train:]

    ### evaluate the model
    print("\nPart 3: evaluating the model ...")

    # calculate the accuracy of building and floor estimation
    preds = model.predict(test_AP_features, batch_size=batch_size)
    n_preds = preds.shape[0]
    blds_results = (np.equal(np.argmax(blds, axis=1),
                             np.argmax(preds[:, :3], axis=1))).astype(int)
    acc_bld = blds_results.mean()
    flrs_results = (np.equal(np.argmax(flrs, axis=1),
                             np.argmax(preds[:, 3:8], axis=1))).astype(int)
    acc_flr = flrs_results.mean()
    acc_bf = (blds_results * flrs_results).mean()

    # calculate positioning error when building and floor are correctly estimated
    mask = np.logical_and(
        blds_results, flrs_results
    )  # mask index array for correct location of building and floor
    x_test_utm = x_test_utm[mask]
    y_test_utm = y_test_utm[mask]
Example #20
regressor.fit(X_train, y_train, epochs=5, batch_size=16)

past2Days = data[data["DateTime"] < "2021-03-01 10:00:09"].copy().tail(3)

df = past2Days.append(testingData, ignore_index=True)
df = df.drop(["DateTime", "DJIAChange", "S&P500Change"], axis=1)

inputs = np.array(df.values.tolist())

X_test = []
y_test = []

for i in range(3, inputs.shape[0]):
    X_test.append(inputs[i - 3:i])
    y_test.append(inputs[i, 0])

X_test, y_test = np.array(X_test), np.array(y_test)

yPred = regressor.predict(X_test)

print(mda(y_test, yPred))

plt.figure(figsize=(14, 5))
plt.plot(y_test, color='red', label='Real Change')
plt.plot(yPred, color='blue', linestyle="dashed", label='Predicted Change')
plt.title('LSTM NASDAQ Composite Change Prediction')
plt.xlabel('Time')
plt.ylabel('NASDAQ Composite Change')
plt.legend()
plt.show()
Example #21
    for _ in range(n):  # add n layers, for n + 2 layers in total
        model.add(layers.Dense(32, activation='relu'))
    # create the final layer
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])  # assemble and train the model

    history = model.fit(db_train.batch(256), epochs=N_EPOCHS, verbose=1)

    # plot the decision boundary for networks of different depths
    # np.c_ stacks the raveled grids into an (m, 2) array;
    # XX.ravel() gives 50*50 points and
    # YY.ravel() gives 50*50 points,
    # so this predicts the class of 2500 grid points
    preds = model.predict(np.c_[XX.ravel(), YY.ravel()])
    title = "网络层数({})".format(n)  # plot title: "number of network layers (n)"
    file_name = f"网络层数{int(n)}.png"  # file name: "network depth n"
    make_plot(X_train, y_train, title, file_name, XX, YY, preds)

# %%
# varying the number of Dropout layers
for n in range(5):  # build 5 networks with different numbers of Dropout layers
    model = Sequential()  # create the model
    # create the first layer
    model.add(layers.Dense(8, input_dim=2, activation='relu'))
    counter = 0

    for _ in range(5):  # network depth fixed at 5 layers
        model.add(layers.Dense(64, activation='relu'))
        if counter < n:  # add n Dropout layers
Example #22
* Loss Function
* Optimizer
* Metrics
"""

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

model.fit(x_train, y_train, epochs=10)

test_loss, test_acc = model.evaluate(x_test, y_test)
print(test_acc)

from sklearn.metrics import accuracy_score

y_pred = np.argmax(model.predict(x_test), axis=1)  # predict_classes was removed in recent TF; argmax is equivalent

accuracy_score(y_test, y_pred)

pred = model.predict(x_test)

pred

y_pred

pred[0]

np.argmax(pred[0])

np.argmax(pred[1])

Example #23
"""Fitting the Created model."""

#Fitting the designed model.

STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
STEP_SIZE_VALID = val_generator.n // val_generator.batch_size

#print(STEP_SIZE_TRAIN, STEP_SIZE_VALID)

history = model_2.fit(train_generator,
                      steps_per_epoch = 315,
                      validation_data = val_generator,
                      validation_steps = 1,
                      epochs = 50)

"""As this process requires a lot of time we have the final output Screenshot."""

path_test = '/content/drive/MyDrive/BE_Project/Bone_Age_Detection/boneage-training-dataset/boneage-training-dataset/1377.png'

img = image.load_img(path_test, target_size=(256, 256))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)

images = np.vstack([x])

print('Mean Age is {} months'.format(mean_bone_age))
print('Standard Deviation Age is {} months'.format(std_bone_age))
pred = mean_bone_age + std_bone_age*(model_2.predict(images, batch_size=1))
print('Age is {} months'.format(float(pred)))
print(model_2.predict(images, batch_size=1))
print('Age is {} years'.format(float(pred / 12.0)))
Example #24
test_input = np.reshape(test_input, (test_input.shape[0], 1, test_input.shape[1]))

print("Reshape input to be [samples, time steps, features]")
print(f"Train input:target shape after reshaping = {train_input.shape}:{train_target.shape}")
print(f"Test input:target shape after reshaping = {test_input.shape}:{test_target.shape}")

# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, LOOK_BACK)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()
model.fit(train_input, train_target, epochs=100, batch_size=1, verbose=2)

# make predictions
trainPredict = model.predict(train_input)
testPredict = model.predict(test_input)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
train_target = scaler.inverse_transform([train_target])
testPredict = scaler.inverse_transform(testPredict)
test_target = scaler.inverse_transform([test_target])

# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(train_target[0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(test_target[0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % (testScore))

# shift train predictions for plotting
trainPredictPlot = np.empty_like(dataset)
Example #25
    Dense(28, activation='relu'),
    Dense(10, activation='softmax')
])
model.build(input_shape=(None, 28 * 28))
# 3. compile the model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])

model.load_weights('./mnist_model.h5')

weights = model.get_weights()

image_matrix = np.array(image_matrix)

image_matrix = image_matrix.reshape(10, 28 * 28).astype('float32') / 255.0

labels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

image_labels = tf.keras.utils.to_categorical(labels)

predictions = model.predict(image_matrix)

model.evaluate(image_matrix, image_labels)

#loss = model.train_on_batch(image_matrix, predictions)

np.set_printoptions(suppress=True, threshold=sys.maxsize)

for i in range(0, 10):
    print(predictions[i][i])  # predicted probability of digit i for sample i
Example #26
          epochs=epochNo,
          batch_size=32,
          validation_data=(testData_features_normed, testData_labels_one),
          callbacks=callback_list)

# ### Inference

# In[ ]:

print("predicting .....")

# In[37]:

# predict= pd.DataFrame(model_rf.predict(featureData_norm[features_norm]),columns=["predict_rf"])
featureData['predict_dnn'] = pd.DataFrame(
    model.predict(featureData_norm[features_norm])).idxmax(axis=1)

# In[38]:

# featureData.to_sql(name='TB_ORG_DATA_PREDICT', con=engine, if_exists='append', index=False)

# In[39]:

featureData.head(10)

# In[40]:

predict = pd.DataFrame(pd.DataFrame(
    model.predict(testData_features_normed)).idxmax(axis=1),
                       columns=["predict_dnn"])
Example #27
    train_y,
    epochs=50,
    batch_size=72,
    validation_data=(
        test_X,
        test_y),
    verbose=2)

# plot the training history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()

# make the prediction
yHat = model.predict(test_X)

inv_yHat = concatenate((yHat, test_x[:, 1:]), axis=1)   # array concatenation
inv_yHat = inv_yHat[:, 0]

test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_x[:, 1:]), axis=1)
inv_y = inv_y[:, 0]

prediction = inv_yHat  # model output, back on the original scale
target = inv_y         # ground truth
error = []
for i in range(len(target)):
    error.append(target[i] - prediction[i])
squaredError = []
absError = []
Example #28
# build the model
model = Sequential()
model.add(
    layers.LSTM(units=10,
                activation='relu',
                return_sequences=True,
                input_shape=(window_size, data_size)))
model.add(layers.Dropout(0.1))
model.add(layers.LSTM(units=10, activation='relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(units=1))
model.summary()

model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(train_x, train_y, epochs=20, batch_size=200)
pred_y = model.predict(test_x)

# Visualising the results
plt.figure()
plt.plot(test_y, color='red', label='real SEC stock price')
plt.plot(pred_y, color='blue', label='predicted SEC stock price')
plt.title('SEC stock price prediction')
plt.xlabel('time')
plt.ylabel('stock price')
plt.legend()
plt.show()

# raw_df.close[-1] : dfy.close[-1] = x : pred_y[-1]
print("Tomorrow's SEC price :", raw_df.close[-1] * pred_y[-1] / dfy.close[-1],
      'KRW')
Example #29
            sr=44100,
            n_dft=2048,
            n_hop=512,
            input_shape=(1, 80000),
            padding='same',
            n_mels=128,
            fmin=0.0,
            fmax=44100 / 2,
            power_melgram=1.0,
            return_decibel_melgram=False,
            trainable_fb=False,
        ))
    timing = []
    for e in range(20):
        t_start = time.time()
        spec = model.predict(y_list.reshape(1770, 1, 80000))
        time_used = time.time() - t_start
        print(time_used)
        timing.append(time_used)

    print("mean = ", np.mean(timing))
    print("std = ", np.std(timing))

    data = pd.DataFrame(timing, columns=['t_avg'])
    data['Type'] = 'kapre_GPU'
    data.to_csv(Path(__file__).parent / f'./result/Mel_kapre_GPU')

elif args.device == "tensorflow":

    import tensorflow as tf
    mel_filterbank = tf.signal.linear_to_mel_weight_matrix(128, 1025)
Example #30
X_scaled_test = scaler.transform(X_test)

import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

tf.random.set_seed(42)

model = Sequential([Dense(units=32, activation='relu'), Dense(units=1)])

model.compile(loss='mean_squared_error',
              optimizer=tf.keras.optimizers.Adam(0.1))

model.fit(X_scaled_train, y_train, epochs=100, verbose=True)

predictions = model.predict(X_scaled_test)[:, 0]

from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
print(f'MSE: {mean_squared_error(y_test, predictions):.3f}')
print(f'MAE: {mean_absolute_error(y_test, predictions):.3f}')
print(f'R^2: {r2_score(y_test, predictions):.3f}')

from tensorboard.plugins.hparams import api as hp
HP_HIDDEN = hp.HParam('hidden_size', hp.Discrete([64, 32, 16]))
HP_EPOCHS = hp.HParam('epochs', hp.Discrete([300, 1000]))
HP_LEARNING_RATE = hp.HParam('learning_rate', hp.RealInterval(0.01, 0.4))


def train_test_model(hparams, logdir):
    model = Sequential(
        [Dense(units=hparams[HP_HIDDEN], activation='relu'),