Example no. 1
0
class TestTrainModel(TestCase):
    """Unit tests for TrainModel.train: verifies it orchestrates
    read -> transform -> fit -> write with the expected arguments."""

    def setUp(self):
        # Fresh config and model-under-test for every case.
        self.config = Config()
        self.test_train_model = TrainModel(self.config)
        # 3x3 dummy frame whose columns mirror the configured product attrs.
        rows = np.array([
            ['val11', 'val12', 'val13'],
            ['val21', 'val22', 'val23'],
            ['val31', 'val32', 'val33'],
        ])
        self.dummy_data_df = pd.DataFrame(
            rows, columns=self.config.ALL_PRODUCT_ATTRS)

    @mock.patch.object(Recommender, 'fit')
    def test_train(self, mocked_fit):
        """train() should read four inputs, transform twice, fit once,
        then persist the result."""
        model = self.test_train_model

        # Stub collaborators so only train()'s orchestration is exercised.
        model.read_data = MagicMock(return_value=self.dummy_data_df)
        model.write_data = MagicMock(return_value=None)
        model.transform_data = MagicMock(
            return_value=([1, 2], [3, 4], [5, 6], [7, 8]))
        mocked_fit.return_value = (self.dummy_data_df, self.dummy_data_df)

        # Exercise the method under test.
        model.train()

        # All four input paths must be read.
        model.read_data.assert_has_calls([
            call('dummy_path1'),
            call('dummy_path2'),
            call('dummy_path3'),
            call('dummy_path4'),
        ])

        # One transform per view: item (SID_IDX) and customer (CUSTOMER_IDX).
        model.transform_data.assert_has_calls([
            call(self.dummy_data_df, self.config.ALL_PRODUCT_ATTRS,
                 ['SID_IDX', 'CONFIG_ID', 'PRODUCT_CATEGORY', 'PRODUCT_TYPE',
                  'BRAND'],
                 'SID_IDX'),
            call(self.dummy_data_df, self.config.ALL_PRODUCT_ATTRS,
                 ['CUSTOMER_IDX', 'CONFIG_ID', 'PRODUCT_CATEGORY',
                  'PRODUCT_TYPE', 'BRAND'],
                 'CUSTOMER_IDX'),
        ])

        # fit receives the transformed splits (each passed twice — once per
        # view, since transform_data is stubbed to one return value) plus the
        # two frames produced by read_data.
        mocked_fit.assert_called_once_with(
            [1, 2], [1, 2], [3, 4], [3, 4], [5, 6], [5, 6],
            [7, 8], [7, 8], self.dummy_data_df, self.dummy_data_df)

        # The fitted output is written exactly once.
        model.write_data.assert_called_once_with(self.dummy_data_df)
Example no. 2
0
#!/usr/bin/env python3
"""Train the face-verification model on triplets built from aligned images."""

import numpy as np
import tensorflow as tf
from train_model import TrainModel
from utils import load_images, load_csv, generate_triplets

# Load the aligned face images and scale pixel values into [0, 1].
images, filenames = load_images('HBTNaligned', as_array=True)
images = images.astype('float32') / 255

# Build (anchor, positive, negative) triplets from the CSV listing.
triplet_names = load_csv('FVTriplets.csv')
A, P, N = generate_triplets(images, filenames, triplet_names)

# Drop the last two samples so every batch divides evenly by 32.
triplets = [A[:-2], P[:-2], N[:-2]]

tm = TrainModel('models/face_verification.h5', 0.2)
history = tm.train(triplets, epochs=1)
print(history.history)
Example no. 3
0
                                          output_dim=output_emb,
                                          input_length=seq_len)(inputs)
    forward_pass = tf.keras.layers.LSTM(rnn_unit,
                                        return_sequences=True)(embedding)
    forward_pass = tf.keras.layers.Dropout(dropout)(forward_pass)
    forward_pass = tf.keras.layers.LSTM(rnn_unit)(forward_pass)
    forward_pass = tf.keras.layers.Dense(dense_unit)(forward_pass)
    forward_pass = tf.keras.layers.Dropout(dropout)(forward_pass)
    outputs = tf.keras.layers.Dense(unique_notes + 1,
                                    activation="softmax")(forward_pass)

    model = tf.keras.Model(inputs=inputs,
                           outputs=outputs,
                           name='generate_scores_rnn')
    return model


# Build the score-generation RNN and show its layer summary before training.
model = create_model(seq_len, unique_notes)
model.summary()

optimizer = tf.keras.optimizers.RMSprop()
loss_fn = tf.keras.losses.categorical_crossentropy

# Hand everything to the training driver and run the full loop.
train_class = TrainModel(EPOCHS, note_tokenizer, sampled_200_midi,
                         FRAME_PER_SECOND, BATCH_NNET_SIZE, BATCH_SONG,
                         optimizer, loss_fn, TOTAL_SONGS, model, seq_len)

train_class.train()

# Persist the trained weights and the tokenizer for later generation.
model.save('model_ep4.h5')
# Fix: the original passed a bare open(...) to pickle.dump, leaking the file
# handle; a context manager guarantees the file is flushed and closed.
with open("tokenizer.p", "wb") as tokenizer_file:
    pickle.dump(note_tokenizer, tokenizer_file)
Example no. 4
0
#!/usr/bin/env python3
"""Train the face-verification model, save it, and confirm the saved artifact."""

import os
import numpy as np
import tensorflow as tf
from train_model import TrainModel
from utils import load_images, load_csv, generate_triplets

# Load aligned face images and the triplet definitions.
images, filenames = load_images('HBTNaligned', as_array=True)
triplet_names = load_csv('FVTriplets.csv')

# Assemble (anchor, positive, negative) arrays, trimming the last two
# samples so the batch count divides evenly by 32.
A, P, N = generate_triplets(images, filenames, triplet_names)
triplets = [A[:-2], P[:-2], N[:-2]]

tm = TrainModel('models/face_verification.h5', 0.2)
tm.train(triplets, epochs=1)

# Check whether save() hands back the instance's own base model, then show
# the models directory so the new file is visible.
base_model = tm.save('models/trained_fv.h5')
print(base_model is tm.base_model)
print(os.listdir('models'))