# Example #1
class LDCF:
    """Location-aware deep collaborative-filtering model.

    Combines ID and location-code embeddings of users and items, feeds the
    concatenation through an MLP, fuses it with a cosine-similarity branch
    over the location latents, and trains the network on the configured
    dataset.  Training is launched from the constructor.
    """

    def __init__(self, args, density):
        """Read hyper-parameters from ``args``, build the model, then train.

        args: parsed command-line arguments carrying all hyper-parameters.
        density: matrix density forwarded to the DataSet loader.
        """
        self.dataset = DataSet(args.dataType, density)
        self.dataType = self.dataset.dataType
        self.density = self.dataset.density
        self.shape = self.dataset.shape

        self.train = self.dataset.train
        self.test = self.dataset.test

        # Training hyper-parameters.
        self.epochNum = args.epochNum
        self.batchSize = args.batchSize
        self.layers = args.layers
        self.regLayers = args.regLayers
        self.lr = args.lr
        self.decay = args.decay
        self.optimizer = args.optimizer
        self.verbose = args.verbose

        # Persistence settings.
        self.store = args.store
        self.modelPath = args.modelPath
        self.resultPath = args.resultPath

        self.model = self.compile_model()

        self.run()

    def run(self):
        """Train for ``epochNum`` epochs, tracking the best MAE/RMSE pair."""
        # Baseline: evaluate the untrained model first.
        x_test, y_test = self.dataset.getTestInstance(self.test)
        sys.stdout.write('\rInitializing...')
        mae, rmse = evaluate(self.model, x_test, y_test)
        sys.stdout.write('\rInitializing completes.MAE = %.4f|RMSE = %.4f.\n' % (mae, rmse))
        best_mae, best_rmse, best_epoch = mae, rmse, -1
        evalResults = np.zeros((self.epochNum, 2))
        # Training model
        print('=' * 14 + 'Start Training' + '=' * 22)
        for epoch in range(self.epochNum):
            sys.stdout.write('\rEpoch %d starts...' % epoch)
            start = time()
            # Fresh training instances each epoch (sampling may differ).
            x_train, y_train = self.dataset.getTrainInstance(self.train)
            # One Keras epoch per outer-loop iteration so we can evaluate
            # and checkpoint between epochs.
            history = self.model.fit(x_train, y_train, batch_size=self.batchSize, epochs=1, verbose=0, shuffle=True)
            end = time()
            sys.stdout.write('\rEpoch %d ends.[%.1fs]' % (epoch, end - start))
            # Evaluate every `verbose` epochs; checkpoint on MAE improvement.
            if epoch % self.verbose == 0:
                sys.stdout.write('\rEvaluating Epoch %d...' % epoch)
                mae, rmse = evaluate(self.model, x_test, y_test)
                loss = history.history['loss'][0]
                sys.stdout.write('\rEvaluating completes.[%.1fs] ' % (time() - end))
                if mae < best_mae:
                    best_mae, best_rmse, best_epoch = mae, rmse, epoch
                    if self.store:
                        self.saveModel(self.model)
                evalResults[epoch, :] = [mae, rmse]
                sys.stdout.write('\rEpoch %d : MAE = %.4f|RMSE = %.4f|Loss = %.4f\n' % (epoch, mae, rmse, loss))
        print('=' * 14 + 'Training Complete!' + '=' * 18)
        print('The best is at epoch %d : MAE = %.4f|RMSE = %.4f.' % (best_epoch, best_mae, best_rmse))
        if self.store:
            saveResult(self.resultPath, self.dataType, self.density, evalResults, ['MAE', 'RMSE'])
            print('The model is stored in %s.' % self.modelPath)
            print('The result is stored in %s.' % self.resultPath)

    def compile_model(self):
        """Build the network and compile it with the Huber loss."""
        _model = self.build_model(self.shape[0], self.shape[1], self.layers, self.regLayers)
        _model.compile(optimizer=self.optimizer(lr=self.lr, decay=self.decay), loss=[self.huber_loss])
        return _model

    def build_model(self, num_users, num_item, layers, reg_layers):
        """Assemble the (user id/lc, item id/lc) -> rating prediction graph.

        num_users/num_item: embedding vocabulary sizes.
        layers: MLP widths; layers[0] also sizes the embeddings (/4 each).
        reg_layers: per-layer L2 regularization factors (same length).
        """
        assert len(layers) == len(reg_layers)

        # Input layer: scalar IDs plus length-2 location codes.
        # NOTE(review): lc inputs look like 2-part location codes — confirm
        # against DataSet.getTrainInstance.
        user_id_input = Input(shape=(1,), dtype='int64', name='user_id_input')
        user_lc_input = Input(shape=(2,), dtype='int64', name='user_lc_input')

        item_id_input = Input(shape=(1,), dtype='int64', name='item_id_input')
        item_lc_input = Input(shape=(2,), dtype='int64', name='item_lc_input')

        # Four embeddings, each layers[0]/4 wide so the concatenated
        # user+item vector matches the first MLP width.
        user_id_embedding = self.getEmbedding(num_users, int(layers[0] / 4), 1, reg_layers[0], 'user_id_embedding')
        user_lc_embedding = self.getEmbedding(num_users, int(layers[0] / 4), 2, reg_layers[0], 'user_lc_embedding')

        item_id_embedding = self.getEmbedding(num_item, int(layers[0] / 4), 1, reg_layers[0], 'item_id_embedding')
        item_lc_embedding = self.getEmbedding(num_item, int(layers[0] / 4), 2, reg_layers[0], 'item_lc_embedding')

        user_id_latent = Flatten()(user_id_embedding(user_id_input))
        user_lc_latent = Flatten()(user_lc_embedding(user_lc_input))

        item_id_latent = Flatten()(item_id_embedding(item_id_input))
        item_lc_latent = Flatten()(item_lc_embedding(item_lc_input))

        # Concatenate per-entity latents, then fuse user and item for the MLP.
        predict_user_vector = concatenate([user_id_latent, user_lc_latent])
        predict_item_vector = concatenate([item_id_latent, item_lc_latent])

        mlp_vector = concatenate([predict_user_vector, predict_item_vector])

        # AC-COS: normalized dot product (cosine) of the location latents.
        cosine_vector = dot([user_lc_latent, item_lc_latent], axes=1, normalize=True)

        # AC_EUC (alternative branch, currently disabled):
        #euclidean_vector = Lambda(self.euclidean_distance, output_shape=self.eucl_dist_output_shape)([user_lc_latent, item_lc_latent])

        # Middle MLP layers (layers[0] sizes embeddings, layers[-1] the output).
        for index in range(1, len(layers) - 1):
            layer = Dense(units=layers[index], kernel_initializer=initializers.random_normal(),
                          kernel_regularizer=l2(reg_layers[index]), activation='relu', name='mlpLayer%d' % index)
            mlp_vector = layer(mlp_vector)

        predict_vector = concatenate([mlp_vector, cosine_vector])

        # Output layer
        prediction = Dense(units=layers[-1], activation='linear', kernel_initializer=initializers.lecun_normal(),
                           kernel_regularizer=l2(reg_layers[-1]), name='prediction')(predict_vector)

        _model = Model(inputs=[user_id_input, user_lc_input, item_id_input, item_lc_input], outputs=prediction)
        plot_model(_model, to_file='model.png')
        return _model

    def huber_loss(self, y_true, y_pred, clip_delta=1.0):
        """Element-wise Huber loss: quadratic within ``clip_delta``, linear outside."""
        error = y_true - y_pred
        # BUG FIX: the original used K.tf.where, which depends on the private
        # `K.tf` alias removed in newer Keras; blend element-wise with a cast
        # mask instead (identical values, backend-agnostic).
        cond = K.cast(K.abs(error) < clip_delta, K.floatx())
        squared_loss = 0.5 * K.square(error)
        linear_loss = clip_delta * (K.abs(error) - 0.5 * clip_delta)
        return cond * squared_loss + (1.0 - cond) * linear_loss

    # One-hot encoding + 0-layer mlp
    def getEmbedding(self, input_dim, output_dim, input_length, reg_layers, name):
        """Build an L2-regularized Embedding layer with random-normal init."""
        _Embedding = Embedding(input_dim=input_dim, output_dim=output_dim, input_length=input_length,
                               embeddings_initializer=initializers.random_normal(),
                               embeddings_regularizer=l2(reg_layers), name=name)
        return _Embedding

    def euclidean_distance(self, vects):
        """Row-wise Euclidean distance between the two tensors in ``vects``."""
        x, y = vects
        return K.sqrt(K.sum(K.square(x - y), axis=1, keepdims=True))

    def eucl_dist_output_shape(self, shapes):
        """Output shape helper for the Lambda distance layer: (batch, 1)."""
        shape1, shape2 = shapes
        return shape1[0], 1

    def saveModel(self, _model):
        """Persist the current weights under ``modelPath``."""
        _model.save_weights(self.modelPath + '/%s_%.2f_%s.h5'
                            % (self.dataType, self.density, self.layers), overwrite=True)
# Example #2
class GTF:
    """Generalized tensor-factorization model over (user, item, time).

    Learns one embedding per axis, sums the three pairwise dot products
    (user·item, user·time, item·time) and maps the result through a dense
    output layer.  Training is launched from the constructor.
    """

    def __init__(self, args, density):
        """Read hyper-parameters from ``args``, build the model, then train."""
        self.dataSet = DataSet(args, density)
        self.dataType = self.dataSet.dataType
        self.density = self.dataSet.density
        self.shape = self.dataSet.shape

        self.train = self.dataSet.train
        self.test = self.dataSet.test

        # Training hyper-parameters.
        self.epochNum = args.epochNum
        self.batchSize = args.batchSize
        self.gtfLayers = args.gtfLayers
        self.regLayers = args.regLayers
        self.dropLayers = args.dropLayers
        self.lr = args.lr
        self.decay = args.decay
        self.optimizer = args.optimizer
        self.verbose = args.verbose

        # Persistence settings.
        self.store = args.store
        self.imagePath = args.imagePath
        self.modelPath = args.modelPath
        self.resultPath = args.resultPath
        self.model = self.load_model()
        self.run()

    def run(self):
        """Train for ``epochNum`` epochs; log MAE/RMSE and keep the best."""
        # Baseline: evaluate the untrained model first.
        x_test, y_test = self.dataSet.getTestInstance(self.test)
        print('Initializing...')
        mae, rmse = evaluate(self.model, x_test, y_test, self.batchSize)
        sys.stdout.write('\rInitializing done.MAE = %.4f|RMSE = %.4f.\n' % (mae, rmse))
        best_mae, best_rmse, best_epoch = mae, rmse, -1
        metrics = ['MAE', 'RMSE']
        evalResults = np.zeros((self.epochNum, 2))
        # Training model
        print('=' * 14 + 'Start Training' + '=' * 22)
        for epoch in range(self.epochNum):
            sys.stdout.write('\rEpoch %d starts...' % epoch)

            x_train, y_train = self.dataSet.getTrainInstance(self.train)
            # One Keras epoch per outer-loop iteration.
            history = self.model.fit(x_train, y_train, batch_size=self.batchSize, epochs=1, verbose=0, shuffle=True)
            # BUG FIX: the original omitted `% epoch`, printing a literal '%d'.
            sys.stdout.write('\rEpoch %d ends.' % epoch)
            # Evaluate every `verbose` epochs.
            if epoch % self.verbose == 0:
                sys.stdout.write('\rEvaluating Epoch %d...' % epoch)
                mae, rmse = evaluate(self.model, x_test, y_test, self.batchSize)
                loss = history.history['loss'][0]
                sys.stdout.write('\rEvaluating done.')
                if mae < best_mae:
                    best_mae, best_rmse, best_epoch = mae, rmse, epoch
                evalResults[epoch, :] = [mae, rmse]
                saveResult('gtf', self.resultPath, self.dataType, self.density, evalResults, metrics)
                sys.stdout.write('\rEpoch %d : MAE = %.4f|RMSE = %.4f|Loss = %.4f\n' % (epoch, mae, rmse, loss))
        print('=' * 14 + 'Training Complete!' + '=' * 18)
        print('The best is at epoch %d : MAE = %.4f|RMSE = %.4f.' % (best_epoch, best_mae, best_rmse))
        if self.store:
            self.save_model(self.model)
            print('The model is stored in %s.' % self.modelPath)
            print('The result is stored in %s.' % self.resultPath)

    def load_model(self):
        """Build, compile (hybrid loss) and diagram the GTF network."""
        _model = self.build_model(self.shape, self.gtfLayers, self.regLayers, self.dropLayers)
        _model.compile(optimizer=self.optimizer(lr=self.lr, decay=self.decay), loss=self.hybrid_loss)
        plot_model(_model, to_file=self.imagePath + 'GTF.jpg', show_shapes=True)
        return _model

    @staticmethod
    def build_model(shape, gtf_layers, reg_layers, drop_layers):
        """Assemble the (user, item, time) -> prediction factorization graph.

        shape: (num_users, num_items, num_time_slices) vocabulary sizes.
        gtf_layers: [embedding_dim, ..., output_units].
        reg_layers: L2 factors; drop_layers: dropout rates (first/last used).
        """
        # Embedding Layer: scalar integer IDs for each tensor axis.
        user_input = Input(shape=(1,), dtype='int32', name='user_input')

        item_input = Input(shape=(1,), dtype='int32', name='item_input')

        time_input = Input(shape=(1,), dtype='int32', name='time_input')

        user_embedding = Flatten()(Embedding(input_dim=shape[0], output_dim=gtf_layers[0], input_length=1,
                                             embeddings_initializer=initializers.random_normal(),
                                             embeddings_regularizer=regularizers.l2(reg_layers[0]),
                                             name='gtf_user_embedding')(user_input))
        item_embedding = Flatten()(Embedding(input_dim=shape[1], output_dim=gtf_layers[0], input_length=1,
                                             embeddings_initializer=initializers.random_normal(),
                                             embeddings_regularizer=regularizers.l2(reg_layers[0]),
                                             name='gtf_item_embedding')(item_input))
        time_embedding = Flatten()(Embedding(input_dim=shape[2], output_dim=gtf_layers[0], input_length=1,
                                             embeddings_initializer=initializers.random_normal(),
                                             embeddings_regularizer=regularizers.l2(reg_layers[0]),
                                             name='gtf_time_embedding')(time_input))

        user_embedding = Dropout(drop_layers[0])(user_embedding)
        item_embedding = Dropout(drop_layers[0])(item_embedding)
        time_embedding = Dropout(drop_layers[0])(time_embedding)

        # Pairwise interactions, summed (CP-style tensor factorization).
        us = Dot(axes=-1)([user_embedding, item_embedding])
        ut = Dot(axes=-1)([user_embedding, time_embedding])
        st = Dot(axes=-1)([item_embedding, time_embedding])

        mf_vector = Add()([us, ut, st])

        mf_vector = Dropout(drop_layers[-1])(mf_vector)

        prediction = Dense(units=gtf_layers[-1], activation='relu', use_bias=True,
                           kernel_initializer=initializers.lecun_normal(),
                           kernel_regularizer=regularizers.l2(reg_layers[-1]), name='gtf_prediction')(mf_vector)

        _model = Model(inputs=[user_input, item_input, time_input], outputs=prediction)
        return _model

    def hybrid_loss(self, y_true, y_pred, delta=0.5):
        """Convex blend of L1 and L2 error: delta*|e| + (1-delta)*e^2."""
        l1 = K.abs(y_true - y_pred)
        l2 = K.square(y_true - y_pred)
        hybrid_loss = delta*l1+(1-delta)*l2
        return hybrid_loss

    def save_model(self, _model):
        """Persist trained weights; filename encodes dataType/density/layers."""
        _model.save_weights(self.modelPath + 'gtf_%s_%.2f_%s.h5'
                            % (self.dataType, self.density, self.gtfLayers), overwrite=True)
# Example #3
class RTF:
    """Recurrent temporal-factorization model.

    Fuses two branches over (user, item, time) inputs — a tensor-
    factorization branch (mirroring GTF) and a GRU branch (mirroring PGRU)
    — and predicts with the element-wise maximum of the two branch outputs.
    Optionally warm-starts both branches from pre-trained GTF/PGRU weights.
    Training is launched from the constructor.
    """

    def __init__(self, args, density):
        """Read hyper-parameters from ``args``, build the model, then train."""
        self.dataSet = DataSet(args, density)
        self.dataType = self.dataSet.dataType
        self.density = self.dataSet.density
        self.shape = self.dataSet.shape

        self.train = self.dataSet.train
        self.test = self.dataSet.test

        # Training hyper-parameters.
        self.epochNum = args.epochNum
        self.batchSize = args.batchSize
        self.gruLayers = args.gruLayers
        self.gtfLayers = args.gtfLayers
        self.regLayers = args.regLayers
        self.dropLayers = args.dropLayers
        self.lr = args.lr
        self.decay = args.decay
        self.optimizer = args.optimizer
        self.verbose = args.verbose

        # Persistence / pre-training settings.
        self.preTraining = args.preTraining
        self.store = args.store
        self.modelPath = args.modelPath
        self.imagePath = args.imagePath
        self.resultPath = args.resultPath

        self.model = self.load_model()

        self.run()

    def run(self):
        """Train for ``epochNum`` epochs; log MAE/RMSE/loss, keep the best."""
        # Initializing model: evaluate untrained weights as the baseline.
        x_test, y_test = self.dataSet.getTestInstance(self.test)
        print('Initializing...')
        mae, rmse = evaluate(self.model, x_test, y_test, self.batchSize)
        best_mae, best_rmse, best_epoch = mae, rmse, -1
        metrics = ['MAE', 'RMSE', 'Loss']
        evalResults = np.zeros((self.epochNum, len(metrics)))
        print('Initializing done.MAE = %.4f|RMSE = %.4f.' % (mae, rmse))
        # Training model
        print('=' * 25 + 'Start Training' + '=' * 30)
        for epoch in range(self.epochNum):
            sys.stdout.write('\rEpoch %d starts...' % epoch)
            x_train, y_train = self.dataSet.getTrainInstance(self.train)
            # Training: one Keras epoch per outer-loop iteration.
            history = self.model.fit(x_train,
                                     y_train,
                                     batch_size=self.batchSize,
                                     epochs=1,
                                     verbose=0,
                                     shuffle=True)
            # , callbacks=[TensorBoard(log_dir='./Log')])
            sys.stdout.write('\rEpoch %d ends.' % epoch)
            # Evaluation every `verbose` epochs; results saved each time.
            if epoch % self.verbose == 0:
                sys.stdout.write('\rEvaluating Epoch %d...' % epoch)
                mae, rmse = evaluate(self.model, x_test, y_test,
                                     self.batchSize)
                loss = history.history['loss'][0]
                evalResults[epoch, :] = [mae, rmse, loss]
                sys.stdout.write('\rEvaluating done.')
                if mae < best_mae:
                    best_mae, best_rmse, best_epoch = mae, rmse, epoch
                saveResult('rtf', self.resultPath, self.dataType, self.density,
                           evalResults, metrics)
                sys.stdout.write(
                    '\rEpoch %d : MAE = %.4f|RMSE = %.4f|Loss = %.4f \n' %
                    (epoch, mae, rmse, loss))
        print('=' * 23 + 'Training Complete!' + '=' * 28)
        print('The best is at epoch %d : MAE = %.4f RMSE = %.4f.' %
              (best_epoch, best_mae, best_rmse))
        if self.store:
            self.save_model(self.model)
            print('The model is stored in %s.' % self.modelPath)
            print('The result is stored in %s.' % self.resultPath)

    def load_model(self):
        """Build the fused network, optionally copy pre-trained branch
        weights into it, compile with the hybrid loss, and diagram it."""
        rtf_model = self.build_model(self.shape, self.gruLayers,
                                     self.gtfLayers, self.regLayers,
                                     self.dropLayers)

        if self.preTraining:
            # Rebuild each branch stand-alone and load its saved weights,
            # then transfer layer-by-layer into the fused model.
            gtf_model = GTF.build_model(self.shape, self.gtfLayers,
                                        self.regLayers, self.dropLayers)
            gtf_model.load_weights(
                self.modelPath + 'gtf_%s_%.2f_%s.h5' %
                (self.dataType, self.density, self.gtfLayers))

            gru_model = PGRU.build_model(self.shape, self.gruLayers,
                                         self.regLayers, self.dropLayers)
            gru_model.load_weights(
                self.modelPath + 'pgru_%s_%.2f_%s.h5' %
                (self.dataType, self.density, self.gruLayers))

            rtf_model = self.load_pretrain_model(rtf_model, gtf_model,
                                                 gru_model, self.gruLayers)

        rtf_model.compile(optimizer=self.optimizer(lr=self.lr,
                                                   decay=self.decay),
                          loss=self.hybrid_loss)
        plot_model(rtf_model,
                   to_file=self.imagePath + 'RTF.jpg',
                   show_shapes=True)
        return rtf_model

    def build_model(self, shape, gru_layers, gtf_layers, reg_layers,
                    drop_layers):
        """Assemble the fused GTF+GRU graph over shared (user, item, time)
        inputs.

        Layer names must match those created by GTF.build_model and
        PGRU.build_model so load_pretrain_model can transfer weights.
        """
        # Granulation: shared scalar-ID inputs for both branches.
        user_input = Input(shape=(1, ), dtype='int32', name='user_input')

        item_input = Input(shape=(1, ), dtype='int32', name='item_input')

        time_input = Input(shape=(1, ), dtype='int32', name='time_input')

        # GTF branch: per-axis embeddings, pairwise dots, summed.

        tf_user_embedding = Flatten()(Embedding(
            input_dim=shape[0],
            output_dim=gtf_layers[0],
            embeddings_initializer=initializers.random_normal(),
            embeddings_regularizer=regularizers.l2(reg_layers[0]),
            input_length=1,
            name='gtf_user_embedding')(user_input))
        tf_item_embedding = Flatten()(Embedding(
            input_dim=shape[1],
            output_dim=gtf_layers[0],
            embeddings_initializer=initializers.random_normal(),
            embeddings_regularizer=regularizers.l2(reg_layers[0]),
            input_length=1,
            name='gtf_item_embedding')(item_input))
        tf_time_embedding = Flatten()(Embedding(
            input_dim=shape[2],
            output_dim=gtf_layers[0],
            embeddings_initializer=initializers.random_normal(),
            embeddings_regularizer=regularizers.l2(reg_layers[0]),
            input_length=1,
            name='gtf_time_embedding')(time_input))

        tf_user_embedding = Dropout(drop_layers[0])(tf_user_embedding, )
        tf_item_embedding = Dropout(drop_layers[0])(tf_item_embedding)
        tf_time_embedding = Dropout(drop_layers[0])(tf_time_embedding)

        us = Dot(axes=-1)([tf_user_embedding, tf_item_embedding])

        ut = Dot(axes=-1)([tf_user_embedding, tf_time_embedding])

        st = Dot(axes=-1)([tf_item_embedding, tf_time_embedding])

        gtf_vector = Add()([us, ut, st])

        gtf_prediction = Dense(units=gtf_layers[-1],
                               activation='relu',
                               kernel_initializer=initializers.lecun_normal(),
                               kernel_regularizer=regularizers.l2(
                                   reg_layers[-1]),
                               name='gtf_prediction')(gtf_vector)

        # PGRU branch: embeddings reshaped to a sequence fed through GRUs.

        gru_user_embedding = Flatten()(Embedding(
            input_dim=shape[0],
            output_dim=gru_layers[0],
            embeddings_initializer=initializers.random_normal(),
            embeddings_regularizer=regularizers.l2(reg_layers[0]),
            input_length=1,
            name='gru_user_embedding')(user_input))
        gru_item_embedding = Flatten()(Embedding(
            input_dim=shape[1],
            output_dim=gru_layers[0],
            embeddings_initializer=initializers.random_normal(),
            embeddings_regularizer=regularizers.l2(reg_layers[0]),
            input_length=1,
            name='gru_item_embedding')(item_input))
        gru_time_embedding = Flatten()(Embedding(
            input_dim=shape[2],
            output_dim=gru_layers[1],
            embeddings_initializer=initializers.random_normal(),
            embeddings_regularizer=regularizers.l2(reg_layers[0]),
            input_length=1,
            name='gru_time_embedding')(time_input))

        gru_user_embedding = Dropout(drop_layers[0])(gru_user_embedding)
        gru_item_embedding = Dropout(drop_layers[0])(gru_item_embedding)
        gru_time_embedding = Dropout(drop_layers[0])(gru_time_embedding)

        gru_vector = Concatenate(axis=-1)(
            [gru_user_embedding, gru_item_embedding])

        gru_vector = Reshape(target_shape=(int(gru_layers[1]), -1))(gru_vector)

        for index in range(1, len(gru_layers) - 1):

            # Only the last stacked GRU collapses the sequence
            # (return_sequences=False on the final layer).
            layers = GRU(units=gru_layers[index],
                         kernel_initializer=initializers.he_uniform(),
                         kernel_regularizer=regularizers.l2(reg_layers[index]),
                         activation='tanh',
                         recurrent_activation='hard_sigmoid',
                         dropout=drop_layers[index],
                         return_sequences=(index != (len(gru_layers) - 2)),
                         name='gru_layer_%d' % index)
            # NOTE(review): calling the GRU on [inputs, state] appears to
            # seed the recurrent initial state with the time embedding —
            # confirm against the Keras RNN call signature.
            gru_vector = layers([gru_vector, gru_time_embedding])

        gru_vector = Dropout(drop_layers[-1])(gru_vector)

        gru_prediction = Dense(units=gru_layers[-1],
                               activation='relu',
                               kernel_initializer=initializers.lecun_normal(),
                               kernel_regularizer=regularizers.l2(
                                   reg_layers[-1]),
                               name='gru_prediction')(gru_vector)

        # Fuse the branches: element-wise maximum of the two predictions.
        rtf_prediction = Maximum(name='rtf_prediction')(
            [gru_prediction, gtf_prediction])

        _model = Model(inputs=[user_input, item_input, time_input],
                       outputs=rtf_prediction)
        return _model

    def load_pretrain_model(self, rtf_model, gtf_model, gru_model, gru_layers):
        """Copy embedding/GRU/prediction weights from the pre-trained GTF
        and PGRU models into the identically-named layers of ``rtf_model``."""
        print("Loading pre-training models...")

        # GTF branch weights.
        gtf_user_embedding_weight = gtf_model.get_layer(
            'gtf_user_embedding').get_weights()
        gtf_item_embedding_weight = gtf_model.get_layer(
            'gtf_item_embedding').get_weights()
        gtf_time_embedding_weight = gtf_model.get_layer(
            'gtf_time_embedding').get_weights()
        gtf_prediction_weight = gtf_model.get_layer(
            'gtf_prediction').get_weights()

        rtf_model.get_layer('gtf_user_embedding').set_weights(
            gtf_user_embedding_weight)
        rtf_model.get_layer('gtf_item_embedding').set_weights(
            gtf_item_embedding_weight)
        rtf_model.get_layer('gtf_time_embedding').set_weights(
            gtf_time_embedding_weight)
        rtf_model.get_layer('gtf_prediction').set_weights(
            gtf_prediction_weight)

        # GRU branch weights.
        gru_user_embedding_weight = gru_model.get_layer(
            'gru_user_embedding').get_weights()
        gru_item_embedding_weight = gru_model.get_layer(
            'gru_item_embedding').get_weights()
        gru_time_embedding_weight = gru_model.get_layer(
            'gru_time_embedding').get_weights()
        rtf_model.get_layer('gru_user_embedding').set_weights(
            gru_user_embedding_weight)
        rtf_model.get_layer('gru_item_embedding').set_weights(
            gru_item_embedding_weight)
        rtf_model.get_layer('gru_time_embedding').set_weights(
            gru_time_embedding_weight)

        # Stacked GRU layers, matched by index-derived name.
        for index in range(1, len(gru_layers) - 1):

            gru_layer_weights = gru_model.get_layer('gru_layer_%d' %
                                                    index).get_weights()
            rtf_model.get_layer('gru_layer_%d' %
                                index).set_weights(gru_layer_weights)

        gru_prediction_weight = gru_model.get_layer(
            'gru_prediction').get_weights()
        rtf_model.get_layer('gru_prediction').set_weights(
            gru_prediction_weight)

        print("Loading pre-training models done.")
        return rtf_model

    def hybrid_loss(self, y_true, y_pred, delta=0.5):
        """Convex blend of L1 and L2 error: delta*|e| + (1-delta)*e^2."""
        l1 = K.abs(y_true - y_pred)
        l2 = K.square(y_true - y_pred)
        hybrid_loss = delta * l1 + (1 - delta) * l2
        return hybrid_loss

    def save_model(self, _model):
        """Persist trained weights; filename encodes dataType and density."""
        _model.save_weights(self.modelPath + 'rtf_%s_%.2f.h5' %
                            (self.dataType, self.density),
                            overwrite=True)
# Example #4
class PGRU:
    """GRU-based, time-aware QoS prediction model.

    Embeds user/item/time IDs, reshapes the concatenated user-item latent
    into a sequence, runs it through stacked GRU layers together with the
    time embedding, and maps the result to a prediction.  Training is
    launched from the constructor.
    """

    def __init__(self, args, density):
        """Capture configuration from ``args``, build the network, train."""
        self.dataSet = DataSet(args, density)
        self.dataType = self.dataSet.dataType
        self.density = self.dataSet.density
        self.shape = self.dataSet.shape

        self.train = self.dataSet.train
        self.test = self.dataSet.test

        # Training hyper-parameters.
        self.epochNum = args.epochNum
        self.batchSize = args.batchSize
        self.gruLayers = args.gruLayers
        self.regLayers = args.regLayers
        self.dropLayers = args.dropLayers
        self.lr = args.lr
        self.decay = args.decay
        self.optimizer = args.optimizer
        self.verbose = args.verbose

        # Persistence settings.
        self.store = args.store
        self.modelPath = args.modelPath
        self.imagePath = args.imagePath
        self.resultPath = args.resultPath

        self.model = self.load_model()

        self.run()

    def run(self):
        """Train for ``epochNum`` epochs; persist results and best weights."""
        # Baseline evaluation before any training.
        test_x, test_y = self.dataSet.getTestInstance(self.test)
        print('Initializing...')
        mae, rmse = evaluate(self.model, test_x, test_y, self.batchSize)
        sys.stdout.write('\rInitializing done.MAE = %.4f|RMSE = %.4f.\n' %
                         (mae, rmse))
        best_mae, best_rmse, best_epoch = mae, rmse, -1
        metric_names = ['MAE', 'RMSE']
        evalResults = np.zeros((self.epochNum, 2))
        print('=' * 14 + 'Start Training' + '=' * 22)
        for ep in range(self.epochNum):
            sys.stdout.write('\rEpoch %d starts...' % ep)
            tic = time()
            train_x, train_y = self.dataSet.getTrainInstance(self.train)
            # One Keras epoch per outer-loop iteration.
            fit_log = self.model.fit(train_x,
                                     train_y,
                                     batch_size=self.batchSize,
                                     epochs=1,
                                     verbose=0,
                                     shuffle=True)
            toc = time()
            sys.stdout.write('\rEpoch %d ends.[%.1fs]' % (ep, toc - tic))
            # Evaluate every `verbose` epochs; results saved each time.
            if ep % self.verbose == 0:
                sys.stdout.write('\rEvaluating Epoch %d...' % ep)
                mae, rmse = evaluate(self.model, test_x, test_y,
                                     self.batchSize)
                loss = fit_log.history['loss'][0]
                sys.stdout.write('\rEvaluating completes.[%.1fs] ' %
                                 (time() - toc))
                if mae < best_mae:
                    best_mae, best_rmse, best_epoch = mae, rmse, ep
                evalResults[ep, :] = [mae, rmse]
                saveResult('pgru', self.resultPath, self.dataType,
                           self.density, evalResults, metric_names)
                sys.stdout.write(
                    '\rEpoch %d : MAE = %.4f|RMSE = %.4f|Loss = %.4f\n' %
                    (ep, mae, rmse, loss))
        print('=' * 14 + 'Training Complete!' + '=' * 18)
        print('The best is at epoch %d : MAE = %.4f|RMSE = %.4f.' %
              (best_epoch, best_mae, best_rmse))
        if self.store:
            self.save_model(self.model)
            print('The model is stored in %s.' % self.modelPath)
            print('The result is stored in %s.' % self.resultPath)

    def load_model(self):
        """Build, compile (hybrid loss) and diagram the PGRU network."""
        net = self.build_model(self.shape, self.gruLayers, self.regLayers,
                               self.dropLayers)
        opt = self.optimizer(lr=self.lr, decay=self.decay)
        net.compile(optimizer=opt, loss=self.hybrid_loss)
        plot_model(net,
                   to_file=self.imagePath + 'PGRU.jpg',
                   show_shapes=True)
        return net

    @staticmethod
    def build_model(shape, gru_layers, reg_layers, drop_layers):
        """Assemble the (user, item, time) -> prediction recurrent graph.

        shape: (num_users, num_items, num_time_slices) vocabulary sizes.
        gru_layers: [embed_dim, seq_len/state_dim, ..., output_units].
        """
        # Scalar integer-ID inputs.
        u_in = Input(shape=(1, ), dtype='int32', name='user_input')

        i_in = Input(shape=(1, ), dtype='int32', name='item_input')

        t_in = Input(shape=(1, ), dtype='int32', name='time_input')

        def embed(vocab, width, layer_name, source):
            # Embedding lookup flattened to a plain 2-D latent vector.
            lookup = Embedding(input_dim=vocab,
                               output_dim=width,
                               embeddings_initializer=initializers.random_normal(),
                               embeddings_regularizer=regularizers.l2(reg_layers[0]),
                               input_length=1,
                               name=layer_name)
            return Flatten()(lookup(source))

        u_vec = embed(shape[0], gru_layers[0], 'gru_user_embedding', u_in)
        i_vec = embed(shape[1], gru_layers[0], 'gru_item_embedding', i_in)
        # Time embedding is gru_layers[1] wide to match the GRU state size.
        t_vec = embed(shape[2], gru_layers[1], 'gru_time_embedding', t_in)

        u_vec = Dropout(drop_layers[0])(u_vec)
        i_vec = Dropout(drop_layers[0])(i_vec)
        t_vec = Dropout(drop_layers[0])(t_vec)

        # Fold the fused user-item latent into a (gru_layers[1], -1) sequence.
        seq = Concatenate(axis=1)([u_vec, i_vec])
        seq = Reshape(target_shape=(int(gru_layers[1]), -1))(seq)

        final = len(gru_layers) - 2
        for idx in range(1, len(gru_layers) - 1):
            cell = GRU(units=gru_layers[idx],
                       kernel_initializer=initializers.he_normal(),
                       kernel_regularizer=regularizers.l2(reg_layers[idx]),
                       activation='tanh',
                       recurrent_activation='hard_sigmoid',
                       dropout=drop_layers[idx],
                       return_sequences=(idx != final),
                       name='gru_layer_%d' % idx)
            # NOTE(review): [inputs, state] appears to seed the recurrent
            # initial state with the time embedding — confirm against the
            # Keras RNN call signature.
            seq = cell([seq, t_vec])

        seq = Dropout(drop_layers[-1])(seq)

        out = Dense(units=gru_layers[-1],
                    activation='relu',
                    kernel_initializer=initializers.lecun_normal(),
                    kernel_regularizer=regularizers.l2(reg_layers[-1]),
                    name='gru_prediction')(seq)
        return Model(inputs=[u_in, i_in, t_in], outputs=out)

    def hybrid_loss(self, y_true, y_pred, delta=0.5):
        """Convex blend of L1 and L2 error: delta*|e| + (1-delta)*e^2."""
        err = y_true - y_pred
        return delta * K.abs(err) + (1 - delta) * K.square(err)

    def save_model(self, _model):
        """Persist trained weights; filename encodes dataType/density/layers."""
        target = self.modelPath + 'pgru_%s_%.2f_%s.h5' % (
            self.dataType, self.density, self.gruLayers)
        _model.save_weights(target, overwrite=True)