Ejemplo n.º 1
0
    def fit(self, train_x, train_y, validation_data_fit, epochs, **kwargs):
        """Train the underlying Keras model.

        Args:
            train_x: training inputs.
            train_y: training labels (one-hot); converted to class indices
                via ohe2cat unless the task is multilabel.
            validation_data_fit: (val_x, val_y) tuple.
            epochs: number of training epochs.
            **kwargs: ignored.
        """
        val_x, val_y = validation_data_fit

        patience = 2
        # NOTE(review): EarlyStopping monitors 'val_loss', but validation_data
        # is commented out in the fit() call below, so that metric is never
        # produced — confirm whether validation was disabled on purpose.
        callbacks = [
            keras.callbacks.EarlyStopping(monitor='val_loss',
                                          patience=patience)
        ]
        # Multilabel targets stay one-hot; otherwise convert to class indices.
        # (The original code had a no-op `train_y = train_y` branch here.)
        if not self._is_multilabel:
            train_y = ohe2cat(train_y)
            val_y = ohe2cat(val_y)

        self._model.fit(
            train_x,
            train_y,
            epochs=epochs,
            callbacks=callbacks,
            # validation_data=(val_x, val_y),
            verbose=1,  # Logs once per epoch.
            batch_size=32,
            shuffle=True,
            # use_multiprocessing=True
        )
Ejemplo n.º 2
0
    def fit(self, train_x, train_y, validation_data_fit, params, epochs,
            **kwargs):
        """Train via a data generator, early-stopping on validation loss.

        Args:
            train_x: training inputs.
            train_y: training labels (one-hot); converted to class indices
                for binary tasks.
            validation_data_fit: (val_x, val_y) tuple.
            params: generator configuration; must contain "batch_size".
            epochs: maximum number of epochs.
            **kwargs: ignored.

        Returns:
            The Keras History object produced by training.
        """
        patience = 2
        callbacks = [
            keras.callbacks.EarlyStopping(monitor='val_loss',
                                          patience=patience)
        ]
        val_x, val_y = validation_data_fit
        # Binary tasks train on class indices; otherwise labels stay one-hot.
        if self._class_num == 2:
            train_y = ohe2cat(train_y)
            val_y = ohe2cat(val_y)
        batch_size = params["batch_size"]
        # Guard against a zero step count when the dataset is smaller than
        # one batch — fit_generator rejects steps_per_epoch == 0.
        steps_per_epoch = max(1, len(train_x) // batch_size)
        train_data_generator = ModelSequenceDataGenerator(
            train_x, train_y, **params)

        history = self._model.fit_generator(train_data_generator,
                                            steps_per_epoch=steps_per_epoch,
                                            validation_data=(val_x, val_y),
                                            epochs=epochs,
                                            max_queue_size=10,
                                            callbacks=callbacks,
                                            use_multiprocessing=False,
                                            workers=1,
                                            verbose=VERBOSE)
        return history
Ejemplo n.º 3
0
    def fit(self,
            train_x,
            train_y,
            validation_data_fit,
            epochs,
            batch_size=32,
            **kwargs):
        """Train the Keras model; validate/early-stop only for multi-epoch runs.

        Args:
            train_x: training inputs.
            train_y: training labels (one-hot); converted via ohe2cat.
            validation_data_fit: (val_x, val_y) tuple; used only when
                epochs > 1, matching the original behavior.
            epochs: requested epochs; anything <= 1 trains a single epoch
                without validation.
            batch_size: mini-batch size.
            **kwargs: ignored.
        """
        val_x, val_y = validation_data_fit
        callbacks = []
        extra_fit_kwargs = {}

        if epochs > 1:
            # Multi-epoch runs get validation data plus early stopping.
            callbacks.append(
                keras.callbacks.EarlyStopping(monitor='val_loss',
                                              patience=3))
            extra_fit_kwargs['validation_data'] = (val_x, ohe2cat(val_y))

        # Single call replaces the two near-identical branches of the
        # original implementation; behavior is unchanged.
        self._model.fit(
            train_x,
            ohe2cat(train_y),
            epochs=epochs if epochs > 1 else 1,
            callbacks=callbacks,
            verbose=1,  # Logs once per epoch.
            batch_size=batch_size,
            shuffle=True,
            **extra_fit_kwargs)
Ejemplo n.º 4
0
    def fit(self, train_x, train_y, validation_data_fit, epochs, **kwargs):
        """Train the Keras model with early stopping on validation loss.

        Labels (one-hot) are converted to class indices via ohe2cat for
        both the training and validation sets.
        """
        val_x, val_y = validation_data_fit
        early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                   patience=3)

        self._model.fit(train_x,
                        ohe2cat(train_y),
                        epochs=epochs,
                        callbacks=[early_stop],
                        validation_data=(val_x, ohe2cat(val_y)),
                        verbose=1,  # one log line per epoch
                        batch_size=32,
                        shuffle=True)
Ejemplo n.º 5
0
    def fit(self,
            train_x,
            train_y,
            validation_data_fit,
            epochs,
            cur_model_run_loop,
            batch_size=32,
            **kwargs):
        """Train the Keras model, optionally with LR decay and early stopping.

        Args:
            train_x: training inputs.
            train_y: training labels (one-hot); converted to class indices
                unless the task is multilabel.
            validation_data_fit: (val_x, val_y) tuple; used only when
                epochs > 1, matching the original behavior.
            epochs: requested epochs; anything <= 1 trains a single epoch
                without validation or early stopping.
            cur_model_run_loop: unused here; kept for interface
                compatibility with the caller.
            batch_size: mini-batch size.
            **kwargs: ignored.
        """
        val_x, val_y = validation_data_fit
        callbacks = []
        extra_fit_kwargs = {}

        if self._use_step_decay:
            callbacks.append(LearningRateScheduler(self.step_decay))
        # Multilabel targets stay one-hot; otherwise convert to class indices.
        # (The original code had a no-op `train_y = train_y` branch here.)
        if not self._is_multilabel:
            train_y = ohe2cat(train_y)
            val_y = ohe2cat(val_y)

        if epochs > 1:
            # Multi-epoch runs get validation data plus early stopping.
            callbacks.append(
                keras.callbacks.EarlyStopping(monitor='val_loss',
                                              patience=3))
            extra_fit_kwargs['validation_data'] = (val_x, val_y)

        # Single call replaces the two near-identical branches of the
        # original implementation; behavior is unchanged.
        self._model.fit(
            train_x,
            train_y,
            epochs=epochs if epochs > 1 else 1,
            callbacks=callbacks,
            verbose=1,  # Logs once per epoch.
            batch_size=batch_size,
            shuffle=True,
            **extra_fit_kwargs)
Ejemplo n.º 6
0
    def fit(self,
            train_x,
            train_y,
            validation_data_fit,
            epochs,
            cur_model_run_loop,
            batch_size=64,
            **kwargs):
        """Run a bounded number of Adam mini-batch steps in a fresh TF1 session.

        Labels are converted from one-hot to class indices and re-expanded to
        one-hot rows sized to self.num_classes. Trains at most
        min(len(X) // batch_size, self.rr) steps, grows self.rr by
        self.add_rr, and checkpoints the session to './ft.ckpt'.

        Note: validation_data_fit, epochs, and cur_model_run_loop are accepted
        but not used by this implementation. Always returns 0.
        """
        # Shuffle sample order once up front (in-place on the index list).
        index = [i for i in range(len(train_y))]
        np.random.shuffle(index)
        X = train_x
        train_y = ohe2cat(train_y)
        # Rebuild one-hot targets with exactly self.num_classes columns.
        Y_b = np.eye(self.num_classes)[train_y]
        X = X[index]
        Y_b = Y_b[index]

        # If the dataset is smaller than one batch, halve the batch size so
        # at least one full batch exists.
        if batch_size >= len(X):
            batch_size = int(len(X) / 2)

        print("TF model---------- batch_size:%d, X length: %d" %
              (batch_size, len(X)))
        # Fixed learning rate of 0.0025; self.losses is the graph's loss op.
        train_step = tf.train.AdamOptimizer(0.0025).minimize(self.losses)
        init_global = tf.global_variables_initializer()
        init_local = tf.local_variables_initializer()
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run([init_global, init_local])
            # Cap the number of steps at self.rr (a budget that grows each
            # call via self.add_rr below).
            rounds = min(int(len(X) / batch_size), self.rr)
            print("TF model ---------- Rounds:%d" % rounds)

            for i in range(rounds):
                start = i * batch_size
                end = (i + 1) * batch_size
                _ = sess.run(train_step,
                             feed_dict={
                                 self.input_x: X[start:end],
                                 self.input_y: Y_b[start:end]
                             })

            self.rr += self.add_rr
            # Persist weights so a later session can restore this progress.
            saver.save(sess, './ft.ckpt')

        return 0
Ejemplo n.º 7
0
 def fit(self, x_train, y_train, *args, **kwargs):
     """Fit the underlying SVM model.

     One-hot labels are converted to class indices before fitting, and
     the set of distinct classes seen is recorded in
     self._existed_classes.
     """
     class_indices = ohe2cat(y_train)
     self._existed_classes = set(class_indices)
     print("=== svm class {}".format(len(self._existed_classes)))
     self._model.fit(x_train, ohe2cat(y_train))
Ejemplo n.º 8
0
 def fit(self, x_train, y_train, *args, **kwargs):
     """Fit the underlying model on class indices derived from one-hot labels.

     Also records the set of distinct classes seen in
     self._existed_classes.
     """
     print("=== lr fit {}".format(y_train.shape))
     class_indices = ohe2cat(y_train)
     self._existed_classes = set(class_indices)
     print("=== lr class {}".format(len(self._existed_classes)))
     self._model.fit(x_train, ohe2cat(y_train))
 def fit(self, x_train, y_train, *args, **kwargs):
     """Fit the underlying model.

     Multilabel targets are passed through unchanged; single-label
     one-hot targets are converted to class indices first.
     """
     if self._is_multilabel:
         self._model.fit(x_train, y_train)
     else:
         self._model.fit(x_train, ohe2cat(y_train))