Example #1
def train_Keras(train_X, train_y, test_X, test_y, kwargs, cae_model_func=None, n_features=None, epochs=150):
    normalization = normalization_func()
    num_classes = train_y.shape[-1]

    norm_train_X = normalization.fit_transform(train_X)
    norm_test_X = normalization.transform(test_X)

    batch_size = max(2, len(train_X) // 50)
    # inverse-frequency class weights, rescaled to sum to num_classes
    # (train_y is assumed to be one-hot encoded)
    class_weight = train_y.shape[0] / np.sum(train_y, axis=0)
    class_weight = num_classes * class_weight / class_weight.sum()
    sample_weight = None
    print('mu :', kwargs['mu'], ', batch_size :', batch_size)
    print('reps : ', reps, ', weights : ', class_weight)
    if num_classes == 2:
        # for binary problems, fold the class weights into per-sample weights
        # and pass them to fit() via sample_weight instead of class_weight
        sample_weight = np.zeros((len(norm_train_X),))
        sample_weight[train_y[:, 1] == 1] = class_weight[1]
        sample_weight[train_y[:, 1] == 0] = class_weight[0]
        class_weight = None

    model_clbks = [
        callbacks.LearningRateScheduler(scheduler()),
    ]


    if cae_model_func is not None:
        # concrete-autoencoder branch: learn to select n_features inputs,
        # with the linear SVC stacked on top as the output function
        svc_model = LinearSVC(nfeatures=(n_features,), **kwargs)
        svc_model.create_keras_model(nclasses=num_classes)
        classifier = svc_model.model
        cae_model = cae_model_func(output_function=classifier, K=n_features)
        start_time = time.process_time()
        cae_model.fit(
            norm_train_X, train_y, norm_test_X, test_y, num_epochs=800, batch_size=batch_size,
            class_weight=class_weight
        )
        model = cae_model.model
        model.indices = cae_model.get_support(True)          # indices of the selected features
        model.heatmap = cae_model.probabilities.max(axis=0)  # per-feature selection score
        model.fs_time = time.process_time() - start_time
    else:
        svc_model = LinearSVC(norm_train_X.shape[1:], **kwargs)
        svc_model.create_keras_model(nclasses=num_classes)
        model = svc_model.model
        model.compile(
            loss=LinearSVC.loss_function(loss_function, class_weight),
            optimizer=optimizer_class(lr=initial_lr),
            metrics=[LinearSVC.accuracy]
        )
        model.fit(
            norm_train_X, train_y, batch_size=batch_size,
            epochs=epochs,
            callbacks=model_clbks,
            validation_data=(norm_test_X, test_y),
            class_weight=class_weight,
            sample_weight=sample_weight,
            verbose=verbose
        )

    model.normalization = normalization

    return model
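
A hypothetical call for the concrete-autoencoder branch above. The snippet relies on module-level globals (normalization_func, scheduler, optimizer_class, initial_lr, loss_function, verbose, reps), so this sketch assumes they are defined as in the surrounding module; the toy data and the factory name my_cae_factory are illustrative, not part of the source:

import numpy as np
from tensorflow.keras.utils import to_categorical

X = np.random.randn(200, 50).astype('float32')       # toy data, illustrative only
y = to_categorical(np.random.randint(0, 2, 200), 2)  # one-hot labels, two classes

model = train_Keras(
    X[:150], y[:150], X[150:], y[150:],
    kwargs={'mu': 100.0},           # 'mu' is the only key the function reads directly
    cae_model_func=my_cae_factory,  # hypothetical factory returning a wrapper like Example #3's class
    n_features=10,
)
print(model.indices, model.fs_time)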
Example #2
def train_Keras(train_X, train_y, test_X, test_y, kwargs, epochs=150):
    normalization = normalization_func()
    num_classes = train_y.shape[-1]

    norm_train_X = normalization.fit_transform(train_X)
    norm_test_X = normalization.transform(test_X)

    batch_size = max(2, len(train_X) // 50)
    class_weight = train_y.shape[0] / np.sum(train_y, axis=0)
    class_weight = num_classes * class_weight / class_weight.sum()
    sample_weight = None
    print('mu :', kwargs['mu'], ', batch_size :', batch_size)
    print('reps : ', reps, ', weights : ', class_weight)
    if num_classes == 2:
        sample_weight = np.zeros((len(norm_train_X), ))
        sample_weight[train_y[:, 1] == 1] = class_weight[1]
        sample_weight[train_y[:, 1] == 0] = class_weight[0]
        class_weight = None

    svc_model = LinearSVC(nfeatures=norm_train_X.shape[1:], **kwargs)
    svc_model.create_keras_model(nclasses=num_classes)

    model_clbks = [
        callbacks.LearningRateScheduler(scheduler()),
    ]

    fs_callbacks = []  # no feature-selection callbacks in this baseline variant

    model = svc_model.model
    e2efs_layer = None  # plain SVC baseline: no E2E-FS layer attached

    optimizer = optimizer_class(e2efs_layer, learning_rate=initial_lr)

    model.compile(loss=LinearSVC.loss_function(loss_function, class_weight),
                  optimizer=optimizer,
                  metrics=[LinearSVC.accuracy])

    model.fit(norm_train_X,
              train_y,
              batch_size=batch_size,
              epochs=epochs,
              callbacks=model_clbks,
              validation_data=(norm_test_X, test_y),
              class_weight=class_weight,
              sample_weight=sample_weight,
              verbose=verbose)

    model.normalization = normalization

    return model
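
All five variants read the same module-level globals. A minimal sketch of plausible stand-ins, assuming a standard scaler and a stepwise learning-rate schedule; these are illustrative guesses, not the source module's definitions. Note that Examples #2 and #4 pass the E2E-FS layer to optimizer_class, so there it must be the custom optimizer from the e2efs package rather than a stock Keras one:

import numpy as np
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.optimizers import SGD

normalization_func = StandardScaler  # instantiated as normalization_func()
optimizer_class = SGD                # only valid for the variants calling optimizer_class(lr=...)
initial_lr = 0.01
loss_function = 'square_hinge'       # forwarded to LinearSVC.loss_function
verbose = 0
reps = 1                             # only used in the diagnostic print
extra_epochs = 200                   # read by Example #5's fs schedule

def scheduler(extra_epochs=0):
    # illustrative schedule: halve the learning rate every 50 epochs
    def schedule(epoch):
        return initial_lr * (0.5 ** (epoch // 50))
    return schedule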
Example #3
    def fit(self, X, Y=None, val_X=None, val_Y=None, num_epochs=300, batch_size=None, start_temp=10.0,
            min_temp=0.1, tryout_limit=1, class_weight=None):
        if Y is None:
            Y = X
        assert len(X) == len(Y)
        validation_data = None
        if val_X is not None and val_Y is not None:
            assert len(val_X) == len(val_Y)
            validation_data = (val_X, val_Y)

        if batch_size is None:
            batch_size = max(len(X) // 256, 16)

        steps_per_epoch = (len(X) + batch_size - 1) // batch_size  # ceil(len(X) / batch_size)

        for i in range(tryout_limit):

            K.set_learning_phase(1)

            inputs = layers.Input(shape=X.shape[1:])

            # closed-form per-step decay factor: the concrete/Gumbel temperature
            # falls exponentially from start_temp to min_temp over all training steps
            alpha = np.exp(np.log(min_temp / start_temp) / (num_epochs * steps_per_epoch))

            self.concrete_select = ConcreteSelect(self.K, start_temp, min_temp, alpha, name='concrete_select')

            selected_features = self.concrete_select(inputs)

            outputs = self.output_function(selected_features)

            self.model = models.Model(inputs, outputs)

            self.model.compile(
                loss=LinearSVC.loss_function(loss_function, class_weight),
                optimizer=optimizer_class(lr=initial_lr),
                metrics=[LinearSVC.accuracy]
            )

            self.model.summary()  # summary() already prints; wrapping it in print() would emit an extra "None"

            stopper_callback = StopperCallback()

            hist = self.model.fit(X, Y, batch_size=batch_size, epochs=num_epochs, verbose=0,
                                  callbacks=[stopper_callback], validation_data=validation_data)

            # accept this run once the selection probabilities are confident
            # enough; otherwise double the epoch budget and try again
            if K.get_value(
                    K.mean(K.max(K.softmax(self.concrete_select.logits, axis=-1)))) >= stopper_callback.mean_max_target:
                break

            num_epochs *= 2

        # final selection: softmax of the logits gives per-feature probabilities,
        # argmax the indices of the selected features
        self.probabilities = K.get_value(K.softmax(self.model.get_layer('concrete_select').logits))
        self.indices = K.get_value(K.argmax(self.model.get_layer('concrete_select').logits))

        return self
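
The alpha computed in fit is a closed-form per-step decay factor. A quick sketch of the resulting temperature schedule, assuming (as the formula implies) that ConcreteSelect multiplies its temperature by alpha once per training step:

import numpy as np

start_temp, min_temp = 10.0, 0.1
num_epochs, steps_per_epoch = 300, 40
alpha = np.exp(np.log(min_temp / start_temp) / (num_epochs * steps_per_epoch))

for step in (0, 6000, 12000):
    print(step, start_temp * alpha ** step)
# 0 -> 10.0, 6000 -> 1.0, 12000 -> 0.1: the temperature lands exactly on
# min_temp after num_epochs * steps_per_epoch steps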
Example #4
def train_Keras(train_X,
                train_y,
                test_X,
                test_y,
                kwargs,
                e2efs_class=None,
                n_features=None,
                epochs=150):
    normalization = normalization_func()
    num_classes = train_y.shape[-1]

    norm_train_X = normalization.fit_transform(train_X)
    norm_test_X = normalization.transform(test_X)

    batch_size = max(2, len(train_X) // 50)
    class_weight = train_y.shape[0] / np.sum(train_y, axis=0)
    class_weight = num_classes * class_weight / class_weight.sum()
    sample_weight = None
    print('mu :', kwargs['mu'], ', batch_size :', batch_size)
    print('reps : ', reps, ', weights : ', class_weight)
    if num_classes == 2:
        sample_weight = np.zeros((len(norm_train_X), ))
        sample_weight[train_y[:, 1] == 1] = class_weight[1]
        sample_weight[train_y[:, 1] == 0] = class_weight[0]
        class_weight = None

    svc_model = LinearSVC(nfeatures=norm_train_X.shape[1:], **kwargs)
    svc_model.create_keras_model(nclasses=num_classes)

    model_clbks = [
        callbacks.LearningRateScheduler(scheduler()),
    ]

    fs_callbacks = []

    if e2efs_class is not None:
        # E2E-FS branch: prepend the feature-selection layer to the SVC
        # classifier and register the callback that drives its schedule
        classifier = svc_model.model
        e2efs_layer = e2efs_class(n_features,
                                  input_shape=norm_train_X.shape[1:])
        model = e2efs_layer.add_to_model(classifier,
                                         input_shape=norm_train_X.shape[1:])
        fs_callbacks.append(
            clbks.E2EFSCallback(factor_func=None,
                                units_func=None,
                                verbose=verbose))
    else:
        model = svc_model.model
        e2efs_layer = None

    optimizer = optimizer_class(e2efs_layer, lr=initial_lr)

    model.compile(loss=LinearSVC.loss_function(loss_function, class_weight),
                  optimizer=optimizer,
                  metrics=[LinearSVC.accuracy])

    if e2efs_class is not None:
        model.fs_layer = e2efs_layer
        model.heatmap = e2efs_layer.moving_heatmap

        # selection phase: the huge epoch cap effectively means "run until
        # the E2E-FS callback stops training early"
        start_time = time.process_time()
        model.fit(norm_train_X,
                  train_y,
                  batch_size=batch_size,
                  epochs=200000,
                  callbacks=fs_callbacks,
                  validation_data=(norm_test_X, test_y),
                  class_weight=class_weight,
                  sample_weight=sample_weight,
                  verbose=verbose)
        model.fs_time = time.process_time() - start_time

    # classifier training phase (runs after feature selection, if any)
    model.fit(norm_train_X,
              train_y,
              batch_size=batch_size,
              epochs=epochs,
              callbacks=model_clbks,
              validation_data=(norm_test_X, test_y),
              class_weight=class_weight,
              sample_weight=sample_weight,
              verbose=verbose)

    model.normalization = normalization

    return model
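
A hypothetical call for the E2E-FS branch of Example #4. SomeE2EFSLayer stands in for whichever E2E-FS layer class the surrounding script imports from the e2efs package; the data and numbers are illustrative only:

import numpy as np
from tensorflow.keras.utils import to_categorical

X = np.random.randn(200, 50).astype('float32')
y = to_categorical(np.random.randint(0, 2, 200), 2)

model = train_Keras(
    X[:150], y[:150], X[150:], y[150:],
    kwargs={'mu': 100.0},
    e2efs_class=SomeE2EFSLayer,  # hypothetical: the imported E2E-FS layer class
    n_features=10,
)
print(model.heatmap)  # moving heatmap tracked by the feature-selection layer
print(model.fs_time)  # CPU time spent in the selection phase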
Example #5
def train_Keras(train_X,
                train_y,
                test_X,
                test_y,
                kwargs,
                l2x_model_func=None,
                n_features=None,
                epochs=150):
    normalization = normalization_func()
    num_classes = train_y.shape[-1]

    norm_train_X = normalization.fit_transform(train_X)
    norm_test_X = normalization.transform(test_X)

    batch_size = max(2, len(train_X) // 50)
    class_weight = train_y.shape[0] / np.sum(train_y, axis=0)
    class_weight = num_classes * class_weight / class_weight.sum()
    sample_weight = None
    print('mu :', kwargs['mu'], ', batch_size :', batch_size)
    print('reps : ', reps, ', weights : ', class_weight)
    if num_classes == 2:
        sample_weight = np.zeros((len(norm_train_X), ))
        sample_weight[train_y[:, 1] == 1] = class_weight[1]
        sample_weight[train_y[:, 1] == 0] = class_weight[0]
        class_weight = None

    svc_model = LinearSVC(nfeatures=norm_train_X.shape[1:], **kwargs)
    svc_model.create_keras_model(nclasses=num_classes)

    model_clbks = [
        callbacks.LearningRateScheduler(scheduler()),
    ]

    fs_callbacks = [
        callbacks.LearningRateScheduler(scheduler(extra_epochs=extra_epochs)),
    ]

    if l2x_model_func is not None:
        # L2X branch: multiply the inputs by the learned per-feature scores
        # and feed the masked inputs to the SVC classifier
        classifier = svc_model.model
        l2x_model = l2x_model_func(norm_train_X.shape[1:], n_features)
        classifier_input = layers.Multiply()(
            [l2x_model.output, l2x_model.input])
        output = classifier(classifier_input)
        model = models.Model(l2x_model.input, output)
    else:
        model = svc_model.model
        l2x_model = None

    optimizer = optimizer_class(lr=initial_lr)

    model.compile(loss=LinearSVC.loss_function(loss_function, class_weight),
                  optimizer=optimizer,
                  metrics=[LinearSVC.accuracy])

    if l2x_model is not None:
        model.l2x_model = l2x_model

        start_time = time.process_time()
        model.fit(norm_train_X,
                  train_y,
                  batch_size=batch_size,
                  epochs=epochs + extra_epochs,
                  callbacks=fs_callbacks,
                  validation_data=(norm_test_X, test_y),
                  class_weight=class_weight,
                  sample_weight=sample_weight,
                  verbose=0)
        # scores = l2x_model.predict(norm_train_X, verbose=0, batch_size=batch_size).reshape((-1, np.prod(norm_train_X.shape[1:])))
        # model.heatmap = compute_median_rank(scores, k=n_features)
        # aggregate the selector's per-feature scores over the training set
        model.heatmap = l2x_model.predict(
            norm_train_X, verbose=0, batch_size=batch_size).reshape(
                (-1, np.prod(norm_train_X.shape[1:]))).sum(axis=0)
        model.fs_time = time.process_time() - start_time
    else:
        model.fit(norm_train_X,
                  train_y,
                  batch_size=batch_size,
                  epochs=epochs,
                  callbacks=model_clbks,
                  validation_data=(norm_test_X, test_y),
                  class_weight=class_weight,
                  sample_weight=sample_weight,
                  verbose=verbose)

    model.normalization = normalization

    return model
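
Example #5 only needs l2x_model_func to return a Keras model emitting one score per input feature, so the Multiply mask lines up with the input shape. A deliberately simplified stand-in (sigmoid gates from a small dense net, rather than the Gumbel-softmax k-hot sampling of the actual L2X method):

from tensorflow.keras import layers, models

def toy_l2x_model_func(input_shape, n_features):
    # hypothetical selector; n_features is unused here, while the real L2X
    # selector uses it to sample k-hot feature subsets
    inp = layers.Input(shape=input_shape)
    h = layers.Dense(100, activation='relu')(inp)
    scores = layers.Dense(input_shape[0], activation='sigmoid')(h)
    return models.Model(inp, scores)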