Example #1
    def cross_validate(self, x_all, y_all, n_splits, trainer_args=None):
        """Do the n_splits cross-validation for the input."""
        if trainer_args is None:
            trainer_args = {}

        if constant.LIMIT_MEMORY:
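            # allow_growth makes TensorFlow claim GPU memory on demand
            # instead of reserving the whole GPU up front.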
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            init = tf.global_variables_initializer()
            sess.run(init)
            backend.set_session(sess)

        # random_state takes effect only with shuffle=True in scikit-learn
        k_fold = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=7)
        ret = []
        y_raw_all = y_all
        y_all = self.y_encoder.transform(y_all)
        model = self.load_searcher().load_best_model()
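        # Each fold rebuilds the model and trains it anew on that fold's training split.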
        for train, test in k_fold.split(x_all, y_raw_all):
            graph = Graph(model, False)
            backend.clear_session()
            model = graph.produce_model()
            ModelTrainer(model,
                         x_all[train],
                         y_all[train],
                         x_all[test],
                         y_all[test], False).train_model(**trainer_args)
            scores = model.evaluate(x_all[test], y_all[test], verbose=self.verbose)
            if self.verbose:
                print('Score:', scores[1])
            ret.append(scores[1] * 100)
        return np.array(ret)
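For orientation, a minimal standalone sketch of the same stratified k-fold pattern in plain scikit-learn (the classifier and the synthetic data are placeholders, not part of the source):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold

# Synthetic stand-in data: 100 samples, 4 features, 2 balanced classes
x_all = np.random.rand(100, 4)
y_all = np.tile([0, 1], 50)

k_fold = StratifiedKFold(n_splits=5, shuffle=True, random_state=7)
scores = []
for train, test in k_fold.split(x_all, y_all):
    clf = LogisticRegression()  # placeholder model
    clf.fit(x_all[train], y_all[train])
    scores.append(clf.score(x_all[test], y_all[test]) * 100)
print(np.array(scores))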
Example #2
    def add_model(self, model, x_train, y_train, x_test, y_test):
        """add one model while will be trained to history list

        Returns:
            History object.
        """
        model.compile(loss=categorical_crossentropy,
                      optimizer=Adadelta(),
                      metrics=['accuracy'])
        if self.verbose:
            model.summary()
        ModelTrainer(model, x_train, y_train, x_test, y_test,
                     self.verbose).train_model()
        loss, accuracy = model.evaluate(x_test, y_test, verbose=self.verbose)
        model.save(os.path.join(self.path, str(self.model_count) + '.h5'))

        ret = {
            'model_id': self.model_count,
            'loss': loss,
            'accuracy': accuracy
        }
        self.history.append(ret)
        self.history_configs.append(extract_config(model))
        self.model_count += 1

        return ret
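The bookkeeping around the trained model reduces to a simple append-and-increment pattern; a minimal illustrative sketch (all names here are made up, not from the source):

history = []
model_count = 0

def record_result(loss, accuracy):
    # Append one result dict per trained model, keyed by an incrementing id
    global model_count
    ret = {'model_id': model_count, 'loss': loss, 'accuracy': accuracy}
    history.append(ret)
    model_count += 1
    return ret

print(record_result(0.35, 0.91))  # {'model_id': 0, 'loss': 0.35, 'accuracy': 0.91}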
Example #3
def train(args):
    graph, x_train, y_train, x_test, y_test, trainer_args, path = args
    model = graph.produce_model()
    # if path is not None:
    #     plot_model(model, to_file=path, show_shapes=True)
    loss, accuracy = ModelTrainer(model, x_train, y_train, x_test, y_test,
                                  False).train_model(**trainer_args)
    return accuracy, loss, Graph(model, True)
Example #4
def train(args):
    graph, train_data, test_data, trainer_args, path, verbose = args
    model = graph.produce_model()
    # if path is not None:
    #     plot_model(model, to_file=path, show_shapes=True)
    loss, accuracy = ModelTrainer(model, train_data, test_data,
                                  verbose).train_model(**trainer_args)
    model.set_weight_to_graph()
    return accuracy, loss, model.graph
Example #5
def train(args):
    graph, train_data, test_data, trainer_args, path, metric, verbose = args
    model = graph.produce_model()
    # if path is not None:
    #     plot_model(model, to_file=path, show_shapes=True)
    loss, metric_value = ModelTrainer(model, train_data, test_data, metric,
                                      classification_loss,
                                      verbose).train_model(**trainer_args)
    model.set_weight_to_graph()
    return metric_value, loss, model.graph
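Example #3 through Example #5 are the same train() helper evolving alongside the ModelTrainer signature; each unpacks a single args tuple, a convention that suggests map-style dispatch to worker processes. A minimal sketch of that calling convention (dummy payload, illustrative only):

def train(args):
    # Everything arrives packed in one tuple, mirroring the helpers above
    graph, train_data, test_data, trainer_args, path, verbose = args
    return trainer_args.get('max_iter_num', 0)

print(train(('graph', [], [], {'max_iter_num': 3}, None, False)))  # prints 3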
Example #6
    def final_fit(self, x_train, y_train, x_test, y_test, trainer_args=None, retrain=False):
        if trainer_args is None:
            trainer_args = {}
        y_train = self.y_encoder.transform(y_train)
        y_test = self.y_encoder.transform(y_test)
        searcher = self.load_searcher()
        model = searcher.load_best_model()
        if retrain:
            # Rebuild the model from its graph (weights are not carried over),
            # so it retrains from scratch
            model = Graph(model, False).produce_model()
        ModelTrainer(model, x_train, y_train, x_test, y_test, True).train_model(**trainer_args)
        searcher.replace_model(model, searcher.get_best_model_id())
Example #7
    def cross_validate(self, x_all, y_all, n_splits):
        """Perform n_splits-fold cross-validation on the input data."""
        # random_state takes effect only with shuffle=True in scikit-learn
        k_fold = StratifiedKFold(n_splits=n_splits,
                                 shuffle=True,
                                 random_state=7)
        ret = []
        y_raw_all = y_all
        y_all = self.y_encoder.transform(y_all)
        for train, test in k_fold.split(x_all, y_raw_all):
            model = self.searcher.load_best_model()
            # Reinitialize weights so every fold trains from scratch
            reset_weights(model)
            ModelTrainer(model, x_all[train], y_all[train], x_all[test],
                         y_all[test], self.verbose).train_model()
            # Accumulate into ret so the evaluation result does not shadow it
            scores = model.evaluate(x_all[test],
                                    y_all[test],
                                    verbose=self.verbose)
            ret.append(scores[1] * 100)
        return np.array(ret)
Example #8
    def add_model(self, model, x_train, y_train, x_test, y_test):
        """add one model while will be trained to history list

        Returns:
            History object.
        """
        loss, accuracy = ModelTrainer(model, x_train, y_train, x_test, y_test,
                                      False).train_model(**self.trainer_args)

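        # Heuristic bonus: reward architectures with more skip connections,
        # capping the adjusted accuracy at 1.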
        accuracy += 0.005 * len(
            Graph(model, False).extract_descriptor().skip_connections)
        accuracy = min(accuracy, 1)

        model.save(os.path.join(self.path, str(self.model_count) + '.h5'))
        plot_model(model,
                   to_file=os.path.join(self.path,
                                        str(self.model_count) + '.png'),
                   show_shapes=True)

        model_id = self.model_count
        ret = {'model_id': model_id, 'loss': loss, 'accuracy': accuracy}
        self.history.append(ret)
        self.history_configs.append(extract_config(model))
        self.model_count += 1
        self.descriptors[Graph(model, False).extract_descriptor()] = True

        # Update best_model text file
        if model_id == self.get_best_model_id():
            with open(os.path.join(self.path, 'best_model.txt'), 'w') as file:
                file.write('best model: ' + str(model_id))

        if self.verbose:
            print('Model ID:', model_id)
            print('Loss:', loss)
            print('Accuracy:', accuracy)
        return ret
Example #9
def run():
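    # run() appears adapted from the Keras CIFAR-10 ResNet example;
    # resnet_v1, resnet_v2 and lr_schedule are assumed to be defined
    # elsewhere in the surrounding script (not shown here).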
    # Training parameters
    batch_size = 32  # orig paper trained all networks with batch_size=128
    epochs = 200
    data_augmentation = True
    num_classes = 10

    # Subtracting pixel mean improves accuracy
    subtract_pixel_mean = True

    # Model parameter
    # ----------------------------------------------------------------------------
    #           |      | 200-epoch | Orig Paper| 200-epoch | Orig Paper| sec/epoch
    # Model     |  n   | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | GTX1080Ti
    #           |v1(v2)| %Accuracy | %Accuracy | %Accuracy | %Accuracy | v1 (v2)
    # ----------------------------------------------------------------------------
    # ResNet20  | 3 (2)| 92.16     | 91.25     | -----     | -----     | 35 (---)
    # ResNet32  | 5(NA)| 92.46     | 92.49     | NA        | NA        | 50 ( NA)
    # ResNet44  | 7(NA)| 92.50     | 92.83     | NA        | NA        | 70 ( NA)
    # ResNet56  | 9 (6)| 92.71     | 93.03     | 93.01     | NA        | 90 (100)
    # ResNet110 |18(12)| 92.65     | 93.39+-.16| 93.15     | 93.63     | 165(180)
    # ResNet164 |27(18)| -----     | 94.07     | -----     | 94.54     | ---(---)
    # ResNet1001| (111)| -----     | 92.39     | -----     | 95.08+-.14| ---(---)
    # ---------------------------------------------------------------------------
    n = 3

    # Model version
    # Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)
    version = 1

    # Computed depth from supplied model parameter n
    if version == 1:
        depth = n * 6 + 2
    elif version == 2:
        depth = n * 9 + 2
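    # e.g. n = 3 gives depth = 3 * 6 + 2 = 20 (ResNet20) for version 1,
    # or depth = 3 * 9 + 2 = 29 for version 2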

    # Model name, depth and version
    model_type = 'ResNet%dv%d' % (depth, version)

    # Load the CIFAR10 data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    # Input image dimensions.
    input_shape = x_train.shape[1:]

    # Normalize data.
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255

    # If subtract pixel mean is enabled
    if subtract_pixel_mean:
        x_train_mean = np.mean(x_train, axis=0)
        x_train -= x_train_mean
        x_test -= x_train_mean

    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    print('y_train shape:', y_train.shape)

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    if version == 2:
        model = resnet_v2(input_shape=input_shape, depth=depth)
    else:
        model = resnet_v1(input_shape=input_shape, depth=depth)

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=lr_schedule(0)),
                  metrics=['accuracy'])
    model.summary()
    print(model_type)

    # Prepare model saving directory.
    save_dir = os.path.join(os.getcwd(), 'saved_models')
    model_name = 'cifar10_%s_model.{epoch:03d}.h5' % model_type
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    filepath = os.path.join(save_dir, model_name)

    # Prepare callbacks for model saving and for learning rate adjustment.
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True)

    lr_scheduler = LearningRateScheduler(lr_schedule)

    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=5,
                                   min_lr=0.5e-6)

    callbacks = [checkpoint, lr_reducer, lr_scheduler]

    # # Run training, with or without data augmentation.
    # if not data_augmentation:
    #     print('Not using data augmentation.')
    #     model.fit(x_train, y_train,
    #               batch_size=batch_size,
    #               epochs=epochs,
    #               validation_data=(x_test, y_test),
    #               shuffle=True,
    #               callbacks=callbacks)
    # else:
    #     print('Using real-time data augmentation.')
    #     # This will do preprocessing and realtime data augmentation:
    #     datagen = ImageDataGenerator(
    #         # set input mean to 0 over the dataset
    #         featurewise_center=False,
    #         # set each sample mean to 0
    #         samplewise_center=False,
    #         # divide inputs by std of dataset
    #         featurewise_std_normalization=False,
    #         # divide each input by its std
    #         samplewise_std_normalization=False,
    #         # apply ZCA whitening
    #         zca_whitening=False,
    #         # randomly rotate images in the range (deg 0 to 180)
    #         rotation_range=0,
    #         # randomly shift images horizontally
    #         width_shift_range=0.1,
    #         # randomly shift images vertically
    #         height_shift_range=0.1,
    #         # randomly flip images
    #         horizontal_flip=True,
    #         # randomly flip images
    #         vertical_flip=False)
    #
    #     # Compute quantities required for featurewise normalization
    #     # (std, mean, and principal components if ZCA whitening is applied).
    #     datagen.fit(x_train)
    #
    #     # Fit the model on the batches generated by datagen.flow().
    #     model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
    #                         validation_data=(x_test, y_test),
    #                         epochs=epochs, verbose=1, workers=4,
    #                         callbacks=callbacks)
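    # The ModelTrainer call below stands in for the commented-out
    # model.fit / fit_generator training code above.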
    ModelTrainer(model, x_train, y_train, x_test, y_test, False).train_model()

    # Score trained model.
    scores = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])
Example #10
from keras.datasets import cifar10

from autokeras.generator import DefaultClassifierGenerator
from autokeras.net_transformer import default_transform
from autokeras.preprocessor import OneHotEncoder
from autokeras.utils import ModelTrainer

if __name__ == '__main__':
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    print('Start Encoding')
    encoder = OneHotEncoder()
    encoder.fit(y_train)

    y_train = encoder.transform(y_train)
    y_test = encoder.transform(y_test)

    print('Start Generating')
    graphs = default_transform(
        DefaultClassifierGenerator(10, x_train.shape[1:]).generate())
    keras_model = graphs[0].produce_model()

    print('Start Training')
    ModelTrainer(keras_model, x_train, y_train, x_test, y_test,
                 True).train_model(max_no_improvement_num=100, batch_size=128)
    # Pass verbose by keyword; the third positional argument of evaluate is batch_size
    print(keras_model.evaluate(x_test, y_test, verbose=1))
Example #11
import keras
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D

from autokeras.utils import ModelTrainer

# Note: this snippet is truncated at the top; x_train, y_train, x_test, y_test,
# num_classes and input_shape are assumed to have been prepared beforehand.
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

ModelTrainer(model, x_train, y_train, x_test, y_test, True).train_model()
# model.fit(x_train, y_train,
#           batch_size=batch_size,
#           epochs=epochs,
#           verbose=1,
#           validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
Example #12
def test_model_trainer():
    model = DefaultClassifierGenerator(3,
                                       (28, 28, 3)).generate().produce_model()
    train_data, test_data = get_processed_data()
    ModelTrainer(model, train_data, test_data, Accuracy,
                 False).train_model(max_iter_num=3)
Example #13
from keras.datasets import cifar10

from autokeras.generator import DefaultClassifierGenerator
from autokeras.preprocessor import OneHotEncoder, DataTransformer
from autokeras.utils import ModelTrainer


if __name__ == '__main__':
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    print('Start Encoding')
    encoder = OneHotEncoder()
    encoder.fit(y_train)

    y_train = encoder.transform(y_train)
    y_test = encoder.transform(y_test)

    data_transformer = DataTransformer(x_train, augment=True)
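    # DataTransformer derives normalization statistics from x_train;
    # augment=True is expected to add train-time data augmentation.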

    train_data = data_transformer.transform_train(x_train, y_train)
    test_data = data_transformer.transform_test(x_test, y_test)

    print('Start Generating')
    graphs = [DefaultClassifierGenerator(10, x_train.shape[1:]).generate()]
    keras_model = graphs[0].produce_model()


    print('Start Training')
    loss, acc = ModelTrainer(keras_model,
                             train_data,
                             test_data,
                             True).train_model(max_no_improvement_num=100, batch_size=128)
    print(loss, acc)
Example #14
def test_model_trainer_classification():
    model = CnnGenerator(3, (28, 28, 3)).generate().produce_model()
    train_data, test_data = get_classification_dataloaders()
    ModelTrainer(model, train_data, test_data, Accuracy, classification_loss,
                 False).train_model(max_iter_num=3)
Example #15
def test_model_trainer_regression():
    model = CnnGenerator(1, (28, 28, 3)).generate().produce_model()
    train_data, test_data = get_regression_dataloaders()
    ModelTrainer(model, train_data, test_data, MSE, regression_loss,
                 False).train_model(max_iter_num=3)