Example #1
def train(data,
          nb_classes=10,
          save_path='cnn_mnist.h5',
          eval=True,
          conf=train_conf):
    assert data is not None
    assert data['train'] is not None and len(data['train']) == 2

    # Get training data
    X_train, y_train = data['train']

    nb_examples, img_rows, img_cols, nb_channels = X_train.shape
    nb_trains = int(nb_examples * (1. - conf['val_rate']))
    train_examples = X_train[:nb_trains]
    train_labels = y_train[:nb_trains]
    val_examples = X_train[nb_trains:]
    val_labels = y_train[nb_trains:]

    cnn = __get_cnn(input_shape=(img_rows, img_cols, nb_channels),
                    nb_classes=nb_classes)
    cnn.compile(optimizer=conf['cnn_params']['optimizer'],
                loss=conf['cnn_params']['loss'],
                metrics=['accuracy'])

    # train the model
    print('Training [{}] CNN, it may take a while...'.format(data['trans']))
    start = time.monotonic()
    history = cnn.fit(train_examples,
                      train_labels,
                      batch_size=conf['batch_size'],
                      epochs=conf['epoch'],
                      verbose=2,
                      validation_data=(val_examples, val_labels))
    train_cost = time.monotonic() - start
    logger.info('Done training. It took {:.2f} minutes.'.format(train_cost / 60.))

    if eval:
        scores_train = cnn.evaluate(train_examples,
                                    train_labels,
                                    batch_size=128,
                                    verbose=0)
        scores_val = cnn.evaluate(val_examples,
                                  val_labels,
                                  batch_size=128,
                                  verbose=0)
        logger.info('Evaluation on [{} set]: {}.'.format(
            'training', scores_train))
        logger.info('Evaluation on [{} set]: {}.'.format(
            'validation', scores_val))

        if data['test'] is not None:
            X_test, y_test = data['test']
            scores_test = cnn.evaluate(X_test,
                                       y_test,
                                       batch_size=128,
                                       verbose=0)
            logger.info('Evaluation on [{} set]: {}'.format(
                'testing', scores_test))

    # logger.info('Save the trained model to [{}].'.format(save_path))
    # cnn.save(save_path)

    checkpoints_file = save_path.split('/')[-1].split('.')[0]
    checkpoints_file = 'checkpoints_train-' + checkpoints_file + '.csv'
    checkpoints_file = os.path.join(LOG_DIR, checkpoints_file)

    # create the log directory if it does not exist
    if not os.path.isdir(LOG_DIR):
        os.makedirs(LOG_DIR)

    logger.info('Training checkpoints have been saved to file [{}].'.format(
        checkpoints_file))
    file.dict2csv(history.history, checkpoints_file)
    save_path = save_path.split('/')[-1].split('.')[0]
    save_path = 'hist_train-' + save_path + '.pdf'
    plot_training_history(history, save_path)

    return cnn
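
A minimal usage sketch for the helper above. The structure of the data dictionary is inferred from the assertions at the top of the function ('train' and 'test' hold (examples, labels) pairs and 'trans' names the transformation); the load_mnist loader and the 'clean' tag are hypothetical placeholders.

# Hypothetical usage sketch; load_mnist is an assumed helper, not part of the example.
(X_train, y_train), (X_test, y_test) = load_mnist()  # arrays of shape (N, 28, 28, 1)
data = {
    'trans': 'clean',             # transformation tag, printed during training
    'train': (X_train, y_train),
    'test': (X_test, y_test),
}
cnn = train(data, nb_classes=10, save_path='cnn_mnist.h5', eval=True)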
Example #2
def train_and_save(model_name, X, Y, validation_rate=0.2, need_augment=False):
    """
    Train a model over given training set, then
    save the trained model as given name.
    :param model_name: the name to save the model as
    :param X: training examples.
    :param Y: corresponding desired labels.
    :param need_augment: a flag whether to perform data augmentation before training.
    """
    warnings.warn(
        'This method is deprecated, it will be removed soon. '
        'Please use functions train() or train_model() to train a model, '
        'then save_model() to save the model.', DeprecationWarning)

    prefix, dataset, architect, trans_type = model_name.split('-')
    nb_examples, img_rows, img_cols, nb_channels = X.shape

    nb_validation = int(nb_examples * validation_rate)
    nb_training = nb_examples - nb_validation
    train_samples = X[:nb_training]
    train_labels = Y[:nb_training]
    val_sample = X[nb_training:]
    val_labels = Y[nb_training:]
    input_shape = (img_rows, img_cols, nb_channels)
    nb_classes = int(Y.shape[1])

    print('input_shape: {}; nb_classes: {}'.format(input_shape, nb_classes))
    print('{} training samples; {} validation samples.'.format(
        nb_training, nb_validation))

    # get corresponding model
    model = create_model(dataset,
                         input_shape=input_shape,
                         nb_classes=nb_classes)
    history = []
    scores = []
    if need_augment:
        # normalize samples
        train_samples = data.normalize(train_samples)
        val_sample = data.normalize(val_sample)
        # data augmentation
        datagen = ImageDataGenerator(
            rotation_range=15,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
        )
        datagen.fit(train_samples)
        # define an optimizer
        opt_rms = optimizers.RMSprop(lr=0.001, decay=1e-6)
        model.compile(loss='categorical_crossentropy',
                      optimizer=opt_rms,
                      metrics=['accuracy'])
        # perform training
        with tf.device('/device:GPU:0'):  # to run in google colab
            print('Training on /device:GPU:0')
            # train
            history = model.fit_generator(
                datagen.flow(train_samples,
                             train_labels,
                             batch_size=MODEL.BATCH_SIZE),
                steps_per_epoch=nb_training // MODEL.BATCH_SIZE,
                epochs=MODEL.EPOCHS,
                verbose=2,
                validation_data=(val_sample, val_labels),
                callbacks=[LearningRateScheduler(lr_schedule)])
            # evaluate on the validation set (also runs on the GPU)
            # verbose: integer. 0 = silent; 1 = progress bar; 2 = one line per epoch
            scores = model.evaluate(val_sample,
                                    val_labels,
                                    batch_size=128,
                                    verbose=0)
    else:
        # compile model
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        with tf.device('/device:GPU:0'):
            # train
            history = model.fit(train_samples,
                                train_labels,
                                epochs=MODEL.EPOCHS,
                                batch_size=MODEL.BATCH_SIZE,
                                shuffle=True,
                                verbose=1,
                                validation_data=(val_sample, val_labels))
            # evaluate on the validation set
            # verbose: integer. 0 = silent; 1 = progress bar; 2 = one line per epoch
            scores = model.evaluate(val_sample,
                                    val_labels,
                                    batch_size=128,
                                    verbose=0)

    # save the trained model
    model.save('{}/{}.h5'.format(PATH.MODEL, model_name))
    keras.models.save_model(model, '{}/{}_2.h5'.format(PATH.MODEL, model_name))
    # report
    print('Trained model has been saved to {}/{}.h5'.format(
        PATH.MODEL, model_name))
    print('Validation accuracy: {:.4f}; loss: {:.4f}'.format(
        scores[1], scores[0]))
    file_name = 'CheckPoint-{}-{}-{}.csv'.format(dataset, architect,
                                                 trans_type)
    file.dict2csv(history.history, '{}/{}'.format(PATH.RESULTS, file_name))
    plotTrainingResult(history, model_name)
    # delete the model after it's been saved.
    del model
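
Both this example and Example #4 hand LearningRateScheduler(lr_schedule) to Keras without showing lr_schedule itself. Below is a plausible step-decay schedule of the kind commonly paired with this CIFAR-10 setup; the epoch boundaries and rates are assumptions, not the original implementation.

from keras.callbacks import LearningRateScheduler

# Hypothetical step-decay schedule; the breakpoints and learning rates
# below are illustrative assumptions.
def lr_schedule(epoch):
    lr = 0.001
    if epoch > 75:
        lr = 0.0005
    if epoch > 100:
        lr = 0.0003
    return lr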
Example #3
def train(trainset, testset, processor=None, **kwargs):
    # get network
    model = cnn()

    X_train, Y_train = trainset
    X_test, Y_test = testset

    # apply transformation
    X_train = transform(X_train, processor)

    learning_rate = 0.001
    validation_rate = 0.2

    optimizer = kwargs.get('optimizer',
                           keras.optimizers.Adam(lr=learning_rate))
    loss_func = kwargs.get('loss', keras.losses.categorical_crossentropy)
    metrics = kwargs.get('metrics', 'default')

    desc = processor.description if processor is not None else 'none'
    print('Training weak defense [{}]...'.format(desc))
    print('>>> optimizer: {}'.format(optimizer))
    print('>>> loss function: {}'.format(loss_func))
    print('>>> metrics: {}'.format(metrics))

    nb_examples, img_rows, img_cols, nb_channels = X_train.shape

    nb_training = int(nb_examples * (1. - validation_rate))
    train_examples = X_train[:nb_training]
    train_labels = Y_train[:nb_training]
    val_examples = X_train[nb_training:]
    val_labels = Y_train[nb_training:]
    """
	compile data
	"""
    if metrics == 'default':
        model.compile(optimizer=optimizer,
                      loss=loss_func,
                      metrics=['accuracy'])
    else:
        model.compile(optimizer=optimizer,
                      loss=loss_func,
                      metrics=['accuracy', metrics])

    # train the model
    history = model.fit(train_examples,
                        train_labels,
                        batch_size=64,
                        epochs=20,
                        verbose=2,
                        validation_data=(val_examples, val_labels))

    model_dir = kwargs.get('model_dir', 'models')
    save_file = kwargs.get('model_name', 'demo')
    postfix = kwargs.get('model_format', '.h5')
    # make sure the target directory exists before saving
    os.makedirs(model_dir, exist_ok=True)
    model.save(os.path.join(model_dir, save_file + postfix))
    print('Saved the model to file [{}]'.format(
        os.path.join(model_dir, save_file + postfix)))

    # evaluate the model
    scores_train = model.evaluate(train_examples,
                                  train_labels,
                                  batch_size=128,
                                  verbose=0)
    scores_val = model.evaluate(val_examples,
                                val_labels,
                                batch_size=128,
                                verbose=0)

    X_test = transform(X_test, processor)
    scores_test = model.evaluate(X_test, Y_test, batch_size=128, verbose=0)
    """
	report
	"""
    print('\t\t\t loss, \t acc (BS/AE)')
    print('training set: {}'.format(scores_train))
    print('validation set: {}'.format(scores_val))
    print('test set: {}'.format(scores_test))
    print('')

    log_dir = kwargs.get('checkpoint_folder', 'checkpoints')
    os.makedirs(log_dir, exist_ok=True)
    save_file = save_file.replace(postfix, '')
    file.dict2csv(history.history,
                  '{}/checkpoint-{}.csv'.format(log_dir, save_file))
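
A usage sketch for this variant, which expects a processor object exposing a description attribute (used in the progress message above). The RotationProcessor class and its argument are hypothetical stand-ins.

# Hypothetical usage; RotationProcessor is an assumed transformation wrapper
# that provides a .description attribute and is understood by transform().
processor = RotationProcessor(degrees=90)
train((X_train, Y_train), (X_test, Y_test),
      processor=processor,
      model_dir='models',
      model_name='cnn_mnist_rotate90',
      model_format='.h5')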
Example #4
def train(model, X, Y, model_name, need_augment=False, is_BB=False, **kwargs):
    """
    Train a model on given dataset.
    :param model: the model to train
    :param dataset: the name of the dataset
    :param need_augment: a flag - whether we need to augment the data before training the model
    :param kwargs: for optimizer, loss function, and metrics
    :return: the trained model
    """
    print('INFO: model name: {}'.format(model_name))
    learning_rate = 0.001
    validation_rate = 0.2

    prefix, dataset, architect, trans_type = model_name.split('-')

    optimizer = kwargs.get('optimizer',
                           keras.optimizers.Adam(lr=learning_rate))
    loss_func = kwargs.get('loss', keras.losses.categorical_crossentropy)
    metrics = kwargs.get('metrics', 'default')

    print('INFO: compiler')
    print('>>> optimizer: {}'.format(optimizer))
    print('>>> loss function: {}'.format(loss_func))
    print('>>> metrics: {}'.format(metrics))

    nb_examples, img_rows, img_cols, nb_channels = X.shape

    if dataset == DATA.cifar_10:
        # mean-std normalization
        X = data.normalize(X)

    if not is_BB:
        nb_training = int(nb_examples * (1. - validation_rate))
        train_examples = X[:nb_training]
        train_labels = Y[:nb_training]
        val_examples = X[nb_training:]
        val_labels = Y[nb_training:]

    else:
        num_val = 9000
        # define nb_training here as well; it is needed for steps_per_epoch below
        nb_training = nb_examples - num_val
        train_examples = X[num_val:]
        train_labels = Y[num_val:]
        val_examples = X[:num_val]
        val_labels = Y[:num_val]
    """
    augment data
    """
    datagen = None
    if (DATA.cifar_10 == dataset):
        # normalize data (has been handled when loading the data)
        # data augmentation
        datagen = ImageDataGenerator(
            rotation_range=15,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
        )
        datagen.fit(train_examples)
    """
    compile data
    """
    if ('default' == metrics):
        model.compile(optimizer=optimizer,
                      loss=loss_func,
                      metrics=['accuracy'])
    else:
        model.compile(optimizer=optimizer,
                      loss=loss_func,
                      metrics=['accuracy', metrics])
    """
    train the model
    """
    if (DATA.cifar_10 == dataset):
        history = model.fit_generator(
            datagen.flow(train_examples,
                         train_labels,
                         batch_size=MODEL.BATCH_SIZE),
            steps_per_epoch=nb_training // MODEL.BATCH_SIZE,
            epochs=MODEL.EPOCHS,
            verbose=2,
            validation_data=(val_examples, val_labels),
            callbacks=[LearningRateScheduler(lr_schedule)])

    else:  # do not need to augment data
        history = model.fit(train_examples,
                            train_labels,
                            batch_size=MODEL.BATCH_SIZE,
                            epochs=MODEL.EPOCHS,
                            verbose=2,
                            validation_data=(val_examples, val_labels))
    """
    evaluate the model
    """
    scores_train = model.evaluate(train_examples,
                                  train_labels,
                                  batch_size=128,
                                  verbose=0)
    scores_val = model.evaluate(val_examples,
                                val_labels,
                                batch_size=128,
                                verbose=0)
    """
    report
    """
    print('\t\t\t loss, \tacc, \tadv_acc')
    print('Evaluation score on training set: {}'.format(scores_train))
    print('Evaluation score on validation set: {}'.format(scores_val))
    file_name = 'checkpoints-{}-{}-{}-{}.csv'.format(prefix, dataset,
                                                     architect, trans_type)
    file.dict2csv(history.history, '{}/{}'.format(PATH.RESULTS, file_name))
    plotTrainingResult(history, model_name)

    return model
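
Note that model_name must contain exactly four dash-separated fields, because the function unpacks model_name.split('-') into prefix, dataset, architect, and trans_type. A hedged usage sketch:

# The four-field name format is dictated by the split('-') above; the concrete
# values ('model', 'cifar10', 'cnn', 'clean') are illustrative assumptions.
model = create_model('cifar10', input_shape=(32, 32, 3), nb_classes=10)
model = train(model, X, Y, 'model-cifar10-cnn-clean', need_augment=True)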
Example #5
def train(dataset,
          model=None,
          trans_type=TRANSFORMATION.clean,
          save_path='cnn_mnist.h5',
          eval=True,
          **kwargs):
    """
    Train a cnn model on MNIST or Fashion-MNIST.
    :param dataset:
    :param model: a model to train.
    :param trans_type: transformation associated to the model.
    :param save_path: file name, including the path, to save the trained model.
    :param kwargs: customized loss function, optimizer, etc. for cleverhans to craft AEs.
    :return: the trained model
    """
    lr = 0.001
    validation_rate = 0.2
    optimizer = kwargs.get('optimizer', keras.optimizers.Adam(lr=lr))
    loss_fn = kwargs.get('loss', keras.losses.categorical_crossentropy)
    metrics = kwargs.get('metrics', 'default')

    logger.info('optimizer: [{}].'.format(optimizer))
    logger.info('loss function: [{}].'.format(loss_fn))
    logger.info('metrics: [{}].'.format(metrics))

    (X_train, Y_train), (X_test, Y_test) = data.load_data(dataset)
    X_train = data_utils.set_channels_last(X_train)
    X_test = data_utils.set_channels_last(X_test)

    # Apply transformation (associated to the weak defending model)
    X_train = data_utils.rescale(transform(X_train, trans_type))
    X_test = data_utils.rescale(transform(X_test, trans_type))

    nb_examples, img_rows, img_cols, nb_channels = X_train.shape
    nb_train_samples = int(nb_examples * (1. - validation_rate))
    train_examples = X_train[:nb_train_samples]
    train_labels = Y_train[:nb_train_samples]
    val_examples = X_train[nb_train_samples:]
    val_labels = Y_train[nb_train_samples:]

    if model is None:
        model = create_model(input_shape=(img_rows, img_cols, nb_channels))

    # Compile model
    if metrics == 'default':
        model.compile(optimizer=optimizer, loss=loss_fn, metrics=['accuracy'])
    else:
        model.compile(optimizer=optimizer,
                      loss=loss_fn,
                      metrics=['accuracy', metrics])

    # Train model
    batch_size = kwargs.get('batch_size', 128)
    epochs = kwargs.get('epochs', 20)

    start = time.monotonic()
    history = model.fit(train_examples,
                        train_labels,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=2,
                        validation_data=(val_examples, val_labels))
    cost = time.monotonic() - start
    logger.info('Done training. It took {:.2f} minutes.'.format(cost / 60.))

    if eval:
        scores_train = model.evaluate(train_examples,
                                      train_labels,
                                      batch_size=128,
                                      verbose=0)
        scores_val = model.evaluate(val_examples,
                                    val_labels,
                                    batch_size=128,
                                    verbose=0)
        scores_test = model.evaluate(X_test, Y_test, batch_size=128, verbose=0)

        logger.info('Evaluation on [{} set]: {}.'.format(
            'training', scores_train))
        logger.info('Evaluation on [{} set]: {}.'.format(
            'validation', scores_val))
        logger.info('Evaluation on [{} set]: {}.'.format(
            'testing', scores_test))

    logger.info('Save the trained model to [{}].'.format(save_path))
    model.save(save_path)

    checkpoints_file = save_path.split('/')[-1].split('.')[0]
    checkpoints_file = 'checkpoints_train_' + checkpoints_file + '.csv'
    checkpoints_file = os.path.join(LOG_DIR, checkpoints_file)

    # create the log directory if it does not exist
    if not os.path.isdir(LOG_DIR):
        os.makedirs(LOG_DIR)

    logger.info('Training checkpoints have been saved to file [{}].'.format(
        checkpoints_file))
    file.dict2csv(history.history, checkpoints_file)
    save_path = save_path.split('/')[-1].split('.')[0]
    save_path = 'hist_train_' + save_path + '.pdf'
    plot_training_history(history, save_path)

    return model
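
Finally, a usage sketch for this variant, which loads and transforms the data itself. The dataset key 'mnist' is an assumption consistent with the docstring; batch_size and epochs simply restate the kwargs defaults above.

# Usage sketch; 'mnist' as the dataset key is assumed, not confirmed by the example.
model = train('mnist',
              trans_type=TRANSFORMATION.clean,
              save_path='cnn_mnist.h5',
              eval=True,
              batch_size=128,
              epochs=20)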