# MNIST MLP example: train a small fully-connected classifier, optionally
# wrapping the model in ImportanceTraining so batches are drawn by
# importance sampling instead of uniformly.
# NOTE(review): relies on names defined elsewhere in this script
# (args, x_train/y_train, x_test/y_test, batch_size, epochs).
model = Sequential()

# Two hidden ReLU layers with light L2 weight decay; input is a
# flattened 28x28 image (784 features).
model.add(Dense(512, activation='relu',
                kernel_regularizer=l2(1e-5),
                input_shape=(784, )))
model.add(Dense(512, activation='relu', kernel_regularizer=l2(1e-5)))

# Linear logits followed by an explicit softmax activation layer.
model.add(Dense(10, kernel_regularizer=l2(1e-5)))
model.add(Activation('softmax'))

model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

# Either train through the importance-sampling wrapper (pre-sampling 5x
# the batch size of candidates) or fall back to plain Keras fitting.
wrapped = ImportanceTraining(model, presample=5) if args.importance_training else model

history = wrapped.fit(x_train, y_train,
                      batch_size=batch_size,
                      epochs=epochs,
                      verbose=1,
                      validation_data=(x_test, y_test))

# Final held-out evaluation always goes through the underlying model.
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# Scale pixel intensities from [0, 255] ints to [0, 1] float32 before training.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

# Wrap the model for importance-sampled training.
# NOTE(review): ImportanceTraining presumably comes from the
# keras `importance-sampling` library; k/presample/smooth control the
# sampling distribution and smoothing — confirm against its docs.
wrapped_model = ImportanceTraining(model, k=0.5, presample=64,
                                   adaptive_smoothing=True, smooth=0.5,
                                   forward_batch_size=64)

if not data_augmentation:
    # Plain fit on the normalized arrays, shuffling each epoch.
    print('Not using data augmentation.')
    wrapped_model.fit(x_train, y_train,
                      batch_size=batch_size,
                      epochs=epochs,
                      validation_data=(x_test, y_test),
                      shuffle=True)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    # (all normalization/whitening options are disabled here; only the
    # geometric augmentations configured below are active)
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        # NOTE(review): this call continues past the visible chunk —
        # the value of width_shift_range (and the rest of the script)
        # is outside this view.
        width_shift_range=