# Method of a model-wrapper class; assumes os, keras, Config, and
# get_data are imported at module level.
def __init__(self):
    self.model = None
    self.epochs_trained = 0

    # Load an existing model or build a new one
    if os.path.exists(Config.model_save_path):
        self.model = keras.models.load_model(Config.model_save_path)
        # Read the number of epochs already trained
        with open(Config.model_epochs_trained_path) as f:
            self.epochs_trained = int(f.readline())
    else:
        self.build_model()
    self.model.summary()
    # Load the data
    self.data = get_data()
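
For context, a minimal sketch of the save-side counterpart this constructor expects; the method name save is hypothetical, but the Config paths are the ones read above:

def save(self):
    # Persist the model plus the epoch counter that __init__ reads back
    self.model.save(Config.model_save_path)
    with open(Config.model_epochs_trained_path, 'w') as f:
        f.write(str(self.epochs_trained))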
Example #2
from data.preprocess import get_data, get_data_keras
import numpy as np
from skimage.transform import resize
from operator import itemgetter
from models import log_reg
from sklearn.metrics import accuracy_score

# Validation Examples
n_valid = 1000  # Max 100000
# Model Configuration
learning_rate = 0.09
n_epochs = 1000
batch_size = 1

dataset = get_data(n_valid)
X_valid, y_valid = dataset
X_valid = X_valid * 1.0  # promote to float before resizing

print("Building logistic regression model using MNIST dataset...")
# log_reg.sgd_optimization_MNIST()
print("Done.")

print("Downsizing validation set...")
X_valid = np.array([resize(x, (28, 28)) for x in X_valid])  # skimage's resize needs a target shape; 28x28 matches MNIST
print("Done.")

print("Predicting...")
predictions = log_reg.predict_MNIST(X_valid)
print("Accuracy score:")
print(accuracy_score(y_valid, predictions))
Example #3
from data.preprocess import get_data
from models import convnets
from keras.utils import np_utils
import numpy as np
import utils

# Other configuration (the rest of the config block, e.g. input_shape,
# nb_classes, num_examples, and validation, is truncated in this example)
savename = "cnn_task3_attempt1"

###
###

print("Building model...")
model = convnets.build_cnn(input_shape, nb_classes, depth, kernel_size,
                           pool_size, nb_filters, dropout, activation)

print("Compiling loss function")
model.compile(loss=loss_function, optimizer=optimizer, metrics=['accuracy'])

print("Retrieving and augmenting data...")
X, Y = get_data(num_examples)

if validation:
    # Split the data into training and validation sets
    train_set, valid_set = utils.split_train_valid(list(zip(X, Y)))
    X_train, y_train = map(np.array, zip(*train_set))
    X_valid, y_valid = map(np.array, zip(*valid_set))
    X_valid = X_valid.reshape(X_valid.shape[0], num_channels, img_rows,
                              img_cols)
    X_valid = X_valid.astype('float32')
    X_valid /= 255
    print(X_train.shape[0], 'train samples')
    print(X_valid.shape[0], 'validation samples')
    # convert class vectors to binary class matrices
    Y_valid = np_utils.to_categorical(y_valid, nb_classes)
else:
    # No validation split: use all of the data for training
    X_train, y_train = np.array(X), np.array(Y)
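
The example breaks off at this point. Below is a minimal sketch of the training-and-saving tail such a script typically has; it assumes Keras 1.x-era names (nb_epoch, np_utils) and config variables (batch_size, n_epochs) from the truncated header, and the save format mirrors the cnn1.json / cnn1.h5 pair that Example #5 loads.

# Hypothetical continuation: reshape, train, and persist the model
X_train = X_train.reshape(X_train.shape[0], num_channels, img_rows, img_cols)
X_train = X_train.astype('float32')
X_train /= 255
Y_train = np_utils.to_categorical(y_train, nb_classes)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=n_epochs,
          validation_data=(X_valid, Y_valid) if validation else None)

# Save architecture and weights so Example #5-style code can reload them
with open('../trained_models/%s.json' % savename, 'w') as f:
    f.write(model.to_json())
model.save_weights('../trained_models/%s.h5' % savename)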
Example #4
from data.preprocess import get_data, get_data_keras
from models import log_reg
import numpy as np
import six.moves.cPickle as pickle
from skimage.transform import resize
from sklearn.metrics import accuracy_score

# Training Configuration
n_train = 100000  # Max 100000
n_perturbed = 0
# Model Configuration
learning_rate = 0.09
n_epochs = 1000
batch_size = 1
load = False

# Get data
dataset = get_data(n_train, n_perturbed)
X, y = dataset
dataset_train = X[1000:], y[1000:]  # hold out the first 1000 examples for validation

print("Building logistic regression model")
log_reg.sgd_optimization(dataset_train, learning_rate, n_epochs, batch_size)

X_valid, y_valid = X[:1000], y[:1000]

predictions = log_reg.predict(X_valid)
print(accuracy_score(y_valid, predictions))
Example #5
from data.preprocess import get_data
import numpy as np
from keras.models import model_from_json
from skimage.transform import resize
from operator import itemgetter
# load json and create model
with open('../trained_models/cnn1.json', 'r') as json_file:
    loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)

# load weights into new model
loaded_model.load_weights("../trained_models/cnn1.h5")
print("Loaded model from disk")

X, Y = get_data()

num_example = len(X)

# Downsize every image to the 28x28 input the model expects
XX = np.zeros(shape=(num_example, 1, 28, 28), dtype="float32")
for i, x in enumerate(X):
    XX[i] = resize(x, (28, 28))

outs = loaded_model.predict(XX)


def argmax2(x):
    # Indices of the two largest entries of x, largest first
    a = sorted(enumerate(x), key=itemgetter(1), reverse=True)
    return (a[0][0], a[1][0])
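
A usage sketch: applied row-wise to the network's softmax output, argmax2 yields the two most likely classes per image (top2 is an illustrative name):

# Top-2 predicted classes for each example
top2 = [argmax2(out) for out in outs]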