Example #1
def train_model(train_test_path):
    """
    Creates a model and performs training.
    """
    # Load train/test data
    train_test_data = np.load(train_test_path)
    x_train = train_test_data['X_train']
    y_train = train_test_data['y_train']

    print("x_train:", x_train.shape)
    print("y_train:", y_train.shape)

    del train_test_data

    x_train = np.expand_dims(x_train, axis=3)

    # Create network
    model = Sequential()
    model.add(Conv1D(128, 5, input_shape=x_train.shape[1:], padding='same', activation='relu'))
    model.add(MaxPooling1D(5))
    model.add(Conv1D(128, 5, padding='same', activation='relu'))
    model.add(MaxPooling1D(5))
    model.add(Dropout(0.5))

    model.add(Flatten())

    model.add(Dense(1024, kernel_initializer='glorot_uniform', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, kernel_initializer='glorot_uniform', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(256, kernel_initializer='glorot_uniform', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128, kernel_initializer='glorot_uniform', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(len(language_codes), kernel_initializer='glorot_uniform', activation='softmax'))

    model_optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(loss='categorical_crossentropy', optimizer=model_optimizer, metrics=['accuracy'])

    # Train
    model.fit(x_train, y_train,
              epochs=10,
              validation_split=0.10,
              batch_size=64,
              verbose=2,
              shuffle=True)

    model.save(model_path)
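The snippet assumes several names defined elsewhere in its project: numpy, the Keras classes, and the globals language_codes and model_path. A minimal header that would make it self-contained might look like the following sketch; the label list and output path are illustrative placeholders, not the original values.

# Hypothetical header for the snippet above; the values are assumptions.
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, Dropout, Flatten, Dense

language_codes = ['en', 'es', 'de', 'fr']  # placeholder label set
model_path = 'language_classifier.h5'      # placeholder save path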
Example #2
print(Training_img)
model = Sequential()
model.add(Conv2D(
    input_shape=(100,100,3),
    filters=32,
    kernel_size=(5,5),
    padding="same"
))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=64, kernel_size=(2, 2), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))  # output shape: (25, 25, 64)
model.add(Flatten())
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(Dense(512))
model.add(Activation("relu"))
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dense(4))
model.add(Activation("softmax"))
adam = Adam(lr=0.0001)
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x=Training_img, y=Training_label, epochs=number_batch, batch_size=batch_size, verbose=1)
model.save("./fruitfinder.h5")




Example #3
# (the cols_to_norm column list is truncated in this excerpt)
dia_norm = diabetes.copy()
dia_norm[cols_to_norm] = diabetes[cols_to_norm].apply(lambda x: (x - x.min()) /
                                                      (x.max() - x.min()))
x = dia_norm.drop(columns='Outcome')
y = dia_norm['Outcome']
X_train, X_test, y_train, y_test = train_test_split(x,
                                                    y,
                                                    test_size=0.33,
                                                    random_state=42)
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X_train, y_train, epochs=500, batch_size=100)
_, accuracy = model.evaluate(X_train, y_train)
predictions = model.predict(X_test)
rounded = [round(x[0]) for x in predictions]
pred = np.array(rounded)
count = 0
for i, j in zip(pred, y_test):
    if i == j:
        count += 1
print(count / y_test.size)
model.save('diabetes-model.h5')
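The counting loop above computes plain accuracy by hand. Assuming y_test is the pandas Series produced by train_test_split, a vectorized equivalent is:

# Vectorized accuracy: fraction of predictions matching the labels
accuracy = (pred == y_test.values).mean()
print(accuracy)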
Example #4
    if os.path.exists('LeNet_origin_lr=%f.h5' % lr):
        model = load_model('LeNet_origin_lr=%f.h5' % lr)
    sgd = SGD(lr=lr, decay=1e-5, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(
        train_x,
        train_y,
        batch_size=128,
        epochs=EPOCH,
        verbose=1,
        validation_split=0.05,
        callbacks=[TensorBoard(log_dir='./log/LeNet_origin_lr=%f.h5' % lr)])
    model.save('LeNet_origin_lr=%f.h5' % lr)
    del model
    gc.collect()

for bs in BS:
    model = Sequential()
    model.add(
        Conv2D(filters=6,
               kernel_size=(5, 5),
               activation='relu',
               padding='valid',
               name='C1',
               input_shape=LENET_INPUT_SHAPE))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='P1'))
    model.add(
        Conv2D(filters=16,
Example #5
def create_model(X, y, it=1, no_of_filters=32, kern_size=3,
                 max_p_size=3, drop_perc_conv=0.3, drop_perc_dense=0.2,
                 dens_size=128, val_split_perc=0.1, no_of_epochs=30,
                 optimizer="adam", random_search=False, batch_size=64):
    """Creates an architecture, train and saves CNN model.

    Returns:
        Dictionary with training report history.
    """

    y_train_cat = to_categorical(y)

    model = Sequential()

    model.add(Conv2D(no_of_filters,
                     kernel_size=(kern_size, kern_size),
                     activation='relu',
                     input_shape=(56, 56, 1),
                     padding='same'))

    model.add(Conv2D(no_of_filters,
                     kernel_size=(kern_size, kern_size),
                     activation='relu',
                     padding='same'))
    model.add(MaxPooling2D((max_p_size, max_p_size)))
    model.add(Dropout(drop_perc_conv))

    model.add(Conv2D(no_of_filters,
                     kernel_size=(kern_size, kern_size),
                     activation='relu',
                     padding='same'))
    model.add(Conv2D(no_of_filters,
                     kernel_size=(kern_size, kern_size),
                     activation='relu',
                     padding='same'))
    model.add(MaxPooling2D((max_p_size, max_p_size)))
    model.add(Dropout(drop_perc_conv))

    model.add(Flatten())

    model.add(Dense(dens_size, activation='relu'))
    model.add(Dropout(drop_perc_dense))

    model.add(Dense(36, activation='softmax'))

    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    early_stopping_monitor = EarlyStopping(patience=5)
    rlrop = ReduceLROnPlateau(monitor='val_acc', factor=0.5,
                              patience=3, verbose=1, min_lr=0.00001)

    history = model.fit(X,
                        y_train_cat,
                        validation_split=val_split_perc,
                        epochs=no_of_epochs,
                        callbacks=[early_stopping_monitor, rlrop],
                        batch_size=batch_size)

    history_dict = history.history

    if random_search:

        np.save(r"./models/random_search/hist/history_dict_{}.npy".format(it),
                history_dict)
        model.save(r"./models/random_search/models/CNN_{}.h5".format(it))

    else:

        np.save(r"./logs/history_dict_{}.npy".format(it), history_dict)
        model.save(r"./models/CNN_FF_{}.h5".format(it))

    return history_dict
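Because the history dict is written with np.save, it comes back wrapped in a 0-d object array; a small sketch of reading it back (the path is illustrative):

import numpy as np

# Recover a saved history dict; .item() unwraps the 0-d object array
history_dict = np.load("./logs/history_dict_1.npy", allow_pickle=True).item()
print(history_dict.keys())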
Example #6
model.add(Conv2D(64, (2, 2), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))

model.add(Flatten())
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(5, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(lr=0.0004),
              metrics=['accuracy'])

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

train_generator = train_datagen.flow_from_directory(base_dir,
                                                    target_size=(img_height,
                                                                 img_width),
                                                    batch_size=batch_size,
                                                    class_mode='categorical')

model.fit_generator(train_generator, samples_per_epoch=1000, epochs=50)

model.save('50_model.h5')
model.save_weights('50_weights.h5')
Example #7
def create_model(structure_id, model_name, classes, max_epochs, train_tweets,
                 train_labels, validate_tweets, validate_labels):
    model = Sequential()
    binary = False  # default, so the "if binary" branch below is always defined
    if structure_id == 0:
        binary = True
        model.add(GaussianNoise(0.1, input_shape=(140, input_len)))
        model.add(Conv1D(8, 3, activation="relu"))
        model.add(Dropout(0.3))
        model.add(Conv1D(8, 3, activation="relu"))
        model.add(Dropout(0.3))
        model.add(Conv1D(8, 3, activation="relu"))
        model.add(Dropout(0.3))
        model.add(Bidirectional(GRU(6, recurrent_dropout=0.5)))
        model.add(Dropout(0.3))
    if binary:
        model.add(Dense(1, activation="sigmoid"))
        model.compile(loss="binary_crossentropy",
                      optimizer="adam",
                      metrics=["accuracy"])
    else:
        model.add(Dense(len(classes), activation="softmax"))
        model.compile(loss="categorical_crossentropy",
                      optimizer="adam",
                      metrics=["accuracy"])

    losses = []
    if binary:
        train_outputs = train_labels
        validate_outputs = validate_labels
    else:
        train_outputs = to_categorical(train_labels)
        validate_outputs = to_categorical(validate_labels)

    bestLoss = float("inf")
    step = 0
    i = 0
    if balanced:
        train_weights = class_weight.compute_class_weight(
            "balanced", np.unique(train_labels), train_labels)
        validate_weights = class_weight.compute_class_weight(
            "balanced", np.unique(validate_labels), validate_labels)
        sample_weights = []
        print(train_weights)
        print(validate_weights)
        for label in validate_labels:
            sample_weights.append(validate_weights[label])
        sample_weights = np.array(sample_weights)
    else:
        train_weights = None
    got_worse = 0
    best_train_weights = train_weights.copy()
    while step < max_epochs and i < 130 and got_worse < tollerance:
        i += 1
        step += 1
        train_sample_weights = []
        print(train_weights)
        print(validate_weights)
        for label in train_labels:
            train_sample_weights.append(best_train_weights[label])
        train_sample_weights = np.array(train_sample_weights)

        print(best_train_weights)
        model.fit(np.array(train_tweets),
                  np.array(train_outputs),
                  epochs=1,
                  batch_size=16,
                  sample_weight=train_sample_weights)
        loss = model.evaluate(np.array(validate_tweets),
                              np.array(validate_outputs),
                              sample_weight=sample_weights)[0]
        losses += [loss]
        if loss > bestLoss:
            got_worse += 1
        else:
            if loss < bestLoss:
                model.save("../models/" + model_name)
                bestLoss = loss
            got_worse = 0
        print(step, loss, bestLoss)
        predictions = model.predict(np.array(validate_tweets))
        get_weight_multipliers(predictions, validate_labels)
Example #8
def neuralnet(filename):
    df = pd.read_csv(filename, header=None)

    with open("tokeniser.pickle", "rb") as input1:
        tokenizer = pickle.load(input1)

    ###### WARNING ######
    # ast.literal_eval() only evaluates Python literals, so unlike eval() it
    # cannot execute arbitrary code, though it can still be slow or crash on
    # malformed or deeply nested input.
    # TODO: Change from csv to pickle
    X = [ast.literal_eval(x) for x in tqdm(df[df.columns[1]].values)]
    Y = [ast.literal_eval(x) for x in tqdm(df[df.columns[0]].values)]

    training_size = 0.8
    max_words = 16

    X_train = sequence.pad_sequences(X[:int(training_size * len(X))],
                                     maxlen=max_words)
    X_test = sequence.pad_sequences(X[-int((1 - training_size) * len(X)):],
                                    maxlen=max_words)

    Y_train = sequence.pad_sequences(Y[:int(training_size * len(Y))], maxlen=1)
    Y_test = sequence.pad_sequences(Y[-int((1 - training_size) * len(Y)):],
                                    maxlen=1)

    embedding_size = 32
    model = Sequential()
    model.add(
        Embedding(len(tokenizer.word_index.items()) + 1,
                  embedding_size,
                  input_length=max_words))
    model.add(LSTM(100))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    batch_size = 300
    num_epochs = 20

    X_valid, y_valid = X_train[:batch_size], Y_train[:batch_size]
    X_train2, y_train2 = X_train[batch_size:], Y_train[batch_size:]

    filepath = "sentiment-ai-{epoch:02d}-{val_acc:.2f}.hdf5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=False,
                                 mode='max')
    callbacks_list = [checkpoint]

    model.fit(X_train2,
              y_train2,
              validation_data=(X_valid, y_valid),
              batch_size=batch_size,
              epochs=num_epochs,
              callbacks=callbacks_list)

    model.save("my-model.hdf5")
    return 0
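The TODO above points at replacing the CSV-plus-literal_eval round trip. A hedged sketch, assuming X and Y are the plain Python lists parsed inside neuralnet (the filename is illustrative):

import pickle

# One-off conversion: store the parsed X/Y lists with pickle so later runs
# can skip the per-row ast.literal_eval pass entirely.
with open("xy_pairs.pickle", "wb") as out:
    pickle.dump({"X": X, "Y": Y}, out)

with open("xy_pairs.pickle", "rb") as inp:
    data = pickle.load(inp)
X, Y = data["X"], data["Y"]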
Example #9
epochs = 5  # train for 5 epochs
model1.summary()  # print the model summary
model1.compile(
    loss='sparse_categorical_crossentropy',  # compile the model
    optimizer='adam',
    metrics=['accuracy'])
# hold out 20% of the training set for validation
history = model1.fit(x_train,
                     y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_split=0.2)

#----------------------------------------------- save the model, visualize --------------------------
# save the model
model1.save('model_CNN_text.h5')
# visualize the model architecture
plot_model(model1, to_file='model_CNN_text.png', show_shapes=True)
# load the model back
model = load_model('model_CNN_text.h5')
y_new = model.predict(x_train[0].reshape(1, 50))
# visualize the training results
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.savefig('Valid_acc.png')
plt.show()
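The 'acc'/'val_acc' history keys are the older Keras spellings; current tf.keras records 'accuracy'/'val_accuracy'. A version-tolerant lookup:

# Pick whichever accuracy key this Keras version recorded
acc_key = 'acc' if 'acc' in history.history else 'accuracy'
plt.plot(history.history[acc_key])
plt.plot(history.history['val_' + acc_key])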
Example #10
def create_class_weight(labels_dict, mu=0.1):
    # Log-scaled class weights: rare classes get weight > 1, common ones clamp to 1
    total = np.sum(list(labels_dict.values()))
    keys = labels_dict.keys()
    class_weight = dict()
    for key in keys:
        score = math.log((mu * total) / float(labels_dict[key]))
        class_weight[key] = score if score > 1.0 else 1.0
    return class_weight
labels_dict = {0: 37000, 1: 18871, 2: 11132, 3: 6062, 4: 4089,
               5: 3496, 6: 677, 7: 583, 8: 378, 9: 44}
labels_dict_train = {0: 56000, 1: 40000, 2: 33393, 3: 18184, 4: 12264,
                     5: 10491, 6: 2000, 7: 1746, 8: 1133, 9: 130}
class_weight_dict = create_class_weight(labels_dict_train)
# note: class_weight_dict is computed above but not passed to fit here
history = model.fit(train_70_x, train_70_y, validation_data=(train_30_x, train_30_y),
                    batch_size=4096, epochs=50)
model.save('my_model4.h5')
loss, accuracy = model.evaluate(x_test, y_test2)
pre_y = model.predict_classes(x_test)
y_test = np.array(y_test)
metrics = classification_report(y_test, pre_y)
print(metrics)
confusion_m = confusion_matrix(y_test, pre_y)
y_pred_pro = model.predict_proba(x_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, y_pred_pro, pos_label=1)
roc_auc = auc(fpr, tpr)
mat_plt(history)
plot_confusion_matrix(confusion_m)
roc(fpr, tpr, roc_auc)
model.save('my_modelm.h5')
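If the computed weights were meant to shape training, Keras's fit accepts them directly via the class_weight argument; a sketch reusing the names above:

# Apply the log-scaled class weights during training (sketch)
history = model.fit(train_70_x, train_70_y,
                    validation_data=(train_30_x, train_30_y),
                    batch_size=4096, epochs=50,
                    class_weight=class_weight_dict)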
def fpr_tpr(confusion_m):
    sum = 0
Example #11
model.add(Activation('relu'))
model.add(Conv2D(16, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(2))
model.add(Activation('softmax'))

model.summary()
model.build(input_shape=(None, 500, 500, 1))  # build expects the batch dimension too
model.compile(optimizer='adam',
              loss="categorical_crossentropy",
              metrics=['accuracy'])
model.fit(x, y, steps_per_epoch=2, epochs=30)

model.save("first.h5")
# recognizer.train(x_train,np.array(y_labels))
# recognizer.save("trainner.yml")
Example #12
def train_cifar10(batch_size: int,
                  learning_rate: float,
                  epochs: int,
                  experiment: Experiment,
                  model: Sequential = get_model(),
                  initial_epoch: int = 0,
                  training_datagen: ImageDataGenerator = ImageDataGenerator(),
                  scheduler: Callable[[int], float] = None,
                  early_stopping_th: Optional[int] = 250,
                  data_portion: float = 1.0,
                  find_lr: bool = False) -> None:
    preprocessing_fnc = training_datagen.preprocessing_function
    name = experiment.get_key()
    log_path, model_path = get_output_paths(name)
    data = get_cifar10_data(data_portion=data_portion)

    training_datagen.fit(data.x_train)
    log_images(data.x_train, training_datagen, experiment)
    log_input_images(data.x_train, data.y_train, training_datagen, experiment)

    opt = Adam(lr=learning_rate)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    log_model_plot(experiment, model)

    csv_cb = CSVLogger(log_path)
    keep_best_cb = KeepBest('val_acc')
    callbacks = [csv_cb,
                 keep_best_cb]  # [csv_cb, early_stopping_cb, keep_best_cb]
    if early_stopping_th is not None:
        early_stopping_cb = EarlyStopping('val_acc',
                                          patience=early_stopping_th,
                                          restore_best_weights=True,
                                          verbose=2)
        callbacks.append(early_stopping_cb)
    if scheduler is not None:
        scheduler.experiment_log(experiment=experiment,
                                 epochs=list(range(epochs)))
        callbacks.append(LearningRateScheduler(scheduler))
    if find_lr:
        lrf = LearningRateFinder(model=model)
        lrf.lrMult = (10e-1 / learning_rate)**(
            1.0 / (epochs * len(data.x_train) / batch_size))
        callbacks = [
            LambdaCallback(
                on_batch_end=lambda batch, logs: lrf.on_batch_end(batch, logs))
        ]

    model.fit_generator(training_datagen.flow(data.x_train,
                                              data.y_train,
                                              batch_size=batch_size),
                        steps_per_epoch=len(data.x_train) / batch_size,
                        epochs=epochs,
                        validation_data=(preprocessing_fnc(data.x_dev),
                                         data.y_dev),
                        shuffle=True,
                        callbacks=callbacks,
                        verbose=2,
                        initial_epoch=initial_epoch)
    model.save(model_path)
    experiment.log_asset(model_path)
    experiment.log_asset(log_path)

    if find_lr:
        experiment.log_figure('lr vs acc', lrf.plot_loss())

    log_final_metrics(experiment, model, data, preprocessing_fnc)
Example #13
model.add(LSTM(64, return_sequences=True))
model.add(LSTM(64, return_sequences=False))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
print(model.summary())

# Split into train/validation/test sets (e.g. 10000 samples -> 8000 train, 1000 validation, 1000 test)
train_valtest = 0.8
val_test = 0.5

train_x, train_y = train_X[:int(train_valtest*len(train_X))], train_Y[:int(train_valtest*len(train_Y))]
valtest_x, valtest_y = train_X[int(train_valtest*len(train_X)):], train_Y[int(train_valtest*len(train_Y)):]

val_x, val_y = valtest_x[:int(val_test*len(valtest_x))], valtest_y[:int(val_test*len(valtest_y))]
test_x, test_y = valtest_x[int(val_test*len(valtest_x)):], valtest_y[int(val_test*len(valtest_y)):]


model.fit(train_x, train_y, validation_data = (val_x, val_y), epochs=10, verbose=1)
scores = model.evaluate(test_x, test_y, verbose=0)

print('Test on %d samples' %len(test_x))
print('Test accuracy: %.2f%%' % (scores[1]*100))

model.save(getcwd()+'/GloVe-LSTM_model.h5')
print('Model saved to '+getcwd()+'/GloVe-LSTM_model.h5')

print("Total training time: %s seconds" % (time.time() - start_time))
Example #14
from keras import Sequential
from keras.layers import LSTM, Dense, Activation, Dropout
from data_transformD import scaler, train_X, train_Y, test_X, test_Y
import matplotlib.pyplot as plt
from numpy import concatenate
from math import sqrt
from sklearn.metrics import mean_squared_error

model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
History = model.fit(train_X,
                    train_Y,
                    epochs=100,
                    batch_size=64,
                    validation_data=(test_X, test_Y))
model.save("model/expModelD.h5")

plt.plot(History.history['loss'], label='train')
plt.plot(History.history['val_loss'], label='test')
plt.legend()
plt.show()
Example #15
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
#
# model.add(Conv2D(64, (3, 3), padding='same'))
# model.add(Activation('relu'))
# model.add(Conv2D(64, (3, 3)))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
#
# model.add(Flatten())
# model.add(Dense(512))
# model.add(Activation('relu'))
# model.add(Dropout(0.5))
model.add(Dense(4, activation="softmax"))
opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)
model.compile(optimizer=opt,
              loss="categorical_crossentropy",
              metrics=["accuracy"])
# model.compile(optimizer=opt,loss="sparse_categorical_crossentropy" ,metrics=["accuracy"])

model.fit(train_data_img, train_data_label, epochs=30, batch_size=20)

model.save("E:/sperm_image/HuSHem/model.h5")

for i in test_data_img:
    i = np.expand_dims(i, 0)
    result = model.predict(i)
    print(result)
print(test_data_label)
Example #16
def alex_net(x_train, y_train, x_test, y_test):
    y_train = keras.utils.to_categorical(y_train, num_classes=flower_types)
    y_test = keras.utils.to_categorical(y_test, num_classes=flower_types)

    model = Sequential()
    model.add(
        Conv2D(96, (11, 11),
               strides=(4, 4),
               input_shape=(image_size, image_size, 3),
               padding='valid',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.25))

    model.add(
        Conv2D(256, (5, 5),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.25))

    model.add(
        Conv2D(384, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(
        Conv2D(384, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(flower_types, activation='softmax'))
    sgd = SGD(lr=1e-2, decay=1e-9)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    history = model.fit(x_train,
                        y_train,
                        validation_split=0.1,
                        batch_size=100,
                        epochs=epochs)
    loss, acc = model.evaluate(x_test, y_test, batch_size=50)
    print('loss is {:.4f}'.format(loss) +
          ', acc is  {:.2f}%\n'.format(acc * 100))
    model_name = 'result/alex_model_epoch' + str(epochs) + '_' + str(
        round(acc * 100, 2)) + '.h5'
    model.save(model_name)
    save_history(history, 'result', str(round(acc * 100, 2)))
    # clear the Keras session
    keras.backend.clear_session()
    return model_name
Example #17
# model.add(Dropout(0.5))

model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))

model.add(Dense(100, activation='relu'))
model.add(Dropout(0.25))

# Output Layer
model.add(Dense(units=len(class_names), activation='softmax'))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])

history = LossHistory()
model.fit(x=x_train,
          y=y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          callbacks=[history])

model.save('my_model_0.h5')
score = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
history.loss_plot('epoch')
Example #18
    def __len__(self):
        return self.X.shape[0]

    def __getitem__(self, idx):
        return {"X": self.X[idx], "Y": self.Y[idx]}



chess_dataset = ChessValueDataset()

num_classes = 128

model = Sequential()
input_shape = 5, 8, 8
model.add(Conv2D(16, kernel_size=(3, 3), activation="relu", input_shape=input_shape, padding="same"))
model.add(Conv2D(32, kernel_size=(3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(num_classes, activation="softmax"))
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])  # compile needs a loss before fit

model.summary()

model.fit(x=X, y=y)

model.save("nets/value.pth")

Example #19
print(labels[193:])


model = Sequential()

model.add(Conv2D(128, (3,3), activation="relu", input_shape=(128,128,3)))
model.add(MaxPooling2D(2,2))
model.add(Conv2D(256, (3,3), activation="relu"))
model.add(MaxPooling2D(2,2))
model.add(Conv2D(256, (3,3), activation="relu"))
model.add(MaxPooling2D(2,2))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dense(n, activation="softmax"))

model.compile(optimizer="adam", loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True) , metrics=["acc"])

model.fit(train, labels, epochs=epochs, validation_data = (X_test, y_test))

model.save("model.h5")

score  = model.evaluate(X_test, y_test)

print("Total Accuracy  : ", score[1])
print("Successfully...")
print("Please Run python Real_Time.py")




Example #20
X_test = X_test.reshape(-1,96,96)
print("X_test", X_test.shape)
model = Sequential()
# x = (7049,96,96)
# y = (7049,30)
model.add(Flatten(input_shape=(96, 96)))
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.1))

model.add(Dense(64, activation="relu"))
model.add(Dense(30))

model.compile(optimizer='adam',
              loss='mse',
              metrics=['mae', 'accuracy'])
model.fit(X_train, y_train, epochs=50, batch_size=128, validation_split=0.2)
model.save('model.h5')

# Rebuild from the architecture JSON, then restore the weights
json_string = model.to_json()
model = model_from_json(json_string)
model.load_weights('model.h5', by_name=True)
# load_model is a standalone keras.models function, not a model method
model = load_model('model.h5')
def loaded_model():
  model = load_model('model.h5')
  return model
def show_results(images_index):
  pred = model.predict(X_test[images_index:(images_index + 1)])
  show_images(X_test[images_index], pred[0])
show_results(3)

Example #21
# (the model definition and the start of the compile call are truncated here;
#  the call ended with: metrics=['accuracy'])

history = model.fit(train_data,
                    train_labels_one_hot,
                    batch_size=256,
                    epochs=20,
                    verbose=1,
                    validation_data=(test_data, test_labels_one_hot))

[test_loss, test_acc] = model.evaluate(test_data, test_labels_one_hot)
print("Evaluation result on Test Data : Loss = {}, accuracy = {}".format(
    test_loss, test_acc))

#model.load_weights("mnist-model.h5")
#model.fit(train_x,train_y,batch_size=32,epochs=10,verbose=1)
model.save("mnistmodel.h5")
model.load_weights("mnistmodel.h5")
#model.predict(test_data[23],)
img = test_data[112]
test_img = img.reshape((1, 784))
img_class = model.predict_classes(test_img)
prediction = img_class[0]
classname = img_class[0]
print("Class: ", classname)
img = img.reshape((28, 28))
plt.imshow(img)
plt.title(classname)
plt.show()

print(history)
# Plot training & validation accuracy values
Example #22
model.add(Dense(500, activation='relu'))
model.add(BatchNormalization())
#model.add(Dropout(0.7))
model.add(Dense(100, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(20, activation='relu'))
model.add(BatchNormalization())
#model.add(Dropout(0.7))
model.add(Dense(2))
# model.compile(loss=keras.losses.MSE,
#               optimizer=keras.optimizers.SGD(lr=1e-3, decay=1e-4, momentum=0.9, nesterov=True),
#               metrics=['mse'])
model.compile(loss=keras.losses.MSE,
              optimizer=keras.optimizers.Adam(lr=0.5 * 1e-3),
              metrics=['mse'])
model.save('my_model.h5')

tensorboard = TensorBoard(log_dir=log_dir)
#%% load the model
if train_from_pretrained:
    model = load_model(pretrained_model)


#%% extract a batch
def prewhiten(x):
    if x.ndim == 4:
        axis = (1, 2, 3)
        size = x[0].size
    elif x.ndim == 3:
        axis = (0, 1, 2)
        size = x.size
Example #23
model.add(Conv1D(8, kernel_size=4, activation='relu', name='conv5'))
model.add(Flatten())
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='linear'))

# model setting
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
model.summary()

# train data: 80%, valid data: 20%
x_valid = x_train[int(0.8 * len(x_train)):, :, :]
y_valid = y_train[int(0.8 * len(y_train)):, :]
x_train = x_train[0:int(0.8 * len(x_train)), :, :]
y_train = y_train[0:int(0.8 * len(y_train)), :]

# shuffle the training set
index = [i for i in range(len(x_train))]
random.shuffle(index)
x_train = x_train[index]
y_train = y_train[index]

# model training and saving
hist = model.fit(x_train,
                 y_train,
                 batch_size=32,
                 epochs=30,
                 validation_data=(x_valid, y_valid))
model.save('CNN_model_dishwasher.h5')
pt.plot_history(hist)
Example #24
def find_features(text):
    text = text.replace("\n", "")
    text = text.replace("\r", "")
    seg = jieba.cut(text, cut_all=False, HMM=True)
    seg = [word for word in seg if word not in stopwords]
    text = " ".join(seg)
    textarray = [text]
    textarray = np.array(pad_sequences(input_tokenizer.texts_to_sequences(textarray), maxlen=maxLength))
    return textarray


def predict_result(text):
    features = find_features(text)
    predicted = model.predict(features, verbose=1)[0]  # we have only one sentence to predict, so take index 0
    prob = predicted.max()
    prediction = sentiment_tag[predicted.argmax()]
    return prediction, prob


# predict the review data
test_data = pd.read_excel(path_testing, encoding='utf-8')
test_data = test_data.astype({'评价内容': str, '情感': str})
test_data['预测情感'] = test_data['评价内容'].apply(lambda x: predict_result(x)[0])
test_data['预测情感'] = np.where(test_data['预测情感'] == 'pos', '正面', '负面')
accuracy = accuracy_score(test_data['情感'], test_data['预测情感'])
labels = ['正面', '负面']
cm = confusion_matrix(test_data['情感'], test_data['预测情感'])
sns.heatmap(cm, annot=True, fmt='d')

# Save the Model
model.save(model_path)
Example #25
model.add(BatchNormalization())
model.add(Dropout(0.17))
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.17))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', metrics=['acc'], loss='binary_crossentropy')
model.summary()
plot_model(model, to_file='model3.png', show_layer_names=True, show_shapes=True)
import time
start1 = time.perf_counter()  # monotonic timer; time.clock() no longer exists in Python 3.8+
history = model.fit(train_70_x, train_70_y, validation_data=(train_30_x, train_30_y),
                    batch_size=4096, epochs=50)
end1 = time.perf_counter()
t1 = end1 - start1
model.save('my_model5.h5')
start2 = time.perf_counter()
loss, accuracy = model.evaluate(x_test, y_test, batch_size=4096)
end2 = time.perf_counter()
t2 = end2 - start2
pre_y = model.predict_classes(x_test)
y_test = np.array(y_test)
metrics = classification_report(y_test, pre_y)
print(metrics)
confusion_m = confusion_matrix(y_test, pre_y)
y_pred_pro = model.predict_proba(x_test)[:, 0]
fpr, tpr, thresholds = roc_curve(y_test, y_pred_pro, pos_label=1)
roc_auc = auc(fpr, tpr)
mat_plt(history)
plot_confusion_matrix(confusion_m)
roc(fpr, tpr, roc_auc)
Example #26
# Separate into input (X) and output (Y)
print("Splitting into X and Y...")
sequences = np.array(sequences)
X, Y = sequences[:,0:-1], sequences[:,-1]
# Y = to_categorical(Y, num_classes=vocab_size)
seq_length = X.shape[1]
print('Done.')

# Create Model
model = Sequential()
model.add(Embedding(input_dim=vocab_size, output_dim=100, input_length=seq_length)) # Consider input_length = None
model.add(LSTM(100, return_sequences=True))
model.add(LSTM(100))
model.add(Dense(100, activation='relu'))
model.add(Dense(vocab_size, activation='softmax'))
model.summary()

# Compile model
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

# Fit model
epochs = 1
for e in range(epochs):
    model.fit(X, Y, batch_size=512, epochs=1)
    # if e % 10 == 0:
    model.save('./models/philosopher-king-epoch' + str(e) +'.h5')
    # save the tokenizer
    pickle.dump(tokenizer, open('./models/tokenizer.pkl', 'wb'))
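The manual save-per-epoch loop above can also be expressed with Keras's ModelCheckpoint callback; a hedged sketch reusing the path pattern above:

from keras.callbacks import ModelCheckpoint

# Save a snapshot at the end of every epoch via a callback instead of a manual loop
checkpoint = ModelCheckpoint('./models/philosopher-king-epoch{epoch}.h5',
                             save_best_only=False)
model.fit(X, Y, batch_size=512, epochs=epochs, callbacks=[checkpoint])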

Example #27
# building the model
from keras import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout

embedding_size = 32
model = Sequential()
model.add(Embedding(vocabulary_size, embedding_size, input_length=max_words))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# training the model
batch_size = 64
num_epochs = 3
# note: this manual validation split is created but not used by the fit call below
X_valid, y_valid = X_train[:batch_size], y_train[:batch_size]
X_train2, y_train2 = X_train[batch_size:], y_train[batch_size:]

model.fit(X_train,
          y_train,
          validation_data=(X_test, y_test),
          batch_size=batch_size,
          epochs=num_epochs)

model.save('Sentiment_analysis.h5')

scores = model.evaluate(X_test, y_test, verbose=0)
print('Test accuracy:', scores[1])
Example #28
               kernel_size=(3, 3),
               strides=(2, 2),
               activation='relu',
               padding='same'))
    model.add(AveragePooling2D())
    model.add(Flatten())
    model.add(Dropout(0.3))
    model.add(
        Dense(neuron_best, activation='tanh', kernel_regularizer=l2(0.005)))
    model.add(Dropout(0.3))
    model.add(Dense(3, activation="softmax", kernel_regularizer=l2(0.005)))
    model.compile(optimizer=RMSprop(learning_rate=rate_best),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    es = EarlyStopping(monitor='val_accuracy',
                       verbose=0,
                       patience=10,
                       restore_best_weights=True,
                       mode='max')
    model.fit(x_train,
              y_train,
              validation_data=(x_valid, y_valid),
              batch_size=128,
              callbacks=[es],
              verbose=0,
              epochs=500)
    """
    Finally, save the model to an h5 file in the deliverable directory
    """
    model.save('../deliverable/nn_task2.h5')
Example #29
train_image_data_flow = train_image_data_generator.flow_from_directory(
    "./Images/Training",
    target_size=(dimensions[0], dimensions[1]),
    batch_size=batch_size,
    class_mode="categorical")

validation_image_data_generator = ImageDataGenerator(rescale=1. / 255)

validation_image_data_flow = validation_image_data_generator.flow_from_directory(
    "./Images/Validation",
    target_size=(dimensions[0], dimensions[1]),
    batch_size=batch_size,
    class_mode="categorical")

sample_amount = 1361 + 1322 + 116 + 600
validation_sample_amount = 1021
epochs = 50
epoch_steps = sample_amount // batch_size
validation_steps = validation_sample_amount // batch_size

history = model.fit_generator(train_image_data_flow,
                              steps_per_epoch=epoch_steps,
                              epochs=epochs,
                              validation_data=validation_image_data_flow,
                              validation_steps=validation_steps)

print(history.history)

model.save("cnn_model_v5_50epoch.h5")
Example #30
                    save_best_only=True)
]

# training
history = model.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    shuffle=False,  # already shuffled during augmentation
    validation_data=(x_val, y_val),
    callbacks=cbks,
    verbose=1)

# save and plot result
model.save('./model/room_model_{}.h5'.format(cur_time))

train_error = [(1 - acc) * 100 for acc in history.history['acc']]
val_error = [(1 - acc) * 100 for acc in history.history['val_acc']]

fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
plt.tight_layout(pad=3, w_pad=2)
fig.suptitle('Messy Room Classifier', fontsize=16, fontweight='bold')
ax1.set_xlabel('Epochs', fontsize=14)
ax1.set_ylabel('Error(%)', fontsize=14)
ax1.plot(train_error, label='Training Error')
ax1.plot(val_error, label='Validation Error')
ax1.legend()

ax2.set_xlabel('Epochs', fontsize=14)
ax2.set_ylabel('Loss', fontsize=14)
Example #31
import numpy as np
from keras import Sequential
from keras.layers import Dense

data = np.random.random((1000, 32))
label = np.random.random((1000, 10))

model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(32, )))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))

model.compile('adam', 'categorical_crossentropy')

model.fit(data, label, epochs=100)

model.save('my_model.h5')
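To round-trip the saved file, the counterpart call is keras.models.load_model; a minimal sketch:

from keras.models import load_model

# Restore the model (architecture + weights + optimizer state) from the HDF5 file
restored = load_model('my_model.h5')
restored.summary()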

Example #32
    validation_generator = test_datagen.flow_from_directory(
        VALIDATE_DIR,
        target_size=(IMG_WIDTH, IMG_HEIGHT),
        batch_size=BATCH_SIZE,
        class_mode='binary')

    callbacks = [TensorBoard(log_dir="logs/{}".format(NAME))]

    model.fit_generator(train_generator,
                        callbacks=callbacks,
                        steps_per_epoch=TRAIN_STEP,
                        epochs=EPOCHS,
                        validation_data=validation_generator,
                        validation_steps=VALIDATION_STEP)

    score = model.evaluate_generator(validation_generator,
                                     VALIDATION_STEP // BATCH_SIZE,
                                     workers=12)
    scores = model.predict_generator(validation_generator,
                                     VALIDATION_STEP // BATCH_SIZE,
                                     workers=12)
    model.save(FILE_NAME)

    with open("logs/mylog.txt", "a") as f:
        f.write("\n\n--------" + NAME + "----------\n")
        f.write(str(score) + "\n")
except Exception as e:
    with open("logs/mylog.txt", "a") as f:
        f.write("\n\n--------" + NAME + "----------\n")
        f.write("Failed: " + str(e))