Example #1
                                             K.epsilon())

    # return a single tensor value
    return _f1score
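
# NOTE: the top of this metric is cut off above. As a reference, a minimal
# reconstruction of the usual pattern (an assumption, not the original code):
import tensorflow.keras.backend as K


def f1score(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    precision = true_positives / (K.sum(K.round(K.clip(y_pred, 0, 1))) + K.epsilon())
    recall = true_positives / (K.sum(K.round(K.clip(y_true, 0, 1))) + K.epsilon())
    _f1score = 2 * (precision * recall) / (precision + recall + K.epsilon())

    # return a single tensor value
    return _f1score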


###############################################################################################################

# Compile, train
op = Adadelta(lr=1e-5)
# model.compile(optimizer=op, loss="sparse_categorical_crossentropy", metrics=["acc",f1score])
model.compile(optimizer=op,
              loss="sparse_categorical_crossentropy",
              metrics=["acc"])
stop = EarlyStopping(monitor='val_loss',
                     patience=20,
                     restore_best_weights=True,
                     verbose=1)
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1)
mcpath = 'C:/nmb/nmb_data/h5/speechvgg_mels_del4.h5'
mc = ModelCheckpoint(mcpath,
                     monitor='val_loss',
                     verbose=1,
                     save_best_only=True,
                     save_weights_only=True)
tb = TensorBoard(log_dir='C:/nmb/nmb_data/graph/' +
                 start.strftime("%Y%m%d-%H%M%S") + "/",
                 histogram_freq=0,
                 write_graph=True,
                 write_images=True)
history = model.fit(x_train,
                    y_train,
Example #2
print(inceptionv3.weights)

inceptionv3.trainable = False
inceptionv3.summary()
print(len(inceptionv3.weights))  # 26
print(len(inceptionv3.trainable_weights))  # 0

model = Sequential()
model.add(inceptionv3)
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.summary()

from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
es = EarlyStopping(monitor='val_loss', patience=15, mode='auto')
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])
model.fit(x_train,
          y_train,
          epochs=1000,
          batch_size=16,
          validation_split=0.2,
          verbose=1,
          callbacks=[es, lr])

loss = model.evaluate(x_test, y_test)
print(loss)
## Optimizers
#optimizer = Adam(lr=0.0001, beta_1 = 0.9, beta_2 = 0.999)    # Adam optimizer, MSE loss
# optimizer = RMSprop(learning_rate=0.001, rho= 0.9, momentum = 0.0, epsilon=0.0000001)    # RMSprop optimizer, MSE loss
#optimizer = SGD(learning_rate=0.0001,momentum=0.9,nesterov=True)  # SGD did not help much
optimizer = Adamax(lr=0.001, beta_1=0.9, beta_2=0.999)
#optimizer = Adagrad(lr=0.001, epsilon=1e-6)  # did not work
#optimizer = Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
#@optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)  # cannot be used
model.compile(loss='mean_squared_error', optimizer=optimizer)

# In[119]:

epochs = 200
batch_size = 24
early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1)
model.fit(train_x,
          train_y,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(val_x, val_y),
          callbacks=[early_stopping])

# In[108]:

batch_size = 30
mae = []
epochs = 1
for i in range(0, 100):
    print(i + 1)
Example #4
model.add(Flatten())

model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.summary()

model.compile(loss='binary_crossentropy', metrics=['acc'], optimizer='adam')
modelPath = './homework/project1/models/checkpoints/PJ-Conv2D-{val_loss:.4f}.hdf5'
checkPoint = ModelCheckpoint(filepath=modelPath,
                             monitor='val_loss',
                             save_best_only=True,
                             mode='auto')

earlyStopping = EarlyStopping(monitor='val_loss', mode='min', patience=20)
for i in range(1, 11, 2):

    hist = model.fit(x_train,
                     y_train,
                     epochs=300,
                     batch_size=16,
                     validation_split=0.2,
                     callbacks=[earlyStopping, checkPoint])

    loss, acc = model.evaluate(x_test, y_test)

    x_predict = x_test
    y_predict = model.predict(x_predict)

    print('loss', loss)
Example #5
plot_model(model, dpi=200)

# In[108]:

# Define the optimizer
# The labels are not one-hot encoded, so use sparse_categorical_crossentropy as the loss
model.compile(optimizer=Adam(0.01),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# All callbacks monitor val_accuracy
# EarlyStopping ends training when val_accuracy has not improved for 40 consecutive epochs
# ModelCheckpoint saves the model with the highest val_accuracy across all training epochs
# ReduceLROnPlateau learning-rate policy: if val_accuracy has not improved for 20 consecutive epochs, multiply the current learning rate by 0.1
callbacks = [
    EarlyStopping(monitor='val_accuracy', patience=40, verbose=1),
    ModelCheckpoint('audio_model/' + 'cnn_{val_accuracy:.4f}.h5',
                    monitor='val_accuracy',
                    save_best_only=True),
    ReduceLROnPlateau(monitor='val_accuracy',
                      factor=0.1,
                      patience=20,
                      verbose=1)
]

# Train the model
history = model.fit(x_train,
                    y_train,
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_data=(x_test, y_test),
    layer.trainable = True

model.summary()


# optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999,
#                  epsilon=None, decay=1e-6, amsgrad=False)

optimizer = SGD(learning_rate=0.001)

model.compile(optimizer=optimizer,
              loss="categorical_crossentropy",
              metrics=['accuracy', utils.CalculateF1Score])

# Set early stopping
cb_early_stopper = EarlyStopping(monitor='val_loss', patience=4)

# Set a learning rate reductor
learningRateReduction = ReduceLROnPlateau(
    monitor='val_accuracy', patience=3, verbose=1, factor=0.5, min_lr=0.00001)

# Fit the model
epochs = 30
batchSize = 10

history = model.fit(train_datagen.flow(xTrain, yTrain, batch_size=batchSize),
                    epochs=epochs, validation_data=(xValidate, yValidate),
                    verbose=1, steps_per_epoch=xTrain.shape[0] // batchSize,
                    callbacks=[learningRateReduction])

print('Model metrics name: {0}'.format(model.metrics_names))
                  loss=losses.binary_crossentropy,
                  metrics=['acc'])
    model.summary()
    return model


# In[19]:

model = get_model()
file_path = "cnn_res_deep_ptbdb.h5"
checkpoint = ModelCheckpoint(file_path,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True,
                             mode='max')
early = EarlyStopping(monitor="val_acc", mode="max", patience=5, verbose=1)
redonplat = ReduceLROnPlateau(monitor="val_acc",
                              mode="max",
                              patience=3,
                              verbose=2)
callbacks_list = [checkpoint, early, redonplat]  # early

model.fit(X,
          Y,
          epochs=1000,
          verbose=2,
          callbacks=callbacks_list,
          validation_split=0.1)
model.load_weights(file_path)

# ### Evaluation
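# The evaluation code is truncated here; a typical continuation, assuming a
# single sigmoid output and hypothetical X_test / Y_test arrays, might be:
from sklearn.metrics import accuracy_score, f1_score

pred_test = (model.predict(X_test) > 0.5).astype(int)
print("Test accuracy :", accuracy_score(Y_test, pred_test))
print("Test f1 score :", f1_score(Y_test, pred_test))
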
Example #8
y_in = Input(shape=(256, 1))
y_err = Input(shape=(256, 1))
vae = VariationalAutoEncoder(256, 1024, 16)

optimizer = tf.keras.optimizers.Adam(clipvalue=0.5)
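
# chi2, used in the compile call below, is not defined in this excerpt; a
# plausible sketch (an assumption, not the original implementation) is an
# error-weighted squared-residual loss closed over the per-sample errors:
def chi2(y_err):
    def loss(y_true, y_pred):
        return tf.reduce_mean(tf.square((y_true - y_pred) / y_err))
    return loss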

vae.compile(optimizer, loss=chi2(y_err))

# vae.fit(x_train, x_train, epochs=3, batch_size=64)

training_time_stamp = datetime.datetime.now(
    tz=pytz.timezone('Europe/London')).strftime("%Y-%m-%d_%H-%M-%S")

CB = EarlyStopping(monitor='val_loss',
                   min_delta=5e-5,
                   patience=100,
                   verbose=1,
                   mode='auto')
MC = ModelCheckpoint(
    '../../model_weights/model_{}.h5'.format(training_time_stamp),
    monitor='val_loss',
    mode="auto",
    save_best_only=True,
    verbose=1)
history = vae.fit_generator(training_generator,
                            epochs=8000,
                            verbose=2,
                            callbacks=[MC, CB],
                            validation_data=validation_generator)

np.savetxt("training_history/loss_history-{}.txt".format(training_time_stamp),
Example #9
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=(3,3), padding = 'SAME'))
model.add(Dropout(0.2))
model.add(Conv2D(32, kernel_size=(3,3), padding = 'SAME'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(32, activation= 'relu'))
model.add(Dense(32, activation= 'relu'))
model.add(Dense(10, activation= 'softmax'))
model.summary()
model.save('../data/h5/k52_1_model1.h5')  # save the model

#3. Compile, train
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
early_stopping = EarlyStopping(monitor = 'loss', patience = 5, mode = 'auto')
modelpath = '../data/modelCheckpoint/k52_1_MCK.h5_{epoch:02d}-{val_loss:.4f}.hdf5'   
# k45_mnist_37_0100(0.0100).hdf5
cp = ModelCheckpoint(filepath= modelpath , monitor='val_loss', save_best_only=True, mode = 'auto')
# filepath='(path)': the path where the weights are saved
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['acc'])
model.fit(x_train, y_train, epochs = 5, batch_size = 64, validation_split=0.2, verbose = 1 ,callbacks = [early_stopping, cp])

model.save('../data/h5/k52_1_model2.h5')
model.save_weights('../data/h5/k52_1_weight.h5')

#4. Evaluate, predict
loss = model.evaluate(x_test, y_test, batch_size=8)
print('loss : ', loss)

y_pred = model.predict(x_test[:10])
Example #10
from __future__ import print_function
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import utils
from tensorflow.keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping
from tensorflow.keras import backend

import numpy as np
from ML.models import ResNet_SNN
from ML.data import cifar

lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=5,
                               min_lr=0.5e-6)
early_stopper = EarlyStopping(min_delta=0.001, patience=10)
csv_logger = CSVLogger('resnet18SNN_cifar10.csv')

batch_size = 32
nb_classes = 10
nb_epoch = 200
data_augmentation = True

# input image dimensions
img_rows, img_cols = 32, 32
# the cifar10 images are RGB
img_channels = 3

# the data, shuffled and split between train and test sets
dataloader = cifar.CifarLoader('../../data/cifar-10-batches-py/')
X_train, y_train = dataloader.load_train()
              metrics=['categorical_accuracy'])

# generate callback to save best model w.r.t val_categorical_accuracy
if use_vgg:
    file_name = "vgg"
else:
    file_name = "dense"
checkpointer = ModelCheckpoint(
    "../data/models/" + file_name + "_ms_transfer_alternative_init." +
    "{epoch:02d}-{val_categorical_accuracy:.3f}." + "hdf5",
    monitor='val_categorical_accuracy',
    verbose=1,
    save_best_only=True,
    mode='max')
earlystopper = EarlyStopping(monitor='val_categorical_accuracy',
                             patience=10,
                             mode='max',
                             restore_best_weights=True)
history = model.fit_generator(train_generator,
                              steps_per_epoch=1000,
                              epochs=10000,
                              callbacks=[checkpointer, earlystopper],
                              validation_data=validation_generator,
                              validation_steps=500)
initial_epoch = len(history.history['loss']) + 1

# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers. We will freeze the bottom N layers
# and train the remaining top layers.

# let's visualize layer names and layer indices to see how many layers
# we should freeze:
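
# Sketch of the step described above, which is cut off in this excerpt; the
# cutoff index N is an assumption to be chosen from the printed layer list.
for i, layer in enumerate(model.layers):
    print(i, layer.name)

N = 100  # hypothetical cutoff: freeze everything below this index
for layer in model.layers[:N]:
    layer.trainable = False
for layer in model.layers[N:]:
    layer.trainable = True

# recompile with a small learning rate so the new trainable flags take effect
from tensorflow.keras.optimizers import SGD
model.compile(optimizer=SGD(learning_rate=1e-4, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['categorical_accuracy'])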
Example #12
for fold, (train_index, valid_index) in enumerate(kfold.split(f_train, la_tr)):
    print("{}train {}th fold{}".format('==' * 20, fold + 1, '==' * 20))
    y_ = to_categorical(la_tr, num_classes=19)
    model = MCNN_model_v2(feat_lens, y_.shape[1])

    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc'])
    model.summary()
    plateau = ReduceLROnPlateau(monitor="val_acc",
                                verbose=1,
                                mode='max',
                                factor=0.5,
                                patience=20)
    early_stopping = EarlyStopping(monitor='val_acc',
                                   verbose=1,
                                   mode='max',
                                   patience=30)
    checkpoint = ModelCheckpoint(f'models/fold{fold}_mlp.h5',
                                 monitor='val_acc',
                                 verbose=0,
                                 mode='max',
                                 save_best_only=True)

    csv_logger = CSVLogger('../logs/log.csv', separator=',', append=True)
    history = model.fit(
        [f_train[train_index] for f_train in f_trains],
        y_[train_index],
        epochs=500,
        batch_size=128,
        verbose=1,
        shuffle=True,
Example #13
def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision


def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
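

# recall_m is referenced above but falls outside this excerpt; a minimal sketch
# consistent with precision_m would be:
def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall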


#3 Compile, train
es = EarlyStopping(monitor='val_loss', patience=20, mode='min')
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=10, mode='min')
path = '../Project01_data/9.cp/find_cocacola_{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(path, monitor='val_loss', mode='min', save_best_only=True)

model.compile(optimizer='adam',
              metrics=['acc', f1_m],
              loss='binary_crossentropy')
hist = model.fit_generator(train_generator,
                           steps_per_epoch=len(x_train) // batch,
                           epochs=100,
                           validation_data=valid_generator,
                           callbacks=[es, lr, cp])

#4 Evaluate, Predict

loss, acc, f1_score = model.evaluate(test_generator)
print("loss : ", loss)
output_folder = 'models'
if not os.path.exists(output_folder):
    os.makedirs(output_folder)

model.load_weights(os.path.join(output_folder, model_name + '.hdf5'))

# training parameters
batch_size = 128
nb_epoch = 400

# callbacks
checkpointer = ModelCheckpoint(filepath=os.path.join(output_folder,
                                                     model_name + '.hdf5'),
                               save_best_only=True)
early_stopping = EarlyStopping(patience=10)
tensorboard = TensorBoard()

# training loop
model.fit(
    RotNetDataGenerator(train_filenames,
                        input_shape=input_shape,
                        batch_size=batch_size,
                        one_hot=False,
                        preprocess_func=preprocess_input,
                        crop_center=True,
                        crop_largest_rect=True,
                        shuffle=True),
    steps_per_epoch=len(train_filenames) / batch_size,
    epochs=nb_epoch,
    validation_data=RotNetDataGenerator(test_filenames,
Example #15
        metric_list = [SparseCategoricalAccuracy()]
        output_shape = 2
        train_labels = np.reshape(train_labels, (-1))
        val_labels = np.reshape(val_labels, (-1))

    # Load model
    model = mb.model_selector(model_name, input_shape, output_shape,
                              model_arguments)

    callbacks = None
    if stpc_type.lower() == 'epoch' or stpc_type.lower() == 'epochs':
        callbacks = None
    elif stpc_type.lower() == "earlystopping" or stpc_type.lower(
    ) == "early_stopping":
        arguments = cgf['TRAIN']['stopping_criteria']['arguments']
        es = EarlyStopping(**arguments)
        callbacks = [es]
        for key, value in arguments.items():
            print('\t{}: {}'.format(key, value))
    else:
        print('Unknown stopping criteria')
        callbacks = None
    print('')
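
    # Illustrative only: a plausible shape for the stopping-criteria block that
    # feeds the EarlyStopping arguments above (keys and values are assumptions).
    example_stopping_criteria = {
        'type': 'early_stopping',
        'arguments': {
            'monitor': 'val_loss',
            'patience': 10,
            'min_delta': 1e-4,
            'restore_best_weights': True,
        },
    }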

    #model = tf.keras.models.load_model('model/300/best_model_resnet.h5')
    model.load_weights('model/302/best_model_resnet.h5',
                       by_name=True,
                       skip_mismatch=True)

    opt = tf.keras.optimizers.Adam()
    # Compile model
Example #16
        X_train_cont, X_cal_cont, X_test_cont = get_continuous_arrays(
            list_df=[train, cal, test], config=config)
        Y_train, Y_cal, Y_test = get_targets_arrays(list_df=[train, cal, test],
                                                    config=config)

        X_train_cont, X_val_cont, Y_train, Y_val = train_test_split(
            X_train_cont, Y_train, test_size=0.1, random_state=1337)

        checkpoint = ModelCheckpoint("mlp.h5",
                                     monitor="val_loss",
                                     verbose=1,
                                     save_best_only=True,
                                     mode="min")
        early = EarlyStopping(monitor="val_loss",
                              mode="min",
                              patience=20,
                              verbose=1)
        redonplat = ReduceLROnPlateau(monitor="val_loss",
                                      mode="min",
                                      patience=10,
                                      verbose=2)
        callbacks_list = [checkpoint, early, redonplat]

        # normalized conformal prediction for multi
        print("Normalized conformal prediction for MULTI NN %s" % i)

        start_multi = time.time()

        model, model_repr = two_model_mlp(
            n_continuous=n_cont,
            n_outputs=n_out,
def main():
    my_ds = xr.open_mfdataset('/lambda_stor/data/rjackson/coverage_product/*.nc')
    labels = my_ds['time'].values
    labels = np.array([get_label(dt64_to_dt(x)) for x in labels])
    print(my_ds.variables.keys()) 
    time = my_ds['time'].values
    range_bins = my_ds['range_bins'].values
    feature_list = ['snrgt3.000000', 'snrgt5.000000', 'snrgt10.000000']

    
    x = np.concatenate([my_ds[x].values for x in feature_list], axis=1)
    feature_labels = []
    for feat in feature_list:
        for i in range(len(my_ds.range_bins.values)):
            feature_labels.append('%s%d' % (feat, my_ds.range_bins[i]))

    valid = np.where(labels > -1)[0]
    x = x[valid, :]
    labels = labels[valid]
    time = time[valid]
    pct_clear = len(np.argwhere(labels == 0))/len(labels)
    # Since dataset is biased to clear values, remove half of clear values
    where_clear = np.argwhere(labels == 0)
    for inds in where_clear:
        if np.random.random(1) > pct_clear:
            labels[inds] = -1

    valid = np.where(labels > -1)[0]
    x = x[valid, :]
    labels = labels[valid]
    time = time[valid]

    x = np.where(np.isfinite(x), x, 0)
    x = scale(x)
    np.savez('snrscp.npz', x=x, labels=labels, time=time, range_bins=range_bins)
    x_train, x_test, y_train, y_test = train_test_split(x, labels, test_size=0.20)
    
    epoch_no = 1
    #y_train = tf.one_hot(y_train, depth=4).numpy()
    #y_test = tf.one_hot(y_test, depth=4).numpy()
    y = labels
    #train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    #test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    model = nn_classifier(x_train, num_layers=12, num_channel_start=64)
    model.summary()
    model.compile(optimizer=Adam(lr=0.0001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    checkpointer = ModelCheckpoint(
        filepath=('/homes/rjackson/arming_the_edge/models/nnclassifier-{epoch:03d}.hdf5'),
        verbose=1)
    early_stop = EarlyStopping(patience=200)
    model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=3000, callbacks=[checkpointer, early_stop], initial_epoch=epoch_no)

    
    #history = pd.DataFrame(dicts['history'])
    

    #fname = 'train'
    #model = 'model_nonrf'
    #for feat in feature_list:
    #    fname += feat
    #    model += feat
    #fname += 'optimized_nonrf.csv'
    #model += '.json'
    #with open(fname, 'w') as f:
    #    f.write('Features used: ')
    #    for feat in feature_list:
    #        f.write('%s  ' % feat)
    #    f.write('\n')
    #    res.to_csv(f)
    
    #num_rounds = len(res['test-merror-mean'].values) 
    #bst = xgb.train(params, dtrain, num_boost_round=num_rounds, 
    #                callbacks=[xgb.callback.reset_learning_rate(lr)],
    #                evals=[(dtest, 'test')])

    #bst.save_model(model)
    #y_predict = bst.predict(dtest)
    #y_all_predict = bst.predict(dall)
    #y_train_predict = bst.predict(dtrain)
    y_all_predict = model.predict(x)
    y_predict = model.predict(x_test)
    y_train_predict = model.predict(x_train)

    predicted_labels_df = xr.Dataset({'label_true': labels, 'label_pred':
                                      y_all_predict,
                                      'label_train_pred': y_train_predict,
                                      'label_test_pred': y_predict,
                                      'label_train': y_train,
                                      'label_test': y_test})
    predicted_labels_df.to_netcdf('classification_nnn.nc')
    print("Accuracy score for test set: %f" % accuracy_score(y_test, y_predict))
    
    my_ds.close()
train_datagen = DataGen_curt(params, train_data, nsteps, nclone_factor,
                             npartial_mult)
val_datagen = CloneGen_curt(params, val_data, fufis=True)

model, just_trnsf, just_unet = unet_sep(params,
                                        weight_ccpf=weight_ccpf,
                                        lr=params['lr'])

print('Loading weights!!')
#weights_name = dnnfolder + '/weights/decrypt_weights.hdf5'
weights_name = dnnfolder + '/weights/changes_adam_rotatingclonegen.hdf5'
model.load_weights(weights_name)
weights_name_next = dnnfolder + '/weights/changes_adam_rotatingclonegen2.hdf5'

callbacks = [
    EarlyStopping(monitor='loss', patience=25, verbose=1, min_delta=1e-9),
    ReduceLROnPlateau(monitor='loss',
                      factor=0.1,
                      patience=10,
                      verbose=1,
                      min_delta=1e-9),
    ModelCheckpoint(monitor='val_loss',
                    mode='min',
                    filepath=weights_name_next,
                    save_best_only=True,
                    save_weights_only=True,
                    verbose=1),
    CSVLogger(logs_folder + '/hist_' +
              weights_name_next.split('/')[-1].split('.')[0] + '.csv')
]
# %%time
# module_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1"
# bert_layer = hub.KerasLayer(module_url, trainable=True)

vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)

train_input = bert_encode(data.text.values, tokenizer, max_len=160)
test_input = bert_encode(data1.text.values, tokenizer, max_len=160)
train_labels = data.user_suggestion.values

model = build_model(bert_layer, max_len=160)
model.summary()

es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=100)

train_history = model.fit(
    train_input, train_labels,
    validation_split=0.2,
    callbacks=[es], # Early stopping
    epochs=3,
    batch_size=16
)

model.save('model.h5')

test_pred = model.predict(test_input)

test_pred
Example #20
#2. Modeling
model = Sequential()
model.add(Conv1D(filters=10, kernel_size=(5), input_shape=(32 * 32, 3)))
model.add(MaxPool1D(pool_size=(3)))
model.add(Flatten())
model.add(Dense(1))

#3. Compile, train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
'''
EarlyStopping
'''
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(
    monitor='loss', patience=20, mode='auto'
)  # wait until the monitored loss has gone 20 epochs without improving, then stop; mode can be min, max, or auto
model.fit(x_test,
          y_test,
          epochs=10,
          batch_size=1,
          validation_data=(x_val, y_val),
          callbacks=[early_stopping])

#4. Evaluate, predict
loss, mae = model.evaluate(x_test, y_test)
print("loss : ", loss)
print("mae : ", mae)

y_predict = model.predict(x_test)
#print(y_predict)
Example #21
           input_shape=(30, 1, 1)))
model.add(MaxPooling2D(pool_size=1))
model.add(Dense(4, activation='relu'))
model.add(Flatten())
model.add(Dense(11))
model.add(Dense(11))
model.add(Dense(3))
model.add(Dense(1, activation='sigmoid'))  # apply sigmoid only to the final layer

#3. Compile, train
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

modelpath6 = '../data/modelCheckpoint/k46_6_mnist_{epoch:02d}-{val_loss:.4f}.hdf5'
# 02d: integer zero-padded to two digits, .4f: float with four decimal places
# so for 0.01, 02d gives 01 and .4f gives 0.0100, e.g. k45_mnist_0100.hdf5
es = EarlyStopping(monitor='val_loss', patience=10)
cp = ModelCheckpoint(filepath=modelpath6,
                     monitor='val_loss',
                     save_best_only=True,
                     mode='auto')
# ModelCheckpoint writes to filepath every time a new minimum is reached.
# Used to record the weight values to a file: the optimal weights (lowest val_loss).

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
#### use binary_crossentropy when it is a binary classification problem (outputs only 0 or 1)
hist = model.fit(x_train,
                 y_train,
                 epochs=20,
                 batch_size=16,
                 verbose=1,
                 validation_split=0.2,
def lstm_prediction():
    """
    start the burn in with a year's data - 9 month and 3 month split
    retrain model every 10 days
    """
    #    df_ftr = pd.read_csv(r'../data/ftr_tgt.csv')
    df_ftr = read_cleaned_data()
    lookback = 60
    target = 'target'
    df_ftr = df_ftr.dropna()
    # df_ftr['date'] = pd.to_datetime(df_ftr['date'])
    # df_ftr = df_ftr.set_index('date')
    # df_ftr = df_ftr.resample('3Min').last()
    # df_ftr[target] = df_ftr[target]+1
    df_burn = df_ftr.loc[df_ftr.index.year == 2018]
    train_period = 9
    test_period = 11
    n_period = 12
    df_train = df_burn.loc[df_burn.index.month <= train_period]
    df_val = df_burn.loc[((df_burn.index.month > train_period) &
                          (df_burn.index.month <= test_period))]
    df_test = df_burn.loc[((df_burn.index.month > test_period) &
                           (df_burn.index.month <= n_period))]

    ftr_cols = [x for x in df_train.columns if x != 'target']
    print(f'train data shape is {df_train.shape}')
    print(f'validation data shape is {df_val.shape}')

    mm_scaler = preprocessing.MinMaxScaler()
    X_train = mm_scaler.fit_transform(df_train[ftr_cols])
    df_check = pd.DataFrame(X_train)
    X_val = mm_scaler.transform(df_val[ftr_cols])
    X_test = mm_scaler.transform(df_test[ftr_cols])
    df_check = pd.DataFrame(X_val)
    X_train_lstm = []
    y_train_lstm = []
    for i in range(lookback, len(X_train), 3):
        X_train_lstm.append(X_train[i - lookback:i, :])
        y_train_lstm.append(df_train[target].iloc[i])

    X_train_lstm, y_train_lstm = np.array(X_train_lstm), np.array(y_train_lstm)
    X_val = np.concatenate((X_train, X_val), axis=0)
    X_test = np.concatenate((X_val, X_test), axis=0)
    X_val_lstm = []
    y_val_lstm = []
    X_test_lstm = []
    y_test_lstm = []
    for i in range(len(X_train), len(X_val), 3):
        X_val_lstm.append(X_val[i - lookback:i, :])
        y_val_lstm.append(df_val[target].iloc[i - len(X_train)])

    for i in range(len(X_val), len(X_test), 3):
        X_test_lstm.append(X_test[i - lookback:i, :])
        y_test_lstm.append(df_test[target].iloc[i - len(X_test)])

    X_val_lstm, y_val_lstm = np.array(X_val_lstm), np.array(y_val_lstm)
    X_test_lstm, y_test_lstm = np.array(X_test_lstm), np.array(y_test_lstm)

    mdl_nm = 'LSTM'
    model = create_lstm_model(n_lookback=X_train_lstm.shape[1],
                              n_features=X_train_lstm.shape[2],
                              lr=0.001)
    earlyStopping = EarlyStopping(monitor='val_accuracy',
                                  patience=300,
                                  verbose=0,
                                  mode='max',
                                  min_delta=0.001)
    mcp_save = ModelCheckpoint(
        f'../models/{mdl_nm}_train_split{train_period}vs{n_period - train_period}.h5',
        save_best_only=True,
        monitor='val_accuracy',
        mode='max')
    # reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min')
    csv_logger = CSVLogger(
        f'../models/{mdl_nm}_train_split{train_period}vs{n_period - train_period}.log'
    )

    callback_ls = [earlyStopping, mcp_save, csv_logger]
    class_weight = {0: 20, 1: 80}
    print('start run model')
    history = model.fit(
        X_train_lstm,  # Features
        y_train_lstm,  # Target
        epochs=2000,  # Number of epochs
        verbose=1,  # Show a progress bar
        batch_size=2**10,  # Number of observations per batch
        validation_data=(X_val_lstm, y_val_lstm),  # Data for evaluation
        callbacks=callback_ls,
        class_weight=class_weight)  # Weight the loss per class
    plot_model_loss(
        history,
        outfile=
        f'../models/{mdl_nm}_split{train_period}vs{n_period - train_period}')
    model = load_model(
        f'../models/{mdl_nm}_train_split{train_period}vs{n_period - train_period}.h5'
    )
    train_pred_y = model.predict(X_train_lstm)
    # test_pred_y = model.predict(X_test_lstm)
    val_pred_y = model.predict(X_val_lstm)
    test_pred_y = model.predict(X_test_lstm)
    #
    #
    df_train_pred = pd.DataFrame(y_train_lstm)
    df_train_pred.columns = ['target']
    df_train_pred['pred_prob'] = train_pred_y
    plot_roc_curve(df_train_pred, target, 'train')
    prob_cut = Find_Optimal_Cutoff(df_train_pred[target],
                                   df_train_pred['pred_prob'])
    df_train_pred['pred_bi'] = (df_train_pred['pred_prob'] > prob_cut[0])
    confusion_matrix(df_train_pred[target], df_train_pred['pred_bi'])
    print(
        classification_report(df_train_pred[target], df_train_pred['pred_bi']))

    df_val_pred = pd.DataFrame(y_val_lstm)
    df_val_pred.columns = ['target']
    df_val_pred['pred_prob'] = val_pred_y
    plot_roc_curve(df_val_pred, target, 'val')
    prob_cut = Find_Optimal_Cutoff(df_val_pred[target],
                                   df_val_pred['pred_prob'])
    df_val_pred['pred_bi'] = (df_val_pred['pred_prob'] > prob_cut[0])
    confusion_matrix(df_val_pred[target], df_val_pred['pred_bi'])
    print(classification_report(df_val_pred[target], df_val_pred['pred_bi']))

    df_test_pred = pd.DataFrame(y_test_lstm)
    df_test_pred.columns = ['target']
    df_test_pred['pred_prob'] = test_pred_y
    plot_roc_curve(df_test_pred, target, 'test')
    prob_cut = Find_Optimal_Cutoff(df_test_pred[target],
                                   df_test_pred['pred_prob'])
    df_test_pred['pred_bi'] = (df_test_pred['pred_prob'] > prob_cut[0])
    confusion_matrix(df_test_pred[target], df_test_pred['pred_bi'])
    print(classification_report(df_test_pred[target], df_test_pred['pred_bi']))
    # df_val_pred = df_val_pred.reset_index(drop=True)
    # plt.figure()
    # plt.scatter(df_val_pred.index, df_val_pred[target], s=5, marker='.')
    # plt.savefig(cfg.local_projfolder + 'temp/visualization/plot.png')
    clear_session()
    del model
    gc.collect()
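

# Find_Optimal_Cutoff is defined outside this excerpt; a minimal sketch of one
# common implementation (picking the ROC threshold that maximizes Youden's J),
# shown here as an assumption rather than the original code:
from sklearn.metrics import roc_curve


def Find_Optimal_Cutoff(target, predicted):
    fpr, tpr, thresholds = roc_curve(target, predicted)
    j = tpr - fpr                         # Youden's J statistic per threshold
    return [thresholds[np.argmax(j)]]     # a list, so callers can use prob_cut[0]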
Example #23
x_train = sequence.pad_sequences(x_train, maxlen=max_length)
x_test = sequence.pad_sequences(x_test, maxlen=max_length)

# skip-gram with negative sampling data preprocessing

# skip-gram with negative sampling model
input_x = Input(batch_shape=(None, max_length))
target_x = Input(batch_shape=(None, max_length))

Embed_input = Embedding(max_features, embedding_dim)(input_x)
Embed_target = Embedding(max_features, embedding_dim)(target_x)

Concat = Concatenate(axis=1)([Embed_input, Embed_target])

Output_x = Dense(1, activation='sigmoid')(Concat)
Output_x_reshaped = Reshape((-1, 1))(Output_x)
model_skipgram = Model([input_x, target_x], Output_x_reshaped)
model_skipgram.compile(loss='binary_crossentropy', optimizer='adam')

earlystopping = EarlyStopping(patience=5)
model_skipgram.fit([x_train, x_train],
                   y_train,
                   validation_data=([x_test, x_test], y_test),
                   callbacks=[earlystopping],
                   epochs=100,
                   batch_size=100)

# save parameters
model_skipgram.save(
    'c:/Users/soohan/MC_python_study/python_nlp/skipgram_model.h5')
Example #24
model6.add(Conv1D(32, 5, activation='relu'))
model6.add(MaxPooling1D(3))
model6.add(Conv1D(32, 5, activation='relu'))
model6.add(GlobalMaxPooling1D())
model6.add(Dense(1))

model6.compile(optimizer=RMSprop(), loss='mae', metrics=['mae'])
print(model6.summary())

# Train
#######
m2_callbacks = [
    # interrupt training when there is no more improvement:
    # patience=2 means interrupt training when mae has stopped improving
    # for more than 2 epochs. mae MUST be listed in the metrics of the compile step
    EarlyStopping(monitor='mae', patience=2),
    # saves the current weights after every epoch
    # only overwrite the model file when val_loss has improved
    ModelCheckpoint('weather__v6.h5', monitor='val_loss', save_best_only=True)
]
history6 = model6.fit(train_gen,
                      steps_per_epoch=500,
                      epochs=20,
                      validation_data=val_gen,
                      callbacks=m2_callbacks,
                      validation_steps=val_steps)
metrics_df = pd.DataFrame(history6.history)
metrics_df.to_csv('history6.csv', index=False)

# Save
######
Example #25
        encode_cate([content[1] for content in train_data], cat_to_id))
    data_test = encode_sentences([content[0] for content in test_data],
                                 word_to_id)
    label_test = to_categorical(
        encode_cate([content[1] for content in test_data], cat_to_id))

    data_train = sequence.pad_sequences(data_train, maxlen=args.max_len)
    data_test = sequence.pad_sequences(data_test, maxlen=args.max_len)

    model = TextCNNLSTM(args.max_len, args.max_features,
                        args.embedding_size).build_model()
    model.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])

    logger.info('Starting training...')
    callbacks = [
        ModelCheckpoint('./model.h5', verbose=1),
        EarlyStopping(monitor='val_accuracy', patience=2, mode='max')
    ]

    history = model.fit(data_train,
                        label_train,
                        batch_size=args.batch_size,
                        epochs=args.epochs,
                        callbacks=callbacks,
                        validation_data=(data_test, label_test))

    model.summary()
    label_pre = model.predict(data_test)
    pred_argmax = label_pre.argmax(-1)
    label_test = label_test.argmax(-1)
    print(evaluate(label_test, pred_argmax, categories))
Example #26
           input_shape=(28, 28, 1)))  # 28x28x1 -> 24x24x16

model.add(MaxPooling2D(pool_size=(2, 2)))  # 24x24x16 -> 12x12x16

model.add(
    Conv2D(64,
           kernel_size=(5, 5),
           activation='relu',
           kernel_initializer='he_normal'))  # 12x12x16 -> 8x8x64

model.add(MaxPooling2D(pool_size=(2, 2)))  # 8x8x64 -> 4x4x64

model.add(Flatten())  # 4x4x64-> 1024
model.add(Dense(10, activation='softmax'))  # 1024 -> 10

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])

# Check the constructed model
# SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))

early_stopping = EarlyStopping(patience=1, verbose=1)
model.fit(x=x_train,
          y=y_train,
          batch_size=128,
          epochs=100,
          verbose=1,
          validation_data=(x_test, y_test),
          callbacks=[early_stopping])
model.add(Dropout(0.2))
model.add(MaxPooling1D(2))
model.add(Conv1D(300, 3))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(200, activation='sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(120, activation='sigmoid'))
model.add(Dense(60, activation='sigmoid'))
model.add(Dense(30, activation='sigmoid'))
model.add(Dense(1, activation='sigmoid'))

# 3. Compile, train
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
early_stopping = EarlyStopping(monitor='val_loss', patience=30, mode='auto')
modelpath= '../data/modelcheckpoint/k54_conv1d_cancer_checkpoint.hdf5'
cp = ModelCheckpoint(modelpath, monitor='val_loss', save_best_only=True, mode='auto')
model.fit(x_train, y_train, epochs=400, validation_split=0.2, callbacks=[early_stopping, cp], batch_size=16)

# 4. Evaluate, predict
loss, acc = model.evaluate(x_test, y_test, batch_size=16)

print("loss : ", loss)
print("acc : ", acc)


# TODO: code this so the results come out as 0 or 1

# loss= 0.046165917068719864 
# acc =0.9912280440330505
print(x_pred)

# model structure
model = Sequential()
model.add(LSTM(150, input_shape=(3,7), activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(30, activation='relu'))
model.add(Dense(15, activation='relu'))
model.add(Dense(3))

model.summary()

# compile
model.compile(loss='mse', optimizer='adam')

# EarlyStopping
from tensorflow.keras.callbacks import EarlyStopping
es = EarlyStopping(monitor = 'loss', patience = 30, mode='min')

# fit
hist = model.fit(x_train, y_train, epochs= 300, batch_size= 3, verbose = 1,
                 validation_split=0.2, callbacks = [es])

# evaluate
evaluate = model.evaluate(x_test, y_test, batch_size=3, verbose=2)
print(evaluate)

# predict
y_pred = model.predict(x_pred)
print(y_pred)
Example #29
sess = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(sess)

##--------------------------- SETTING AREA ------------------------------##
loading = instant_data()
df, mode = loading.hourly_instant(), 'hour'
# df,mode = loading.daily_instant(),'day'
if mode == 'hour': n_past, n_future = 24 * 6, 72
elif mode == 'day': n_past, n_future = 60, 30
st = 'CPY012'
target, start_p, stop_p, host_path = station_sel(st, mode)
DLtype = '06_Lv1wave_cAonly'
#------------ DL PARAMETER ---------------------#
callback_early_stopping = EarlyStopping(monitor='val_loss',
                                        patience=5,
                                        verbose=2)
reduce_lr = tf.keras.callbacks.LearningRateScheduler(lambda x: 1e-5 * 0.90**x)
callbacks = [callback_early_stopping, reduce_lr]
#my_optimizer = SGD(lr=0.01, decay=0, momentum=0.9, nesterov=True)

#--------------------------- 4 Yr Edit -----------------------------------#
# host_path = './CPY012/4Yr_flood/'
# start_p='2014-10-01'
# stop_p='2017-10-01'
split_date = '2016-11-01'
#n_pca = 4

syn = ''
# Yscale = False # scaler Y before put in model
allscale = True  # scale X before put in model
Example #30
def main(_argv):
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    for physical_device in physical_devices:
        tf.config.experimental.set_memory_growth(physical_device, True)
    if FLAGS.training_source == 'weight':
        model = YoloV3Tiny(classes=FLAGS.num_classes, training=True)
        model_pretrained = YoloV3Tiny(FLAGS.size,
                                      training=True,
                                      classes=FLAGS.num_classes)
        model_pretrained.load_weights(weigths_path)
        model.get_layer("yolo_darknet").set_weigths(
            model_pretrained.get_layer("yolo_darknet").get_weigths())

    elif FLAGS.training_source == 'model':
        model = tf.keras.models.load_model(FLAGS.source_path, compile=False)

    anchors = yolo_tiny_anchors
    anchor_masks = yolo_tiny_anchor_masks
    #model.load_weights(weights_path).expect_partial()
    model.summary()

    train_dataset = dataset.load_fake_dataset()
    classes_names = [c.strip() for c in open(FLAGS.classes).readlines()]
    train_dataset = dataset.load_tfrecord_dataset(FLAGS.datasets,
                                                  FLAGS.classes, FLAGS.size)
    train_dataset = train_dataset.shuffle(buffer_size=512)
    train_dataset = train_dataset.batch(FLAGS.batch_size)
    train_dataset = train_dataset.map(lambda x, y: (
        dataset.transform_images(x, FLAGS.size),
        dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    val_dataset = dataset.load_fake_dataset()
    val_dataset = val_dataset.batch(FLAGS.batch_size)
    val_dataset = val_dataset.map(lambda x, y: (
        dataset.transform_images(x, FLAGS.size),
        dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))

    optimizer = tf.keras.optimizers.Adam(lr=FLAGS.learning_rate)
    loss = [
        YoloLoss(anchors[mask], classes=FLAGS.num_classes)
        for mask in anchor_masks
    ]

    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    callbacks = [
        ReduceLROnPlateau(verbose=1),
        EarlyStopping(patience=5, verbose=1),
        ModelCheckpoint('./checkpoints/yolov3_train_test_' + FLAGS.method +
                        '.h5',
                        verbose=1,
                        save_best_only=True),
        TensorBoard(log_dir='logs')
    ]

    history = model.fit(train_dataset,
                        epochs=FLAGS.epochs,
                        callbacks=callbacks,
                        validation_data=val_dataset)
    """