Example #1
def main():
    # Fraction of the data held out for testing
    TEST_PERCENT = 0.2

    benign, malignant = create_base_data('Xdata.npy', 'ydata.npy')
    X, y = create_subset_of_data(malignant, benign, 2000)
    X = X.astype('float32') / 255.0  # scale pixel values to [0, 1]
    print(X.shape)
    print(y.shape)
    x_train, x_test, y_train, y_test = train_test_split(
        X, y, test_size=TEST_PERCENT, random_state=2)

    custom_resnet_model = get_cnn_model()
    custom_resnet_model.fit(x_train, y_train, batch_size=32, epochs=12,
                            shuffle=True, validation_split=0.1)

    scores = custom_resnet_model.evaluate(x_test, y_test)
    print("%s: %.2f%%" % (custom_resnet_model.metrics_names[1], scores[1] * 100))

    # Saves model along with weights
    save_model(custom_resnet_model,'testCnnModel1')
    '''
    tf.keras.models.save_model(custom_resnet_model, 'testCnnModel1.h5')
    converter = tf.lite.TFLiteConverter.from_keras_model_file('testCnnModel1.h5')
    tflite_model = converter.convert()
    open("converted_model1.tflite","wb").write(tflite_model)
    '''
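    # Note: the commented-out block above uses the TF1 converter API
    # (from_keras_model_file), which was removed in TF2. A rough TF2
    # equivalent would be:
    #
    #     converter = tf.lite.TFLiteConverter.from_keras_model(custom_resnet_model)
    #     tflite_model = converter.convert()
    #     with open('converted_model1.tflite', 'wb') as f:
    #         f.write(tflite_model)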
    predictions = custom_resnet_model.predict(x_test)
    plot_roc(predictions, y_test)
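The `plot_roc` helper is not shown in this example. A minimal sketch of what it could look like, assuming binary labels and sigmoid outputs (only the name and signature come from the call above; the body is an assumption):

import matplotlib.pyplot as plt
from sklearn.metrics import auc, roc_curve


def plot_roc(predictions, y_true):
    # Compute false/true positive rates from the predicted probabilities.
    fpr, tpr, _ = roc_curve(y_true, predictions)
    plt.plot(fpr, tpr, label='ROC (AUC = %.3f)' % auc(fpr, tpr))
    plt.plot([0, 1], [0, 1], linestyle='--')  # chance line
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.legend()
    plt.show()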
Example #2
from keras.callbacks import ReduceLROnPlateau

import models
from utils import get_images

train_dir = 'data'
x_train, y_train = get_images(train_dir)

num_train_samples = len(x_train)

img_width = img_height = 32
num_color_channels = 3

print('Number of examples: {}'.format(num_train_samples))
assert num_train_samples > 32

model = models.get_cnn_model(img_width, img_height, num_color_channels)
model.summary()  # summary() prints the table itself; wrapping it in print() adds a stray "None"

reduce_lr = ReduceLROnPlateau(monitor='mean_absolute_error',
                              factor=0.5,
                              patience=5,
                              min_lr=0.001,
                              verbose=1)

model.fit(x_train, y_train, batch_size=32, epochs=80, callbacks=[reduce_lr])

model.save('model.h5')
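Note that `ReduceLROnPlateau` here watches the training metric: no validation data is passed to `fit`, so there is no `val_`-prefixed quantity to monitor. Also, if the model is compiled with Keras's default learning rate of 0.001, `min_lr=0.001` would prevent any reduction from ever taking effect. A variant that monitors validation error instead (the split fraction and learning-rate floor are assumptions):

reduce_lr = ReduceLROnPlateau(monitor='val_mean_absolute_error',
                              factor=0.5, patience=5, min_lr=1e-5, verbose=1)
model.fit(x_train, y_train, batch_size=32, epochs=80,
          validation_split=0.1, callbacks=[reduce_lr])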
Example #3
def train(X_train, y_train, X_test, y_test, target_function):
    # trying with CNN model first
    model = get_cnn_model(num_amino_acids, max_sequence_size, max_num_functions, target_function)

    # moving to LSTM
    # model = get_lstm_model(num_amino_acids, max_sequence_size, max_num_functions)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    # if target_function != '':
    #     loss_fn = 'categorical_crossentropy'
    # else:
    #     loss_fn = 'binary_crossentropy'

    loss_fn = 'binary_crossentropy'
    model.compile(loss=loss_fn,
                  optimizer='adam',
                  metrics=['accuracy', 'recall', 'precision', 'fbeta_score'])

    save_model_metatdata(model, batch_size, nb_epoch)

    visualization_filename = "../results/" + exp_id + "-learning.png"
    logging.info("Metrics plot file: %s" % visualization_filename)
    cb_vis_hist = LossHistory(visualization_filename, model_snapshot_directory, exp_id)
    # best_fbeta_score = 0.00

    for epoch in range(nb_epoch):
        logging.info('')
        logging.info("----------")
        logging.info("Epoch %d/%d" % (epoch+1, nb_epoch))
        logging.info("----------")
        logging.info('')
        # csv_logger = CSVLogger('training.log')
        # if a saved weights file exists, load it; otherwise train

        # checkpoint = ModelCheckpoint(weights_model_filename, monitor='val_loss',
        #                                 verbose=0, save_best_only=False,
        #                                 save_weights_only=True,
        #                                 mode='auto', period=1)

        if target_function != '':
            class_weights = None # {0 : 1., 1: 100.}
        else:
            class_weights = None

        hist = model.fit(X_train, y_train,
                         batch_size=batch_size,
                         nb_epoch=1,  # Keras 1.x argument; renamed 'epochs' in Keras 2
                         callbacks=[cb_vis_hist],
                         validation_data=(X_test, y_test),
                         class_weight=class_weights,
                         verbose=1)

        # FIXME: best_fbeta_score = from hist
        best_fbeta_score = -0.0

        # dump these metrics as our calculation is different
        # metrics_line = 'TRAIN: '
        # for s in ['loss', 'acc', 'precision', 'recall', 'fbeta_score']:
        #     metrics_line += "%s: %.5f - " %(s, (hist.history[s])[-1])
        # logging.info(metrics_line)


        # no need to validate since we're doing it manually
        # eval_log = predict_and_eval(model, X_train, y_train, X_test, y_test)
        # cb_vis_hist.on_epoch_end(epoch, eval_log)

        # save best fbeta_score
        # new_fbeta_score = eval_log['val_fbeta_score']
        # if new_fbeta_score > best_fbeta_score:
        #     best_fbeta_score = new_fbeta_score
        #     # also save the best model
        #     model_save_filename = model_snapshot_directory + '/'+ exp_id + \
        #                                 '-saved-model.h5'
        #     model.save(model_save_filename)
        #     logging.info("-- saved best model with f measure: %.5f on epoch: %d" % (best_fbeta_score, epoch))

    return (model, best_fbeta_score)
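The string metrics 'recall', 'precision', and 'fbeta_score' (like `nb_epoch`) are Keras 1.x APIs that were removed in Keras 2. A rough modern equivalent of the compile call using tf.keras metric objects (there is no built-in one-line F-beta metric, so it is omitted here):

import tensorflow as tf

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy',
                       tf.keras.metrics.Precision(name='precision'),
                       tf.keras.metrics.Recall(name='recall')])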
Example #4
import os

from keras import backend as keras_backend
from representation import *
import models
import h5py

keras_backend.set_image_dim_ordering('th')  # Keras 1.x API; 'th' = Theano-style channels-first

train_dir = os.path.join('data', 'train')
x_train, y_train = get_images(train_dir)

validation_dir = os.path.join('data', 'validation')
x_validation, y_validation = get_images(validation_dir)

num_train_samples = len(x_train)
num_validation_samples = len(x_validation)

image_width = image_height = 32

num_color_channels = 1

print('num_train_samples', num_train_samples)
print('num_validation_samples', num_validation_samples)

model = models.get_cnn_model(num_color_channels, image_width, image_height)
model.summary()  # summary() prints itself; no need to wrap it in print()

model.fit(x_train, y_train, batch_size=32, epochs=300)

model.save('clock.h5')
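`set_image_dim_ordering('th')` is the Keras 1.x call for selecting Theano-style channels-first tensors; in Keras 2 the equivalent is:

from keras import backend as keras_backend

keras_backend.set_image_data_format('channels_first')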
Example #5
def test_save_model(self):
    test_model = get_cnn_model()
    test_model_name = 'test_model'
    modelUtils.save_model(test_model, test_model_name)
    self.assertTrue(os.path.isfile(test_model_name + ".h5"))
    self.assertTrue(os.path.isfile(test_model_name + ".json"))
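The helper under test is not shown. A minimal sketch of a `save_model` that would satisfy these assertions, assuming the common architecture-as-JSON / weights-as-HDF5 split (only the name and file extensions come from the test; the body is an assumption):

def save_model(model, model_name):
    # Serialize the architecture to JSON and the weights to HDF5.
    with open(model_name + '.json', 'w') as json_file:
        json_file.write(model.to_json())
    model.save_weights(model_name + '.h5')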
Example #6
#### GET MODEL CONFUSION MATRICES ####

# NN training parameters
EPOCHS_CNN = 10
BATCH_SIZE_CNN = 32

EPOCHS_LSTM = 50
BATCH_SIZE_LSTM = 32

### Initialize models. ###

# CNN
model_cnn = models.get_cnn_model(
    **model_params.get_params('cnn',
                              n_rows=segments[0].shape[0],
                              n_cols=segments[0].shape[1],
                              num_classes=np.unique(seg_target).size))
clf_cnn = KClassifier(model_cnn, EPOCHS_CNN, BATCH_SIZE_CNN)
clf_cnn._estimator_type = 'classifier'

# LSTM
model_lstm = models.get_lstm_model(
    **model_params.get_params('lstm',
                              n_rows=segments[0].shape[0],
                              n_cols=segments[0].shape[1],
                              num_classes=np.unique(seg_target).size))
clf_lstm = KClassifier(model_lstm, EPOCHS_LSTM, BATCH_SIZE_LSTM)
clf_lstm._estimator_type = 'classifier'

# RF
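`KClassifier` appears to wrap a compiled Keras model in the scikit-learn estimator interface; setting `_estimator_type = 'classifier'` is what scikit-learn's meta-estimators and inspection utilities check to decide whether an object is a classifier. A minimal sketch of such a wrapper (entirely an assumption; only the constructor signature comes from the calls above):

import numpy as np


class KClassifier:
    _estimator_type = 'classifier'

    def __init__(self, model, epochs, batch_size):
        self.model = model
        self.epochs = epochs
        self.batch_size = batch_size

    def fit(self, X, y):
        self.classes_ = np.unique(y)
        self.model.fit(X, y, epochs=self.epochs, batch_size=self.batch_size)
        return self

    def predict(self, X):
        # Convert per-class probabilities to hard labels.
        return self.classes_[np.argmax(self.model.predict(X), axis=-1)]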
Example #7
def test_get_cnn_model(self):
    model = models.get_cnn_model()
    self.assertIsNotNone(model)
Example #8
def train_and_predict():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print('-' * 30)
    print('Creating and compiling CNN model...')
    print('-' * 30)
    model = models.get_cnn_model(img_rows, img_cols)

    print('-' * 30)
    print('Loading saved weights into CNN model...')
    print('-' * 30)
    model.load_weights('weights_cnn.hdf5')

    print('-' * 30)
    print('Predicting masks on test data using CNN model...')
    print('-' * 30)
    imgs_cnn_mask = model.predict(imgs_train, verbose=1)

    print('-' * 30)
    print('Creating and compiling iterative model...')
    print('-' * 30)
    model = models.get_iterative_model(img_rows, img_cols)

    print('-' * 30)
    print('Begin training...')
    print('-' * 30)
    callbacks = [
        EarlyStopping(monitor='loss', patience=5, verbose=0),
        ModelCheckpoint('weights_iter.hdf5',
                        monitor='loss',
                        save_best_only=True)
    ]
    model.fit([imgs_cnn_mask, imgs_train],
              imgs_mask_train,
              batch_size=4,
              nb_epoch=100,
              verbose=1,
              shuffle=True,
              callbacks=callbacks)

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('weights_iter.hdf5')

    print('-' * 30)
    print('Predicting masks on test data using iterative model...')
    print('-' * 30)
    imgs_mask_test = np.load('imgs_mask_test.npy')  # CNN-predicted test masks saved by an earlier step
    imgs_mask_iter_test = model.predict([imgs_mask_test, imgs_test], verbose=1)
    np.save('imgs_mask_iter_test.npy', imgs_mask_iter_test)
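Two details worth noting: `nb_epoch` is the Keras 1.x spelling (renamed `epochs` in Keras 2), and the test images are standardized with the mean and std computed on the training set, which correctly avoids leaking test-set statistics. Under Keras 2 the training call would read, roughly:

model.fit([imgs_cnn_mask, imgs_train], imgs_mask_train,
          batch_size=4, epochs=100, verbose=1, shuffle=True,
          callbacks=callbacks)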