Example #1
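# Likely imports for this fragment (not shown in the original); `scale`,
# `ds_train`, `ds_test`, `ds_info` (e.g. from tfds.load) and `KerasModel`
# are defined elsewhere in the file.
from datetime import datetime

import tensorflow as tf
from tensorflow import keras

# Training pipeline: scale in parallel, cache, shuffle the full training split,
# then batch and prefetch so input preparation overlaps with training.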
ds_train = ds_train.map(scale,
                        num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(128)
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)

model = KerasModel(input_shape=(28, 28, 1))

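# Evaluation pipeline: scaled and batched but not shuffled; caching happens
# after batching here, so the cached elements are the final test batches.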
ds_test = ds_test.map(scale, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.batch(128)
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)

logdir = "logs/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

model.compile(
    tf.keras.optimizers.Adam(0.001),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

# The dataset is already batched (128) above, so `batch_size` must not be
# passed to fit() when training from a tf.data.Dataset.
model.fit(ds_train,
          epochs=6,
          validation_data=ds_test,
          verbose=True,
          callbacks=[tensorboard_callback])

model.summary()
Example #2
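# Likely imports for this fragment (not shown in the original); `train_x`,
# `train_y`, `test_x`, `test_y`, the image dimensions and `KerasModel` are
# defined elsewhere in the file.
from datetime import datetime

import tensorflow as tf
from tensorflow import keras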
print("train_x", train_x.shape, train_x.dtype)

input_shape = (img_width, img_height, img_num_channels)

logdir = "logs/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

model = KerasModel(input_shape)
# tf.keras.utils.plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
model.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=keras.optimizers.Adam(0.001),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

# Fit the model to the training data
history = model.fit(train_x,
                    train_y,
                    callbacks=[tensorboard_callback],
                    batch_size=50,
                    epochs=6,
                    verbose=True,
                    validation_split=0.2)
model.summary()
score = model.evaluate(test_x, test_y, verbose=0)
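# evaluate() returns [loss, sparse_categorical_accuracy] for the compiled metrics.
print("Test loss:", score[0], "- test accuracy:", score[1])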

model.save('saved_model/model')

# Note: because the loss was compiled with from_logits=True, predict() returns
# raw logits rather than probabilities.
probabilities = model.predict(test_x, verbose=True)
print(probabilities)
Example #3
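    # Fragment from a larger training script: `model_to_run`, `train_labels`,
    # `data_loaders_original`, `KerasModel`, `TransferLearningModel` and
    # `save_results_in_csv` are defined elsewhere, as are the `os`, `numpy` (np)
    # and `torchvision` imports.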
    if model_to_run == "KERAS_CNN":
        # ################################################ KERAS MODEL ################################################### #
        # These file paths are specified so that model parameters can be saved after training
        model_name_json_path = os.path.join(os.path.abspath(__file__), '..',
                                            'data', 'Keras_best_model.json')
        model_name_h5_path = os.path.join(os.path.abspath(__file__), '..',
                                          'data', 'Keras_best_model.h5')

        y_train = train_labels.numpy()
        X_train = data_loaders_original["train"].dataset.imgs
        X_test = data_loaders_original["test"].dataset.imgs

        keras_model = KerasModel(model_name_json_path=model_name_json_path,
                                 model_name_h5_path=model_name_h5_path,
                                 X=X_train)
        keras_model.fit(X=X_train, y=y_train)

        y_pred = keras_model.predict(X_train)
        tr_error = np.mean(y_pred != y_train[:, None])
        print(f"Keras Model Training Error is: {tr_error}")
        test_labels = keras_model.predict(X_test)
        save_results_in_csv(test_labels)

    elif model_to_run == "TRANSFER_LEARNING":
        # Load the pre-trained ResNet-152 model
        model_conv = torchvision.models.resnet152(pretrained=True)

        model = TransferLearningModel(model_conv)

        # Train the model with the pre-trained ResNet
        print("Training model...")
Example #4
bst3.fit(X_train, y_train)
# ------------------------------------------------------------------
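# Assumed imports (not shown in the fragment); `KerasModel`, `cols_k2`, the
# training matrices and the earlier model `bst3` are defined elsewhere.
import random

import numpy as np
from keras import backend as K
from sklearn.linear_model import LogisticRegression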
from keras.callbacks import Callback as keras_clb
random.seed(666)
np.random.seed(666)


class LearningRateClb(keras_clb):
    def on_epoch_end(self, epoch, logs=None):
        if epoch == 300:
            # K.set_value works with both Theano and TensorFlow backends,
            # unlike the Theano-only `optimizer.lr.set_value(...)`.
            K.set_value(self.model.optimizer.lr, 0.01)


bst4 = KerasModel(cols_k2, 600)
bst4.fit_process(X_train_nn, y_train)
bst4.fit(X_train_nn, y_train, callbacks=[LearningRateClb()])
# ------------------------------------------------------------------
bst5 = LogisticRegression()
bst5.fit(X_train_reg, y_train)
# ------------------------------------------------------------------
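# XGBoost booster parameters; 'silent' was replaced by 'verbosity' in newer
# XGBoost releases, and 'n' is not a built-in parameter (presumably consumed
# elsewhere as the number of boosting rounds).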
params = {
    'silent': 1,
    'objective': 'binary:logistic',
    'max_depth': 3,
    'eta': 0.01,
    'subsample': 0.65,
    'colsample_bytree': 0.3,
    'min_child_weight': 5,
    'n': 1140,
}
Example #5
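# Likely imports for this fragment (not shown in the original); `filterout_mac_features`,
# `get_feats_labels_ids`, `get_data` and `KerasModel` are project helpers defined elsewhere.
import pandas as pd
from sklearn.metrics import f1_score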
def get_real_data():
    #df = util.load_data_to_dataframe('dataset/val_test_split.json')
    #unseen_test = create_features(df)
    #unseen_test.to_csv('cache/val_test_split.csv', index=False)
    unseen_test = filterout_mac_features(pd.read_csv('cache/val_test_split.csv'))
    train_feats, train_labels, _ = get_feats_labels_ids(unseen_test)
    return train_feats, train_labels

X, Y, X_test, Y_test = get_data()

# PART 2 FIT MODEL

model = KerasModel()

model.fit(X, Y)

print("predicting on kfold validation")

# PART 5 EVALUATE ON UNSEEN
X_real, Y_real = get_real_data()

real_predict = model.predict(X_real)
print(f"Average f1s on unseen: {f1_score(Y_real, real_predict, average='micro')}")

# PART 6 PREPARE SUBMISSION
def get_data_for_submitting():
    #df_test = util.load_data_to_dataframe('dataset/test.json')
    #prepared_df = create_features(df_test)
    #prepared_df.to_csv('cache/test.csv', index=False)
    prepared_df = filterout_mac_features(pd.read_csv('cache/test.csv'))