# Shared imports for the examples below. The project-local helpers used here
# (get_callbacks, data_pipeline, augment_data, getModel, gmodel2, plot_hist,
# score_model, new_score_model, standardize, train_test_dev_split,
# data_augmentation, amplify_data) are assumed to be importable from the
# surrounding project and are not shown in this section.
import time

import numpy as np
import pandas as pd
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split


def main():

    file_path = ".model_weights.hdf5"
    callbacks = get_callbacks(filepath=file_path, patience=10)

    print("Load data...")
    # Load the data.
    train = pd.read_json("../data/train.json")
    #test = pd.read_json("../data/test.json")
    print("Data loading complete")

    print("Feed raw data into data pipeline...")
    all_X_train = data_pipeline(train)
    all_Y_train = train.loc[:, 'is_iceberg']
    print("Data pipeline operations should be complete")

    print("carve data into train/dev/test sets")
    # high iteration training/testing so carve out a final validation block
    # which will be scored 10 times max
    # keep the seed stable so we're not inadvertently using all of the data/overfitting
    X_train_work, X_test, y_train_work, y_test = train_test_split(
        all_X_train, all_Y_train, random_state=317, train_size=0.75)

    # now do the actual split for the train/dev sets
    X_train, X_dev, y_train, y_dev = train_test_split(X_train_work,
                                                      y_train_work,
                                                      train_size=0.80)
    print("data carving completed")

    print("attempt to augment data")
    X_train, y_train = augment_data(X_train, y_train)
    print("data augmentation complete")

    # epochs for model
    epochs = 150
    learning_rate = 0.0001
    lr_decay = 0.5e-5
    batch_size = 128
    drop_out = 0.35

    print("create Keras model")
    gmodel = getModel(learning_rate=learning_rate,
                      lr_decay=lr_decay,
                      drop_out=drop_out)
    print("fit Keras NN")
    hist = gmodel.fit(X_train,
                      y_train,
                      batch_size=batch_size,
                      epochs=epochs,
                      verbose=1,
                      validation_data=(X_dev, y_dev),
                      callbacks=callbacks)

    print("\n\n\n\nModel fit completed")

    print("plot model error/accuracy curves")
    plot_hist(hist, epochs, learning_rate, batch_size, drop_out, lr_decay)

    print("score model")
    score_model(gmodel, file_path, X_train, y_train, X_dev, y_dev)
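

# get_callbacks() is a project helper that is not shown in this section. A
# minimal sketch of what it presumably wraps, assuming it returns a Keras
# EarlyStopping plus a ModelCheckpoint that saves the best weights to
# `filepath` for score_model() to reload later (the name and arguments here
# are assumptions, not the project's actual code):
def get_callbacks_sketch(filepath, patience=10):
    from keras.callbacks import EarlyStopping, ModelCheckpoint

    # stop training once val_loss has not improved for `patience` epochs
    early_stop = EarlyStopping(monitor="val_loss", patience=patience)
    # keep only the best-scoring weights on disk
    checkpoint = ModelCheckpoint(filepath,
                                 monitor="val_loss",
                                 save_best_only=True,
                                 save_weights_only=True)
    return [early_stop, checkpoint]
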
# Example #2
def main():
    _run = True

    count = 0

    while _run:
        try:

            count += 1

            file_path = ".{:}_indigo_model_weights.hdf5".format(count)
            callbacks = get_callbacks(filepath=file_path)

            print("Load data...")
            # Load the data.
            train = pd.read_json("../data/train.json")
            # test = pd.read_json("../data/test.json")
            print("Data loading complete")

            print("Feed raw data into data pipeline...")
            all_X_pics, standardized_params = data_pipeline(train)
            # all_Y_train = train.loc[:,'is_iceberg']

            # figure out extra X features from training data
            inc_angle = pd.to_numeric(train.loc[:, "inc_angle"],
                                      errors="coerce")
            inc_angle[np.isnan(inc_angle)] = inc_angle.mean()
            # inc_angle = np.array(inc_angle, dtype=np.float32)

            # standardize inc_angle and keep the (mean, std) so the test set
            # can be standardized with the same training statistics later
            inc_angle, inc_std_params = standardize(inc_angle)

            # note: there used to be an is-NaN indicator column for inc_angle,
            # but it caused issues because NaN occurs only in the training
            # data, not in the test data

            # Get labels
            all_Y_labels = train.loc[:, "is_iceberg"]

            print("Data pipeline operations should be complete")

            print("carve data into train/dev/test sets")
            # high iteration training/testing so carve out a final validation block
            # which will be scored 10 times max
            # keep the seed stable so we're not inadvertently using all of the data/overfitting

            # keys: "X_images_train", "inc_angle_train", "y_train",
            #       "X_images_dev",   "inc_angle_dev",   "y_dev",
            #       "X_images_test",  "inc_angle_test",  "y_test"
            data_dict = train_test_dev_split(
                (all_X_pics, inc_angle, all_Y_labels))

            print("X_images_train shape:", data_dict["X_images_train"].shape)
            print("inc_angle_train shape:", data_dict["inc_angle_train"].shape)
            print("y_train shape:", data_dict["y_train"].shape)

            print("X_images_dev shape:", data_dict["X_images_dev"].shape)
            print("inc_angle_dev shape:", data_dict["inc_angle_dev"].shape)
            print("y_dev shape:", data_dict["y_dev"].shape)

            print("X_images_test shape:", data_dict["X_images_test"].shape)
            print("inc_angle_test shape:", data_dict["inc_angle_test"].shape)
            print("y_test shape:", data_dict["y_test"].shape)

            print("data carving completed")

            print("attempt to augment data")
            # X_train_pics, X_train_nonpics, y_train = augment_data(X_train_pics, X_train_nonpics, y_train)
            data_dict["X_images_train"], data_amp = data_augmentation(
                data_dict["X_images_train"], ud=True, rotate90=True)
            data_dict["inc_angle_train"] = amplify_data(
                data_dict["inc_angle_train"], data_amp)
            data_dict["y_train"] = amplify_data(data_dict["y_train"], data_amp)

            # random-shuffle the arrays because at this point the first part
            # is the original data and the rest the augmented copies, which
            # might cause some weirdness in training
            p = np.random.permutation(data_dict["X_images_train"].shape[0])

            print("shuffle augmented data")
            # indexing alone would be a no-op; assign the permuted arrays
            # back so the shuffle actually takes effect
            data_dict["X_images_train"] = data_dict["X_images_train"][p]
            data_dict["inc_angle_train"] = data_dict["inc_angle_train"][p]
            data_dict["y_train"] = data_dict["y_train"][p]

            print("data augmentation complete")

            # epochs for model
            epochs = 100

            # log-uniform sample: 4**-5.5 to 4**-3.0, i.e. roughly 5e-4 to 1.6e-2
            _exp = np.random.uniform(-5.5, -3.0)
            learning_rate = 4.0**_exp

            # log-uniform sample: 4**-8.5 to 4**-6.5, i.e. roughly 8e-6 to 1.2e-4
            _exp = np.random.uniform(-8.5, -6.5)
            lr_decay = 4.0**_exp
            # learning_rate = 0.0001
            # lr_decay = 5e-6

            batches = [16, 32, 48, 64, 96, 128]
            # np.random.randint's upper bound is exclusive, so pass
            # len(batches); len(batches) - 1 would make 128 unreachable
            batch_size = batches[np.random.randint(0, len(batches))]

            drop_out = np.random.uniform(0.05, 0.6)
            # batch_size = 32
            # drop_out = 0.275

            print("create Keras model")
            # icy_model = tiny_icy_model((75, 75, 3), drop_out)
            _model = gmodel2(learning_rate, drop_out)

            mypotim = Adam(lr=learning_rate,
                           beta_1=0.9,
                           beta_2=0.999,
                           epsilon=1e-08,
                           decay=lr_decay)

            _model.compile(loss='binary_crossentropy',
                           optimizer=mypotim,
                           metrics=['accuracy'])

            # _model.summary()

            print("fit Keras NN")
            time.sleep(5.0)
            print("Launching ~ ~ ~ >>-----<>")

            hist = _model.fit(
                [data_dict["X_images_train"], data_dict["inc_angle_train"]],
                data_dict["y_train"],
                batch_size=batch_size,
                epochs=epochs,
                verbose=1,
                validation_data=([
                    data_dict["X_images_dev"], data_dict["inc_angle_dev"]
                ], data_dict["y_dev"]),
                callbacks=callbacks)

            print("\n\n\nModel fit completed")

            print("plot model error/accuracy curves")
            plot_hist(hist, epochs, learning_rate, batch_size, drop_out,
                      lr_decay)

            print("score model")
            score_test = new_score_model(_model, file_path, data_dict)

            if score_test is not None:
                df_test = pd.read_json('../data/test.json')
                test_pics, _ = data_pipeline(df_test, standardized_params)

                test_inc_angle = pd.to_numeric(df_test.loc[:, "inc_angle"],
                                               errors="coerce")
                # note: this fills NaN with the test-set mean; example #4
                # reuses the training-set mean instead
                test_inc_angle[np.isnan(test_inc_angle)] = test_inc_angle.mean()
                # inc_angle = np.array(inc_angle, dtype=np.float32)

                # reuse the (mean, std) computed when standardizing the
                # training inc_angle so train and test match
                test_inc_angle, _ = standardize(test_inc_angle, inc_std_params)

                # note: there used to be an is-NaN indicator column for
                # inc_angle, but it caused issues because NaN occurs only in
                # the training data, not in the test data

                pred_test = _model.predict([test_pics, test_inc_angle])

                submission = pd.DataFrame({
                    'id': df_test["id"],
                    'is_iceberg': pred_test.reshape(pred_test.shape[0])
                })
                print(submission.head(10))

                file_name = '{:1.4f}_cnn.csv'.format(score_test)
                submission.to_csv(file_name, index=False)

        except ValueError as err:
            # print the exception instance, not the ValueError class itself
            print(err)
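
# data_augmentation() and amplify_data() are project helpers. A rough sketch
# of the flip/rotate augmentation they appear to perform, assuming 4-D image
# batches of shape (N, H, W, C); the names, signatures, and the returned
# amplification factor are assumptions, not the project's actual code:
def data_augmentation_sketch(images, ud=True, rotate90=True):
    variants = [images]
    if ud:
        # up-down mirror of every image
        variants.append(np.flip(images, axis=1))
    if rotate90:
        # rotate every image 90 degrees in the H/W plane
        variants.append(np.rot90(images, k=1, axes=(1, 2)))
    augmented = np.concatenate(variants, axis=0)
    # amplification factor, used to tile labels/extra features to match
    return augmented, len(variants)


def amplify_data_sketch(values, data_amp):
    # repeat the 1-D labels/features once per augmented copy of the images,
    # in the same order in which the copies were concatenated above
    return np.tile(np.asarray(values), data_amp)
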
# Example #3
def main():

    file_path = ".model_weights.hdf5"
    callbacks = get_callbacks(filepath=file_path, patience=10)

    print("Load data...")
    # Load the data.
    train = pd.read_json("../data/train.json")
    # test = pd.read_json("../data/test.json")
    print("Data loading complete")

    print("Feed raw data into data pipeline...")
    all_X_pics = data_pipeline(train)
    # all_Y_train = train.loc[:,'is_iceberg']

    # figure out extra X features from training data
    inc_angle = pd.to_numeric(train.loc[:, "inc_angle"], errors="coerce")
    inc_angle[np.isnan(inc_angle)] = inc_angle.mean()
    # inc_angle = np.array(inc_angle, dtype=np.float32)

    # inc_angle = standardize(inc_angle)

    # print("inc_angle type: ", type(inc_angle))
    # #
    # inc_angle = tf.convert_to_tensor(inc_angle, np.float32)

    # note: there used to be an is-NaN indicator column for inc_angle, but it
    # caused issues because NaN occurs only in the training data, not in the
    # test data
    all_X_nonpics = inc_angle

    # Get labels
    all_Y_labels = train.loc[:, "is_iceberg"]

    # build an index array over the samples so train_test_split can split
    # indices; the same indices are then used to slice each X array consistently
    x_indices = np.arange(all_X_pics.shape[0])

    print("all x pics shape:", all_X_pics.shape)
    print("all x nonpics shape:", all_X_nonpics.shape)
    print("shape of x_indices:", x_indices.shape)
    print("shape of y labels:", all_Y_labels.shape)

    print("Data pipeline operations should be complete")

    print("carve data into train/dev/test sets")
    # high iteration training/testing so carve out a final validation block
    # which will be scored 10 times max
    # keep the seed stable so we're not inadvertently using all of the data/overfitting
    X_train_work_indices, X_test_indices, y_train_work, y_test = train_test_split(
        x_indices, all_Y_labels, random_state=317, train_size=0.85)

    # figure out what the train slices are
    # these slices are work in progress as they will be sliced again
    X_train_work_pics = all_X_pics[X_train_work_indices]
    X_train_work_nonpics = all_X_nonpics[X_train_work_indices]

    # figure out the test holdout slices
    X_test_pics = all_X_pics[X_test_indices]
    X_test_nonpics = all_X_nonpics[X_test_indices]

    # build a fresh index array over the remaining training pool to be sliced
    x_indices = np.arange(X_train_work_pics.shape[0])

    # now do the actual split for the train/dev sets
    X_train_indices, X_dev_indices, y_train, y_dev = train_test_split(
        x_indices, y_train_work, train_size=0.80, random_state=12018)

    X_train_pics = X_train_work_pics[X_train_indices]
    X_train_nonpics = X_train_work_nonpics[X_train_indices]

    X_dev_pics = X_train_work_pics[X_dev_indices]
    X_dev_nonpics = X_train_work_nonpics[X_dev_indices]

    print("X_train_images shape:", X_train_pics.shape)
    print("X_train_non_images shape:", X_train_nonpics.shape)
    print("y_Train shape:", y_train.shape)

    print("data carving completed")

    print("attempt to augment data")
    X_train_pics, X_train_nonpics, y_train = augment_data(
        X_train_pics, X_train_nonpics, y_train)
    print("data augmentation complete")

    # epochs for model
    epochs = 50
    learning_rate = 0.001
    lr_decay = 1e-6
    batch_size = 32
    drop_out = 0.20

    print("create Keras model")
    # icy_model = tiny_icy_model((75, 75, 3), drop_out)
    _model = gmodel2(learning_rate, lr_decay, drop_out)

    mypotim = Adam(lr=learning_rate,
                   beta_1=0.9,
                   beta_2=0.999,
                   epsilon=1e-08,
                   decay=lr_decay)

    _model.compile(loss='binary_crossentropy',
                   optimizer=mypotim,
                   metrics=['accuracy'])

    # optimo = Adam(lr=learning_rate, decay=lr_decay)
    #
    # _model.compile(optimizer=optimo, loss="binary_crossentropy", metrics=["accuracy"])

    _model.summary()

    # gmodel = getModel(learning_rate=learning_rate, lr_decay=lr_decay, drop_out=drop_out)
    print("fit Keras NN")

    # hist = icy_model.fit([X_train_pics, X_train_nonpics], y_train,
    #             batch_size=batch_size,
    #             epochs=epochs,
    #             verbose=1,
    #             validation_data=([X_dev_pics, X_dev_nonpics], y_dev),
    #             callbacks=callbacks)

    # hist = gmodel.fit(X_train, y_train,
    #             batch_size=batch_size,
    #             epochs=epochs,
    #             verbose=1,
    #             validation_data=(X_dev, y_dev),
    #             callbacks=callbacks)

    hist = _model.fit([X_train_pics, X_train_nonpics], y_train,
                      batch_size=batch_size,
                      epochs=epochs,
                      verbose=1,
                      validation_data=([X_dev_pics, X_dev_nonpics], y_dev),
                      callbacks=callbacks)

    print("\n\n\nModel fit completed")

    print("plot model error/accuracy curves")
    plot_hist(hist, epochs, learning_rate, batch_size, drop_out, lr_decay)

    print("score model")
    score_model(_model, file_path, [X_train_pics, X_train_nonpics], y_train,
                [X_dev_pics, X_dev_nonpics], y_dev)
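
# standardize() is a project helper. A minimal sketch under the assumption
# that it does plain z-scoring and returns the (mean, std) parameters so a
# later call (e.g. on the test set) can reuse the training statistics; the
# name and signature are inferred from the call sites above:
def standardize_sketch(values, params=None):
    values = np.asarray(values, dtype=np.float32)
    if params is None:
        # first pass (training data): compute and return the statistics
        params = (values.mean(), values.std())
    mean, std = params
    return (values - mean) / std, params
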
# Example #4
def main():
    _path = "../data/.indigo_1499_model_weights"
    _model = gmodel2()
    _model.load_weights(filepath=_path)

    mypotim = Adam(lr=0.001,
                   beta_1=0.9,
                   beta_2=0.999,
                   epsilon=1e-08,
                   decay=1e-6)

    _model.compile(loss='binary_crossentropy',
                   optimizer=mypotim,
                   metrics=['accuracy'])

    print("Load data...")
    # Load the data.
    train = pd.read_json("../data/train.json")
    # test = pd.read_json("../data/test.json")
    print("Data loading complete")

    print("Feed raw data into data pipeline...")
    all_X_pics, standardized_params = data_pipeline(train, special_c3=False)
    # all_Y_train = train.loc[:,'is_iceberg']

    # figure out extra X features from training data
    inc_angle = pd.to_numeric(train.loc[:, "inc_angle"], errors="coerce")

    inc_mean = inc_angle.mean()

    inc_angle[np.isnan(inc_angle)] = inc_mean
    # inc_angle = np.array(inc_angle, dtype=np.float32)

    # TODO: enable this?
    # inc_angle, inc_std_params = standardize(inc_angle)

    # note: there used to be an is-NaN indicator column for inc_angle, but it
    # caused issues because NaN occurs only in the training data, not in the
    # test data

    # Get labels
    all_Y_labels = train.loc[:, "is_iceberg"]

    print("Data pipeline operations should be complete")

    print("carve data into train/dev/test sets")
    # high iteration training/testing so carve out a final validation block
    # which will be scored 10 times max
    # keep the seed stable so we're not inadvertently using all of the data/overfitting

    # keys: "X_images_train", "inc_angle_train", "y_train",
    #       "X_images_dev",   "inc_angle_dev",   "y_dev",
    #       "X_images_test",  "inc_angle_test",  "y_test"
    data_dict = train_test_dev_split((all_X_pics, inc_angle, all_Y_labels))

    print("X_images_train shape:", data_dict["X_images_train"].shape)
    print("inc_angle_train shape:", data_dict["inc_angle_train"].shape)
    print("y_train shape:", data_dict["y_train"].shape)

    print("X_images_dev shape:", data_dict["X_images_dev"].shape)
    print("inc_angle_dev shape:", data_dict["inc_angle_dev"].shape)
    print("y_dev shape:", data_dict["y_dev"].shape)

    print("X_images_test shape:", data_dict["X_images_test"].shape)
    print("inc_angle_test shape:", data_dict["inc_angle_test"].shape)
    print("y_test shape:", data_dict["y_test"].shape)

    print("data carving completed")

    print("score train set:")
    score = _model.evaluate(
        [data_dict["X_images_train"], data_dict["inc_angle_train"]],
        data_dict["y_train"],
        verbose=1)
    print("\n")
    print('Train score: {:2.2f}'.format(score[0]))
    print('Train accuracy: {:2.2f}%'.format(score[1] * 100.0))

    print("score dev set:")
    score = _model.evaluate(
        [data_dict["X_images_dev"], data_dict["inc_angle_dev"]],
        data_dict["y_dev"],
        verbose=1)
    print("\n")
    print('Dev score: {:2.2f}'.format(score[0]))
    print('Dev accuracy: {:2.2f}%'.format(score[1] * 100.0))

    print("score test set:")
    score = _model.evaluate(
        [data_dict["X_images_test"], data_dict["inc_angle_test"]],
        data_dict["y_test"],
        verbose=1)
    print("\n")
    print('Test score: {:2.2f}'.format(score[0]))
    print('Test accuracy: {:2.2f}%'.format(score[1] * 100.0))

    df_test = pd.read_json('../data/test.json')

    print("Push through data pipeline...", standardized_params)
    test_pics, _ = data_pipeline(df_test,
                                 standardized_params,
                                 special_c3=False)

    # print("pipeline inc_angle", inc_std_params)
    test_inc_angle = pd.to_numeric(df_test.loc[:, "inc_angle"],
                                   errors="coerce")

    # fill the NaN holes with the mean computed earlier from the training data
    test_inc_angle[np.isnan(test_inc_angle)] = inc_mean
    # inc_angle = np.array(inc_angle, dtype=np.float32)

    # TODO: enable this?
    # has the (mean, std) from standardizing inc_angle earlier
    # test_inc_angle, _ = standardize(test_inc_angle, inc_std_params)

    print("test_inc_angle", test_inc_angle[:10])

    # note: there used to be an is-NaN indicator column for inc_angle, but it
    # caused issues because NaN occurs only in the training data, not in the
    # test data

    print("Pipeline complete")
    print("\n")
    print("Predict on transformed data")

    # df_test.inc_angle = df_test.inc_angle.replace('na',0)
    # Xtest = (get_scaled_imgs(df_test))
    # Xinc = df_test.inc_angle
    # pred_test = model.predict([Xtest,Xinc])

    # pred_test = _model.predict([data_dict["X_images_train"], data_dict["inc_angle_train"]], verbose=1)
    pred_test = _model.predict([test_pics, test_inc_angle], verbose=1)

    # print("preds: ", pred_test[:10])

    submission = pd.DataFrame({
        'id': df_test["id"],
        'is_iceberg': pred_test.reshape(pred_test.shape[0])
    })
    print(submission.head(10))
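

# train_test_dev_split() is a project helper used by examples #2 and #4. A
# hedged sketch of the behavior implied by its call sites: split the images,
# inc_angle, and labels with shared indices into a train/dev/test dict, with
# a fixed seed for the held-out test block as in example #1. The split ratios
# and the default seed here are assumptions:
def train_test_dev_split_sketch(data, test_size=0.15, dev_size=0.20):
    all_X_pics, inc_angle, all_Y_labels = data
    indices = np.arange(all_X_pics.shape[0])

    # carve off the stable test block first (fixed seed, as in example #1)
    work_idx, test_idx = train_test_split(indices,
                                          random_state=317,
                                          test_size=test_size)
    # then split the remainder into train/dev sets
    train_idx, dev_idx = train_test_split(work_idx, test_size=dev_size)

    out = {}
    for name, idx in (("train", train_idx), ("dev", dev_idx),
                      ("test", test_idx)):
        out["X_images_" + name] = all_X_pics[idx]
        out["inc_angle_" + name] = np.asarray(inc_angle)[idx]
        out["y_" + name] = np.asarray(all_Y_labels)[idx]
    return out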