# Example 1
    optimizer = args.optimizer

    # initialize numpy, random, TensorFlow, and keras
    np.random.seed(random_seed)
    rn.seed(random_seed)
    tf.random.set_seed(random_seed)
    if gpu_id >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ''

    # load dataset after scaling
    print("Loading UJIIndoorLoc data ...")

    ujiindoorloc = UJIIndoorLoc(path='../data/ujiindoorloc',
                                frac=frac,
                                preprocessor=preprocessor)
    _, training_data, _, _ = ujiindoorloc.load_data()
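
    # load_data() returns (training_df, training_data, testing_df,
    # testing_data); only the processed training split is needed to pretrain
    # the DAE below.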

    # build DAE model
    print("Buidling DAE model ...")
    model = deep_autoencoder(training_data.rss_scaled,
                             preprocessor=preprocessor,
                             hidden_layers=hidden_layers,
                             cache=cache,
                             model_fname=None,
                             optimizer=optimizer,
                             batch_size=batch_size,
                             epochs=epochs,
                             validation_split=validation_split)
    model.summary()  # summary() already prints the model and returns None
    ### initialize numpy, random, TensorFlow, and keras
    np.random.seed(random_seed)
    rn.seed(random_seed)
    tf.set_random_seed(random_seed)
    if gpu_id >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
    sess = tf.Session(graph=tf.get_default_graph(),
                      config=session_conf)  # for reproducibility
    K.set_session(sess)
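    # N.B.: session_conf is assumed to be defined elsewhere in this module,
    # typically tf.ConfigProto(intra_op_parallelism_threads=1,
    # inter_op_parallelism_threads=1), so that thread-level nondeterminism
    # does not break reproducibility.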

    ### load dataset after scaling
    ujiindoorloc = UJIIndoorLoc(data_path,
                                frac=frac,
                                scale=True,
                                classification_mode='hierarchical')
    rss, labels = ujiindoorloc.load_data()

    # create, train, and evaluate a model for multi-class building classification
    bld_model = siso_classifier(input_dim=rss.shape[1],
                                input_name='bld_input',
                                output_dim=labels.building.shape[1],
                                output_name='bld_output',
                                base_model=None,
                                hidden_layers=building_hidden_layers,
                                optimizer=optimizer,
                                dropout=dropout)
    startTime = timer()
    bld_history = bld_model.fit(x=rss,
                                y=labels.building,
                                batch_size=batch_size,
                                epochs=epochs,
                                validation_split=validation_split,
                                shuffle=True)  # the snippet was truncated here; remaining arguments follow the fit() calls in the later examples

def siso_regression_uji(gpu_id: int, dataset: str, frac: float,
                        validation_split: float, preprocessor: str,
                        batch_size: int, epochs: int, optimizer: str,
                        dropout: float, corruption_level: float,
                        dae_hidden_layers: list, sdae_hidden_layers: list,
                        cache: bool, regression_hidden_layers: list,
                        verbose: int):
    """Multi-floor indoor localization based on three-dimensional regression of
    location coordinates using a single-input and single-output (SISO) deep
    neural network (DNN) model and UJIIndoorLoc datasets.

    Keyword arguments:
    gpu_id -- ID of the GPU to use (negative for CPU-only execution)
    dataset -- name of the dataset ('uji' is the only supported value)
    frac -- fraction of the dataset to use
    validation_split -- fraction of the training data to use for validation
    preprocessor -- name of the scaler applied to the RSS data
    batch_size -- size of a training batch
    epochs -- maximum number of training epochs
    optimizer -- name of the Keras optimizer
    dropout -- dropout rate for the hidden layers
    corruption_level -- corruption level for SDAE input denoising
    dae_hidden_layers -- hidden-layer sizes of the optional deep autoencoder ('' to disable)
    sdae_hidden_layers -- hidden-layer sizes of the optional stacked denoising autoencoder ('' to disable)
    cache -- whether to cache the processed dataset and pretrained models
    regression_hidden_layers -- hidden-layer sizes of the regression head ('' to disable)
    verbose -- Keras verbosity level
    """

    ### initialize numpy, random, TensorFlow, and keras
    np.random.seed()  # based on current time or OS-specific randomness source
    rn.seed()  # likewise
    tf.set_random_seed(rn.randint(0, 1000000))
    if gpu_id >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)

    ### load datasets after scaling
    print("Loading data ...")
    if dataset == 'uji':
        from ujiindoorloc import UJIIndoorLoc
        uji = UJIIndoorLoc(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical',
        )
    else:
        print("'{0}' is not a supported data set.".format(dataset))
        sys.exit(0)
    flr_height = uji.floor_height
    training_df = uji.training_df
    training_data = uji.training_data
    testing_df = uji.testing_df
    testing_data = uji.testing_data

    ### build and train a SISO model
    print(
        "Building and training a SISO model for three-dimensional regression ..."
    )
    rss = training_data.rss_scaled
    coord = training_data.coord_3d_scaled
    coord_scaler = training_data.coord_3d_scaler  # for inverse transform
    labels = training_data.labels
    input = Input(shape=(rss.shape[1], ), name='input')  # common input

    # (optional) build deep autoencoder or stacked denoising autoencoder
    if dae_hidden_layers != '':
        print("- Building a DAE model ...")
        model = deep_autoencoder(dataset=dataset,
                                 input_data=rss,
                                 preprocessor=preprocessor,
                                 hidden_layers=dae_hidden_layers,
                                 cache=cache,
                                 model_fname=None,
                                 optimizer=optimizer,
                                 batch_size=batch_size,
                                 epochs=epochs,
                                 validation_split=validation_split)
        x = model(input)
    elif sdae_hidden_layers != '':
        print("- Building an SDAE model ...")
        model = sdae(dataset=dataset,
                     input_data=rss,
                     preprocessor=preprocessor,
                     hidden_layers=sdae_hidden_layers,
                     cache=cache,
                     model_fname=None,
                     optimizer=optimizer,
                     corruption_level=corruption_level,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_split=validation_split)
        x = model(input)
    else:
        x = input

    # regression hidden layers
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(dropout)(x)
    if regression_hidden_layers != '':
        for units in regression_hidden_layers:
            x = Dense(units)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Dropout(dropout)(x)

    # coordinates regression output
    x = Dense(coord.shape[1], kernel_initializer='normal')(x)
    x = BatchNormalization()(x)
    coordinates_output = Activation('linear', name='coordinates_output')(
        x)  # 'linear' activation

    model = Model(inputs=input, outputs=coordinates_output)
    model.compile(optimizer=optimizer,
                  loss='mean_squared_error',
                  metrics=['mean_squared_error'])
    weights_file = os.path.expanduser("~/tmp/best_weights.h5")
    checkpoint = ModelCheckpoint(weights_file,
                                 monitor='val_loss',
                                 save_best_only=True,
                                 verbose=0)
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=10,
                               verbose=0)

    print("- Training a coordinates regressor ...", end='')
    startTime = timer()
    history = model.fit(x={'input': rss},
                        y={'coordinates_output': coord},
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=verbose,
                        callbacks=[checkpoint, early_stop],
                        validation_split=validation_split,
                        shuffle=True)
    elapsedTime = timer() - startTime
    print(" completed in {0:.4e} s".format(elapsedTime))
    model.load_weights(weights_file)  # load weights from the best model
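
    # With save_best_only=True the checkpoint keeps only the weights with the
    # lowest validation loss, and early stopping halts training after 10
    # epochs without improvement, so the load above restores the best model
    # seen during training rather than the last one.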

    ### evaluate the model
    print("Evaluating the model ...")
    rss = testing_data.rss_scaled
    labels = testing_data.labels
    flrs = labels.floor
    coord = testing_data.coord_3d  # original coordinates

    # calculate the classification accuracies and localization errors
    coords_scaled_pred = model.predict(rss, batch_size=batch_size)
    coord_est = coord_scaler.inverse_transform(
        coords_scaled_pred)  # inverse scaling
    tmp = np.maximum(np.minimum(coord_est[:, 2], 4 * flr_height),
                     0)  # clamp the estimated height to [0, 4*flr_height]
    flrs_pred = np.floor(
        tmp / flr_height + 0.5
    )  # floor number (0..4); N.B. Python 3's round() uses round-half-to-even, so we cannot use it here.
    flr_results = (np.equal(np.argmax(flrs, axis=1), flrs_pred)).astype(int)
    flr_acc = flr_results.mean()
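
    # For example, if flr_height were 3.7 m, an estimated height of 9.0 m
    # would give np.floor(9.0 / 3.7 + 0.5) = np.floor(2.93...) = 2.0, the
    # nearest floor; Python 3's round() rounds half-way cases to the nearest
    # even number (round(2.5) == 2), which would bias the estimates.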

    # calculate 2D localization errors over the planar (x, y) components only
    # (coord and coord_est are three-dimensional here)
    dist_2d = norm(coord[:, :2] - coord_est[:, :2], axis=1)
    mean_error_2d = dist_2d.mean()
    median_error_2d = np.median(dist_2d)

    # calculate 3D localization errors as the planar error plus the height
    # error implied by the floor misclassification
    flr_diff = np.absolute(np.argmax(flrs, axis=1) - flrs_pred)
    z_diff_squared = (flr_height**2) * np.square(flr_diff)
    dist_3d = np.sqrt(
        np.sum(np.square(coord[:, :2] - coord_est[:, :2]), axis=1) +
        z_diff_squared)
    mean_error_3d = dist_3d.mean()
    median_error_3d = np.median(dist_3d)

    LocalizationResults = namedtuple('LocalizationResults', [
        'flr_acc', 'mean_error_2d', 'median_error_2d', 'mean_error_3d',
        'median_error_3d', 'elapsedTime'
    ])
    return LocalizationResults(flr_acc=flr_acc,
                               mean_error_2d=mean_error_2d,
                               median_error_2d=median_error_2d,
                               mean_error_3d=mean_error_3d,
                               median_error_3d=median_error_3d,
                               elapsedTime=elapsedTime)
# Example 4
    dataset = args.dataset
    batch_size = args.batch_size
    epochs = args.epochs
    validation_split = args.validation_split
    hidden_layers = [int(i) for i in (args.hidden_layers).split(',')]
    cache = not args.no_cache
    frac = args.frac
    preprocessor = args.preprocessor
    optimizer = args.optimizer
    corruption_level = args.corruption_level

    print("Loading  data ...")
    if dataset == 'uji':
        from ujiindoorloc import UJIIndoorLoc
        ds = UJIIndoorLoc(cache=cache,
                          frac=frac,
                          preprocessor=preprocessor,
                          classification_mode='hierarchical')
    elif dataset == 'tut':
        from tut import TUT
        ds = TUT(cache=cache,
                 frac=frac,
                 preprocessor=preprocessor,
                 classification_mode='hierarchical',
                 grid_size=0)
    elif dataset == 'tut2':
        from tut import TUT2
        ds = TUT2(cache=cache,
                  frac=frac,
                  preprocessor=preprocessor,
                  classification_mode='hierarchical',
                  grid_size=0)

    # select the RSS and UTM scalers matching the preprocessor
    if preprocessor == 'standard_scaler':
        from sklearn.preprocessing import StandardScaler
        rss_scaler = StandardScaler()
        utm_scaler = StandardScaler()
    elif preprocessor == 'minmax_scaler':
        from sklearn.preprocessing import MinMaxScaler
        rss_scaler = MinMaxScaler()
        utm_scaler = MinMaxScaler()
    elif preprocessor == 'normalizer':
        from sklearn.preprocessing import Normalizer
        rss_scaler = Normalizer()
        utm_scaler = Normalizer()
    else:
        rss_scaler = None
        utm_scaler = None
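
    # A minimal sketch of what the preprocessor choice does to raw RSS values
    # (UJIIndoorLoc reports dBm readings and uses +100 for undetected APs):
    #
    #   >>> import numpy as np
    #   >>> from sklearn.preprocessing import MinMaxScaler
    #   >>> rss = np.array([[-90., -55., 100.], [-70., -60., 100.]])
    #   >>> MinMaxScaler().fit_transform(rss)  # per-feature scaling to [0, 1]
    #   array([[0., 1., 0.],
    #          [1., 0., 0.]])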

    ujiindoorloc = UJIIndoorLoc(
        data_path,
        frac=frac,
        rss_scaler=rss_scaler,
        utm_scaler=utm_scaler,
        classification_mode='hierarchical')
    training_df, training_data, testing_df, testing_data = \
        ujiindoorloc.load_data()

    ### build and train a SIMO model
    print(
        "\nPart 2: building and training a SIMO model with adaptive loss weights ..."
    )
    rss = training_data.rss_scaled
    utm = training_data.utm_scaled
    labels = training_data.labels
    input = Input(shape=(rss.shape[1], ), name='input')  # common input
    tensorboard = TensorBoard(
        log_dir="logs/{}".format(time()), write_graph=True)
def simo_hybrid_uji(
        gpu_id: int,
        dataset: str,
        frac: float,
        validation_split: float,
        preprocessor: str,
        batch_size: int,
        epochs: int,
        optimizer: str,
        dropout: float,
        corruption_level: float,
        dae_hidden_layers: list,
        sdae_hidden_layers: list,
        cache: bool,
        common_hidden_layers: list,
        floor_hidden_layers: list,
        coordinates_hidden_layers: list,
        floor_weight: float,
        coordinates_weight: float,
        verbose: int
):
    """Multi-building and multi-floor indoor localization based on hybrid
    building/floor classification and coordinates regression using a
    single-input and multi-output (SIMO) deep neural network (DNN) model and
    UJIIndoorLoc datasets.

    Keyword arguments:
    gpu_id -- ID of the GPU to use (negative for CPU-only execution)
    dataset -- name of the dataset ('uji' is the only supported value)
    frac -- fraction of the dataset to use
    validation_split -- fraction of the training data to use for validation
    preprocessor -- name of the scaler applied to the RSS data
    batch_size -- size of a training batch
    epochs -- maximum number of training epochs
    optimizer -- name of the Keras optimizer
    dropout -- dropout rate for the hidden layers
    corruption_level -- corruption level for SDAE input denoising
    dae_hidden_layers -- hidden-layer sizes of the optional deep autoencoder ('' to disable)
    sdae_hidden_layers -- hidden-layer sizes of the optional stacked denoising autoencoder ('' to disable)
    cache -- whether to cache the processed dataset and pretrained models
    common_hidden_layers -- hidden-layer sizes shared by both outputs ('' to disable)
    floor_hidden_layers -- hidden-layer sizes of the floor classification head ('' to disable)
    coordinates_hidden_layers -- hidden-layer sizes of the coordinates regression head ('' to disable)
    floor_weight -- loss weight of the floor classification output
    coordinates_weight -- loss weight of the coordinates regression output
    verbose -- Keras verbosity level
    """

    ### initialize numpy, random, TensorFlow, and keras
    np.random.seed()            # based on current time or OS-specific randomness source
    rn.seed()                   # likewise
    tf.set_random_seed(rn.randint(0, 1000000))
    if gpu_id >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
    sess = tf.Session(
        graph=tf.get_default_graph(),
        config=session_conf)
    K.set_session(sess)

    ### load datasets after scaling
    print("Loading data ...")
    if dataset == 'uji':
        from ujiindoorloc import UJIIndoorLoc
        uji = UJIIndoorLoc(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical')
    else:
        print("'{0}' is not a supported data set.".format(dataset))
        sys.exit(1)  # exit with a non-zero status to signal the error
    flr_height = uji.floor_height
    training_df = uji.training_df
    training_data = uji.training_data
    testing_df = uji.testing_df
    testing_data = uji.testing_data

    ### build and train a SIMO model
    print(
        "Building and training a SIMO model for hybrid classification and regression ..."
    )
    rss = training_data.rss_scaled
    coord = training_data.coord_scaled
    coord_scaler = training_data.coord_scaler  # for inverse transform
    labels = training_data.labels
    input = Input(shape=(rss.shape[1], ), name='input')  # common input

    # (optional) build deep autoencoder or stacked denoising autoencoder
    if dae_hidden_layers != '':
        print("- Building a DAE model ...")
        model = deep_autoencoder(
            dataset=dataset,
            input_data=rss,
            preprocessor=preprocessor,
            hidden_layers=dae_hidden_layers,
            cache=cache,
            model_fname=None,
            optimizer=optimizer,
            batch_size=batch_size,
            epochs=epochs,
            validation_split=validation_split)
        x = model(input)
    elif sdae_hidden_layers != '':
        print("- Building an SDAE model ...")
        model = sdae(
            dataset=dataset,
            input_data=rss,
            preprocessor=preprocessor,
            hidden_layers=sdae_hidden_layers,
            cache=cache,
            model_fname=None,
            optimizer=optimizer,
            corruption_level=corruption_level,
            batch_size=batch_size,
            epochs=epochs,
            validation_split=validation_split)
        x = model(input)
    else:
        x = input

    # common hidden layers (the block below is disabled; kept for reference)
    # x = BatchNormalization()(x)
    # x = Activation('relu')(x)
    # x = Dropout(dropout)(x)
    # if common_hidden_layers != '':
    #     for units in common_hidden_layers:
    #         x = Dense(units)(x)
    #         x = BatchNormalization()(x)
    #         x = Activation('relu')(x)
    #         x = Dropout(dropout)(x)
    # common_hl_output = x

    # floor classification output (the dense and CNN variants below are disabled; kept for reference)
    # if floor_hidden_layers != '':
    #     for units in floor_hidden_layers:
    #         x = Dense(units)(x)
    #         x = BatchNormalization()(x)
    #         x = Activation('relu')(x)
    #         x = Dropout(dropout)(x)
    # x = Dense(labels.floor.shape[1])(x)
    # x = BatchNormalization()(x)
    # floor_output = Activation(
    #     'softmax', name='floor_output')(x)  # no dropout for an output layer
    #
    # x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = MaxPooling1D(pool_size=5)(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = MaxPooling1D(pool_size=5)(x)
    #
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = Dropout(dropout)(x)
    # x = Flatten()(x)
    #
    # x = Dense(labels.floor.shape[1])(x)
    # x = BatchNormalization()(x)
    # floor_output = Activation('softmax', name='floor_output')(x)
    # coordinates regression output (the variants below are disabled; kept for reference)
    # x = common_hl_output
    # for units in coordinates_hidden_layers:
    #     x = Dense(units, kernel_initializer='normal')(x)
    #     x = BatchNormalization()(x)
    #     x = Activation('relu')(x)
    #     x = Dropout(dropout)(x)
    # x = Dense(coord.shape[1], kernel_initializer='normal')(x)
    # x = BatchNormalization()(x)
    # coordinates_output = Activation(
    #     'linear', name='coordinates_output')(x)  # 'linear' activation
    # x = common_hl_output
    # x = Lambda(lambda x:K.expand_dims(x,axis=-1))(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = MaxPooling1D(pool_size=5)(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = MaxPooling1D(pool_size=5)(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    # x = Dropout(dropout)(x)
    # x = Flatten()(x)
    # x = Dense(coord.shape[1],kernel_initializer='normal')(x)
    # x = BatchNormalization()(x)
    # coordinates_output = Activation(
    #     'linear', name='coordinates_output')(x)

    # 1D CNN feature extractor (by John)
    x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
    x = Conv1D(filters=99, kernel_size=22, activation='relu')(x)
    x = Dropout(dropout)(x)
    # x = Conv1D(filters=128, kernel_size=10, activation='relu')(x)

    # x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=66, kernel_size=22, activation='relu')(x)
    # x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=33, kernel_size=22, activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Flatten()(x)
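    # Shape check, assuming the 520-AP UJIIndoorLoc input: (520, 1)
    # -> Conv1D(99, 22) -> (499, 99) -> Conv1D(66, 22) -> (478, 66)
    # -> Conv1D(33, 22) -> (457, 33) -> MaxPooling1D(2) -> (228, 33)
    # -> Flatten -> 7524 shared features.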
    common_hl_output = x  # flattened CNN features shared by both output heads
    x = Dense(labels.floor.shape[1])(x)
    # x = BatchNormalization()(x)
    floor_output = Activation('softmax', name='floor_output')(x)

    x = common_hl_output  # reuse the shared features for the coordinates head
    x = Dense(coord.shape[1], kernel_initializer='normal')(x)
    # x = BatchNormalization()(x)
    coordinates_output = Activation(
        'linear', name='coordinates_output')(x)
    model = Model(
        inputs=input,
        outputs=[
            floor_output,
            coordinates_output
        ])
    model.compile(
        optimizer=optimizer,
        loss=[
            'categorical_crossentropy',
            'mean_squared_error'
        ],
        loss_weights={
            'floor_output': floor_weight,
            'coordinates_output': coordinates_weight
        },
        metrics={
            'floor_output': 'accuracy',
            'coordinates_output': 'mean_squared_error'
        })
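
    # With these settings Keras minimizes the weighted sum of the two losses:
    #   total_loss = floor_weight * categorical_crossentropy(floor_output)
    #              + coordinates_weight * mean_squared_error(coordinates_output)
    # so the two weights trade off floor classification accuracy against
    # coordinate precision.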
    weights_file = os.path.expanduser("~/tmp/best_weights.h5")
    checkpoint = ModelCheckpoint(weights_file, monitor='val_loss', save_best_only=True, verbose=0)
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0)

    print("- Training a hybrid floor classifier and coordinates regressor ...", end='')
    startTime = timer()
    history = model.fit(
        x={'input': rss},
        y={
            'floor_output': labels.floor,
            'coordinates_output': coord
        },
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=[checkpoint, early_stop],
        validation_split=validation_split,
        shuffle=True)
    elapsedTime = timer() - startTime
    print(" completed in {0:.4e} s".format(elapsedTime))
    model.load_weights(weights_file)  # load weights from the best model

    ### evaluate the model
    print("Evaluating the model ...")
    rss = testing_data.rss_scaled
    labels = testing_data.labels
    flrs = labels.floor
    coord = testing_data.coord  # original coordinates

    # calculate the classification accuracies and localization errors
    flrs_pred, coords_scaled_pred = model.predict(rss, batch_size=batch_size)
    flr_results = (np.equal(
        np.argmax(flrs, axis=1), np.argmax(flrs_pred, axis=1))).astype(int)
    flr_acc = flr_results.mean()
    coord_est = coord_scaler.inverse_transform(coords_scaled_pred)  # inverse-scaling

    # calculate 2D localization errors
    dist_2d = norm(coord - coord_est, axis=1)
    mean_error_2d = dist_2d.mean()
    median_error_2d = np.median(dist_2d)

    # calculate 3D localization errors
    flr_diff = np.absolute(
        np.argmax(flrs, axis=1) - np.argmax(flrs_pred, axis=1))
    z_diff_squared = (flr_height**2)*np.square(flr_diff)
    dist_3d = np.sqrt(np.sum(np.square(coord - coord_est), axis=1) + z_diff_squared)
    mean_error_3d = dist_3d.mean()
    median_error_3d = np.median(dist_3d)
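    # Equivalently, dist_3d = sqrt(dist_2d**2 + (flr_height * flr_diff)**2):
    # each misclassified floor level is charged a full floor height of
    # vertical error.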

    LocalizationResults = namedtuple('LocalizationResults', ['flr_acc',
                                                             'mean_error_2d',
                                                             'median_error_2d',
                                                             'mean_error_3d',
                                                             'median_error_3d',
                                                             'elapsedTime'])
    return LocalizationResults(flr_acc=flr_acc, mean_error_2d=mean_error_2d,
                               median_error_2d=median_error_2d,
                               mean_error_3d=mean_error_3d,
                               median_error_3d=median_error_3d,
                               elapsedTime=elapsedTime)
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
    sess = tf.Session(
        graph=tf.get_default_graph(),
        config=session_conf)  # for reproducibility
    K.set_session(sess)

    ### load datasets after scaling
    print("\nPart 1: loading data ...")

    if dataset == 'uji':
        from ujiindoorloc import UJIIndoorLoc
        ujiindoorloc = UJIIndoorLoc(
            '../data/ujiindoorloc',
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical')
        training_df, training_data, testing_df, testing_data = \
            ujiindoorloc.load_data()
    elif dataset == 'tut':
        from tut import TUT
        tut = TUT(
            path='../data/tut',
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical')
        training_df = tut.training_df
        training_data = tut.training_data
        testing_df = tut.testing_df
        testing_data = tut.testing_data