Example #1
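These excerpts come from a larger module, so the shared imports and the session_conf object they reference are not repeated in each example. The following is only an assumed sketch of that module-level setup for the Keras/TensorFlow 1.x examples (deep_autoencoder, sdae, and the TUT* dataset classes are the project's own helpers; the PyTorch example further below relies on its own torch, logging, and data-loading imports):

import os
import random as rn
import sys
from collections import namedtuple
from timeit import default_timer as timer

import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import (Activation, BatchNormalization, Conv1D, Dense,
                          Dropout, Flatten, Input, Lambda, MaxPooling1D)
from keras.models import Model
from numpy.linalg import norm

# assumed TensorFlow 1.x session configuration for reproducible runs
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                              inter_op_parallelism_threads=1)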
def siso_regression_tut(
        gpu_id: int,
        dataset: str,
        frac: float,
        validation_split: float,
        preprocessor: str,
        batch_size: int,
        epochs: int,
        optimizer: str,
        dropout: float,
        corruption_level: float,
        dae_hidden_layers: list,
        sdae_hidden_layers: list,
        cache: bool,
        regression_hidden_layers: list,
        verbose: int
):
    """Multi-floor indoor localization based on three-dimensional regression of
    location coordinates using a single-input and single-output (SISO) deep
    neural network (DNN) model and TUT datasets.

    Keyword arguments:

    """

    ### initialize numpy, random, TensorFlow, and keras
    np.random.seed()            # based on current time or OS-specific randomness source
    rn.seed()                   #  "
    tf.set_random_seed(rn.randint(0, 1000000))
    if gpu_id >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
    sess = tf.Session(
        graph=tf.get_default_graph(),
        config=session_conf)
    K.set_session(sess)

    ### load datasets after scaling
    print("Loading data ...")
    if dataset == 'tut':
        from tut import TUT
        tut = TUT(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical',
            grid_size=0)
    elif dataset == 'tut2':
        from tut import TUT2
        tut = TUT2(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical',
            grid_size=0,
            testing_split=0.2)
    elif dataset == 'tut3':
        from tut import TUT3
        tut = TUT3(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical',
            grid_size=0)
    else:
        print("'{0}' is not a supported data set.".format(dataset))
        sys.exit(0)
    flr_height = tut.floor_height
    training_df = tut.training_df
    training_data = tut.training_data
    testing_df = tut.testing_df
    testing_data = tut.testing_data

    ### build and train a SISO model
    print(
        "Building and training a SISO model for three-dimensional regression ..."
    )
    rss = training_data.rss_scaled
    coord = training_data.coord_3d_scaled
    coord_scaler = training_data.coord_3d_scaler  # for inverse transform
    labels = training_data.labels
    input = Input(shape=(rss.shape[1], ), name='input')  # common input

    # (optional) build deep autoencoder or stacked denoising autoencoder
    if dae_hidden_layers != '':
        print("- Building a DAE model ...")
        model = deep_autoencoder(
            dataset=dataset,
            input_data=rss,
            preprocessor=preprocessor,
            hidden_layers=dae_hidden_layers,
            cache=cache,
            model_fname=None,
            optimizer=optimizer,
            batch_size=batch_size,
            epochs=epochs,
            validation_split=validation_split)
        x = model(input)
    elif sdae_hidden_layers != '':
        print("- Building an SDAE model ...")
        model = sdae(
            dataset=dataset,
            input_data=rss,
            preprocessor=preprocessor,
            hidden_layers=sdae_hidden_layers,
            cache=cache,
            model_fname=None,
            optimizer=optimizer,
            corruption_level=corruption_level,
            batch_size=batch_size,
            epochs=epochs,
            validation_split=validation_split)
        x = model(input)
    else:
        x = input

    # regression hidden layers
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(dropout)(x)
    if regression_hidden_layers != '':
        for units in regression_hidden_layers:
            x = Dense(units)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Dropout(dropout)(x)

    # coordinates regression output
    x = Dense(coord.shape[1], kernel_initializer='normal')(x)
    x = BatchNormalization()(x)
    coordinates_output = Activation(
        'linear', name='coordinates_output')(x)  # 'linear' activation

    model = Model(inputs=input, outputs=coordinates_output)
    model.compile(optimizer=optimizer, loss='mean_squared_error',
                  metrics=['mean_squared_error'])
    weights_file = os.path.expanduser("~/tmp/best_weights.h5")
    checkpoint = ModelCheckpoint(weights_file, monitor='val_loss', save_best_only=True, verbose=0)
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0)

    print("- Training a coordinates regressor ...", end='')
    startTime = timer()
    history = model.fit(
        x={'input': rss},
        y={'coordinates_output': coord},
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=[checkpoint, early_stop],
        validation_split=validation_split,
        shuffle=True)
    elapsedTime = timer() - startTime
    print(" completed in {0:.4e} s".format(elapsedTime))
    model.load_weights(weights_file)  # load weights from the best model

    ### evaluate the model
    print("Evaluating the model ...")
    rss = testing_data.rss_scaled
    labels = testing_data.labels
    flrs = labels.floor
    coord = testing_data.coord_3d  # original coordinates

    # calculate the classification accuracies and localization errors
    coords_scaled_pred = model.predict(rss, batch_size=batch_size)
    coord_est = coord_scaler.inverse_transform(coords_scaled_pred)  # inverse-scaling
    tmp = np.maximum(np.minimum(coord_est[:, 2], 4*tut.floor_height), 0)  # clamp to [0, 4*tut.floor_height]
    flrs_pred = np.floor(tmp/tut.floor_height + 0.5)  # floor number (0..4); N.B. round() uses round-half-to-even in Python 3, so we cannot use it here
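    # (illustration: round(2.5) == 2 in Python 3 due to round-half-to-even,
    #  whereas np.floor(2.5 + 0.5) == 3.0, i.e. consistent half-up rounding)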
    flr_results = (np.equal(np.argmax(flrs, axis=1), flrs_pred)).astype(int)
    flr_acc = flr_results.mean()

    # calculate 2D localization errors
    dist_2d = norm(coord - coord_est, axis=1)
    mean_error_2d = dist_2d.mean()
    median_error_2d = np.median(dist_2d)

    # calculate 3D localization errors
    flr_diff = np.absolute(np.argmax(flrs, axis=1) - flrs_pred)
    z_diff_squared = (flr_height**2)*np.square(flr_diff)
    dist_3d = np.sqrt(np.sum(np.square(coord - coord_est), axis=1) + z_diff_squared)
    mean_error_3d = dist_3d.mean()
    median_error_3d = np.median(dist_3d)

    LocalizationResults = namedtuple('LocalizationResults', ['flr_acc',
                                                             'mean_error_2d',
                                                             'median_error_2d',
                                                             'mean_error_3d',
                                                             'median_error_3d',
                                                             'elapsedTime'])
    return LocalizationResults(flr_acc=flr_acc, mean_error_2d=mean_error_2d,
                               median_error_2d=median_error_2d,
                               mean_error_3d=mean_error_3d,
                               median_error_3d=median_error_3d,
                               elapsedTime=elapsedTime)
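A hypothetical invocation of the function above, with illustrative parameter values only (the accepted preprocessor and optimizer names depend on the surrounding project), might look like:

results = siso_regression_tut(
    gpu_id=0, dataset='tut', frac=1.0, validation_split=0.2,
    preprocessor='standard_scaler', batch_size=64, epochs=100,
    optimizer='adam', dropout=0.25, corruption_level=0.1,
    dae_hidden_layers='', sdae_hidden_layers=[1024, 1024, 1024],
    cache=True, regression_hidden_layers=[512, 128], verbose=1)
print("Floor accuracy: {0:.4f}, mean 3D error: {1:.4f} m".format(
    results.flr_acc, results.mean_error_3d))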
Example #2
    if gpu_id >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
    # sess = tf.Session(
    #     graph=tf.get_default_graph(),
    #     config=session_conf)  # for reproducibility
    # K.set_session(sess)

    ### load datasets after scaling
    print("\nPart 1: loading data ...")
    if dataset == 'tut':
        from tut import TUT
        tut = TUT(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical',
            grid_size=0)
    elif dataset == 'tut2':
        from tut import TUT2
        tut = TUT2(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical',
            grid_size=0,
            testing_split=0.2)
    elif dataset == 'tut3':
        from tut import TUT3
        tut = TUT3(
            cache=cache,
Example #3
def simo_swt_hybrid_tut(
        gpu_id: int,
        dataset: str,
        frac: float,
        validation_split: float,
        preprocessor: str,
        batch_size: int,
        epochs: int,
        optimizer: str,
        dropout: float,
        corruption_level: float,
        dae_hidden_layers: list,
        sdae_hidden_layers: list,
        cache: bool,
        common_hidden_layers: list,
        floor_hidden_layers: list,
        coordinates_hidden_layers: list,
        verbose: int
):
    """Multi-floor indoor localization based on hybrid floor classification and
    coordinates regression using a stage-wise trained single-input and
    multi-output (SIMO) deep neural network (DNN) model and TUT datasets.

    Keyword arguments:

    """

    ### initialize numpy, random, TensorFlow, and keras
    np.random.seed()            # based on current time or OS-specific randomness source
    rn.seed()                   # "
    tf.set_random_seed(rn.randint(0, 1000000))
    if gpu_id >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
    sess = tf.Session(
        graph=tf.get_default_graph(),
        config=session_conf)
    K.set_session(sess)

    ### load datasets after scaling
    print("Loading data ...")
    if dataset == 'tut':
        from tut import TUT
        tut = TUT(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical',
            grid_size=0)
    elif dataset == 'tut2':
        from tut import TUT2
        tut = TUT2(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical',
            grid_size=0,
            testing_split=0.2)
    elif dataset == 'tut3':
        from tut import TUT3
        tut = TUT3(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical',
            grid_size=0)
    else:
        print("'{0}' is not a supported data set.".format(dataset))
        sys.exit(0)
    flr_height = tut.floor_height
    training_df = tut.training_df
    training_data = tut.training_data
    testing_df = tut.testing_df
    testing_data = tut.testing_data
        
    ### build and do stage-wise training of a SIMO model
    print(
        "Building and stage-wise training a SIMO model for hybrid classification and regression ..."
    )
    rss = training_data.rss_scaled
    coord = training_data.coord_scaled
    coord_scaler = training_data.coord_scaler  # for inverse transform
    labels = training_data.labels
    input = Input(shape=(rss.shape[1], ), name='input')  # common input

    # (optional) build deep autoencoder or stacked denoising autoencoder
    if dae_hidden_layers != '':
        print("- Building a DAE model ...")
        model = deep_autoencoder(
            dataset=dataset,
            input_data=rss,
            preprocessor=preprocessor,
            hidden_layers=dae_hidden_layers,
            cache=cache,
            model_fname=None,
            optimizer=optimizer,
            batch_size=batch_size,
            epochs=epochs,
            validation_split=validation_split)
        x = model(input)
    elif sdae_hidden_layers != '':
        print("- Building an SDAE model ...")
        model = sdae(
            dataset=dataset,
            input_data=rss,
            preprocessor=preprocessor,
            hidden_layers=sdae_hidden_layers,
            cache=cache,
            model_fname=None,
            optimizer=optimizer,
            corruption_level=corruption_level,
            batch_size=batch_size,
            epochs=epochs,
            validation_split=validation_split)
        x = model(input)
    else:
        x = input

    # common hidden layers
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(dropout)(x)
    if common_hidden_layers != '':
        for i in range(len(common_hidden_layers)):
            x = Dense(common_hidden_layers[i], name='common_hidden_layer_{:d}'.format(i))(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Dropout(dropout)(x)
    common_hl_output = x

    # floor classification output
    if floor_hidden_layers != '':
        for i in range(len(floor_hidden_layers)):
            x = Dense(floor_hidden_layers[i], name='floor_hidden_layer_{:d}'.format(i))(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Dropout(dropout)(x)
        i += 1
    else:
        i = 0
    x = Dense(labels.floor.shape[1], name='floor_hidden_layer_{:d}'.format(i))(x)
    x = BatchNormalization()(x)
    floor_output = Activation(
        'softmax', name='floor_output')(x)  # no dropout for an output layer

    # coordinates regression output
    x = common_hl_output
    if coordinates_hidden_layers != '':
        for i in range(len(coordinates_hidden_layers)):
            x = Dense(coordinates_hidden_layers[i], kernel_initializer='normal', name='coordinates_hidden_layer_{:d}'.format(i))(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Dropout(dropout)(x)
        i += 1
    else:
        i = 0
    x = Dense(coord.shape[1], kernel_initializer='normal', name='coordinates_hidden_layer_{:d}'.format(i))(x)
    x = BatchNormalization()(x)
    coordinates_output = Activation(
        'linear', name='coordinates_output')(x)  # 'linear' activation
    
    # build model
    model = Model(
        inputs=input,
        outputs=[
            floor_output,
            coordinates_output
        ])

    print("- Stage-wise training with floor information ...", end='')
    model.compile(
        optimizer=optimizer,
        loss=[
            'categorical_crossentropy',
            'mean_squared_error'
        ],
        loss_weights={
            'floor_output': 1.0,
            'coordinates_output': 0.0
        },
        metrics={
            'floor_output': 'accuracy',
            'coordinates_output': 'mean_squared_error'
        })
    weights_file = os.path.expanduser("~/tmp/best_f-weights.h5")
    checkpoint = ModelCheckpoint(weights_file, monitor='val_loss', save_best_only=True, verbose=0)
    early_stop = EarlyStopping(monitor='val_loss', patience=10, verbose=0)
    
    startTime = timer()
    f_history = model.fit(
        x={'input': rss},
        y={
            'floor_output': labels.floor,
            'coordinates_output': coord
        },
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=[checkpoint, early_stop],
        validation_split=validation_split,
        shuffle=True)
    elapsedTime = timer() - startTime
    elapsedTime_total = elapsedTime
    print(" completed in {0:.4e} s".format(elapsedTime))
    model.load_weights(weights_file)  # load weights from the best model

    print(
        "- Stage-wise training with floor-coordinates information ...", end=''
    )

    # reinitialize hidden layers based on
    # https://www.codementor.io/nitinsurya/how-to-re-initialize-keras-model-weights-et41zre2g
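    # (running a layer's kernel initializer op in the active session re-randomizes
    #  that layer's kernel weights before the second training stage; biases and
    #  BatchNormalization statistics are left as they are)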
    # - common
    if common_hidden_layers != '':
        for i in range(len(common_hidden_layers)):
            layer = model.get_layer('common_hidden_layer_{:d}'.format(i))
            if hasattr(layer, 'kernel_initializer'):
                layer.kernel.initializer.run(session=sess)
    # # - floor
    # if floor_hidden_layers != '':
    #     for i in range(len(floor_hidden_layers)):
    #         layer = model.get_layer('floor_hidden_layer_{:d}'.format(i))
    #         if hasattr(layer, 'kernel_initializer'):
    #             layer.kernel.initializer.run(session=sess)
    #     i += 1
    # else:
    #     i = 0
    # layer = model.get_layer('floor_hidden_layer_{:d}'.format(i))
    # if hasattr(layer, 'kernel_initializer'):
    #     layer.kernel.initializer.run(session=sess)
    # - coordinates
    if coordinates_hidden_layers != '':
        for i in range(len(coordinates_hidden_layers)):
            layer = model.get_layer('coordinates_hidden_layer_{:d}'.format(i))
            if hasattr(layer, 'kernel_initializer'):
                layer.kernel.initializer.run(session=sess)
        i += 1
    else:
        i = 0
    layer = model.get_layer('coordinates_hidden_layer_{:d}'.format(i))
    if hasattr(layer, 'kernel_initializer'):
        layer.kernel.initializer.run(session=sess)
                
    model.compile(
        optimizer=optimizer,
        loss=[
            'categorical_crossentropy',
            'mean_squared_error'
        ],
        loss_weights={
            'floor_output': 1.0,
            'coordinates_output': 1.0
        },
        metrics={
            'floor_output': 'accuracy',
            'coordinates_output': 'mean_squared_error'
        })
    weights_file = os.path.expanduser("~/tmp/best_fc-weights.h5")
    checkpoint = ModelCheckpoint(weights_file, monitor='val_loss', save_best_only=True, verbose=0)
    early_stop = EarlyStopping(monitor='val_loss', patience=10, verbose=0)

    startTime = timer()
    fc_history = model.fit(
        x={'input': rss},
        y={
            'floor_output': labels.floor,
            'coordinates_output': coord
        },
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=[checkpoint, early_stop],
        validation_split=validation_split,
        shuffle=True)
    elapsedTime = timer() - startTime
    elapsedTime_total += elapsedTime
    print(" completed in {0:.4e} s".format(elapsedTime))
    model.load_weights(weights_file)  # load weights from the best model

    ### evaluate the model
    print("Evaluating the model ...")
    rss = testing_data.rss_scaled
    labels = testing_data.labels
    flrs = labels.floor
    coord = testing_data.coord  # original coordinates
    x_col_name = 'X'
    y_col_name = 'Y'

    # calculate the classification accuracies and localization errors
    flrs_pred, coords_scaled_pred = model.predict(rss, batch_size=batch_size)
    flr_results = (np.equal(
        np.argmax(flrs, axis=1), np.argmax(flrs_pred, axis=1))).astype(int)
    flr_acc = flr_results.mean()
    coord_est = coord_scaler.inverse_transform(coords_scaled_pred)  # inverse-scaling

    # calculate 2D localization errors
    dist_2d = norm(coord - coord_est, axis=1)
    mean_error_2d = dist_2d.mean()
    median_error_2d = np.median(dist_2d)

    # calculate 3D localization errors
    flr_diff = np.absolute(
        np.argmax(flrs, axis=1) - np.argmax(flrs_pred, axis=1))
    z_diff_squared = (flr_height**2)*np.square(flr_diff)
    dist_3d = np.sqrt(np.sum(np.square(coord - coord_est), axis=1) + z_diff_squared)
    mean_error_3d = dist_3d.mean()
    median_error_3d = np.median(dist_3d)

    LocalizationResults = namedtuple('LocalizationResults', ['flr_acc',
                                                             'mean_error_2d',
                                                             'median_error_2d',
                                                             'mean_error_3d',
                                                             'median_error_3d',
                                                             'elapsedTime'])
    return LocalizationResults(flr_acc=flr_acc, mean_error_2d=mean_error_2d,
                               median_error_2d=median_error_2d,
                               mean_error_3d=mean_error_3d,
                               median_error_3d=median_error_3d,
                               elapsedTime=elapsedTime_total)  # total time over both training stages
Example #4
def simo_rnn_tut_pt(frac: float, validation_split: float, preprocessor: str,
                    batch_size: int, epochs: int, optimizer: str,
                    dropout: float, corruption_level: float,
                    dae_hidden_layers: list, sdae_hidden_layers: list,
                    cache: bool, rnn_hidden_size: int, rnn_num_layers: int,
                    floor_hidden_size: int, floor_num_layers: int,
                    coordinates_hidden_size: int, coordinates_num_layers: int,
                    floor_weight: float, coordinates_weight: float,
                    log_level: str, device: torch.device):
    """Multi-building and multi-floor indoor localization based on hybrid
    building/floor classification and coordinates regression using SDAE and
    SIMO RNN and TUT dataset.

    Keyword arguments:

    """

    # set logging level
    if log_level == 'CRITICAL':
        logger.setLevel(logging.CRITICAL)
    elif log_level == 'ERROR':
        logger.setLevel(logging.ERROR)
    elif log_level == 'WARNING':
        logger.setLevel(logging.WARNING)
    elif log_level == 'INFO':
        logger.setLevel(logging.INFO)
    elif log_level == 'DEBUG':
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.NOTSET)

    # load datasets after scaling
    logger.info("Loading the data ...")
    tut = TUT(cache=cache,
              frac=frac,
              preprocessor=preprocessor,
              classification_mode='hierarchical')
    flr_height = tut.floor_height
    # training_df = tut.training_df
    training_data = tut.training_data
    # testing_df = tut.testing_df
    testing_data = tut.testing_data

    logger.info("Building the model ...")
    rss = training_data.rss_scaled
    coord = training_data.coord_scaled
    coord_scaler = training_data.coord_scaler  # for inverse transform
    labels = training_data.labels
    rss_size = rss.shape[1]
    floor_size = labels.floor.shape[1]
    coord_size = coord.shape[1]

    if sdae_hidden_layers != '':
        sdae = sdae_pt(
            dataset='tut',
            input_data=rss,
            preprocessor=preprocessor,
            hidden_layers=sdae_hidden_layers,
            cache=cache,
            model_fname=None,
            optimizer=optimizer,
            corruption_level=corruption_level,
            batch_size=batch_size,
            epochs=epochs,
            # epochs=300,
            validation_split=validation_split)
        input_size = sdae_hidden_layers[-1] + 1  # 1 for floor index
    else:
        sdae = nn.Identity()
        input_size = rss_size + 1  # 1 for floor index

    rnn = nn.RNN(input_size=input_size,
                 hidden_size=rnn_hidden_size,
                 num_layers=rnn_num_layers,
                 batch_first=True,
                 dropout=(dropout if rnn_num_layers > 1 else
                          0.0))  # to turn off RNN warning messages
    fnn_floor = build_fnn(rnn_hidden_size, floor_hidden_size, floor_num_layers,
                          floor_size, dropout)
    fnn_coord = build_fnn(rnn_hidden_size, coordinates_hidden_size,
                          coordinates_num_layers, coord_size, dropout)
    model = SimoRnnFnn(sdae,
                       rnn,
                       fnn_floor,
                       fnn_coord,
                       batch_size,
                       device=device).to(device)

    logger.info("Training the model ...")
    startTime = timer()
    # N.B.: CrossEntropyLoss combines nn.LogSoftmax() and nn.NLLLoss() in one
    # single class. So we don't need softmax activation function in
    # classification.
    criterion_floor = nn.CrossEntropyLoss()
    criterion_coord = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    dataset = TutDataset(tut.training_data)
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=True,
                            drop_last=True)
    # dataloader = DataLoader(dataset, batch_size=1, shuffle=True, drop_last=True)

    for epoch in range(epochs):
        model.train()
        running_loss = 0
        for rss, floor, coord in dataloader:
            hidden = model.initHidden()

            # move data to GPU if available
            hidden = hidden.to(device, non_blocking=True)
            rss = rss.to(device, non_blocking=True)
            floor = floor.to(device, non_blocking=True)
            coord = coord.to(device, non_blocking=True)
            optimizer.zero_grad()

            # forward pass
            output_floor, output_coord, hidden = model(rss, hidden)

            loss = floor_weight * criterion_floor(output_floor, floor)
            loss += coordinates_weight * criterion_coord(output_coord, coord)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        logger.debug("[Epoch %3d] loss: %.3f", epoch + 1,
                     running_loss / len(dataloader))

    elapsedTime = timer() - startTime
    logger.info("Completed in %.4e s", elapsedTime)

    logger.info("Evaluating the model ...")
    model.eval()
    rss = testing_data.rss_scaled
    flrs = np.argmax(testing_data.labels.floor, axis=1)
    coords = testing_data.coord  # original coordinates

    dataset = TutDataset(tut.testing_data)
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            drop_last=True)

    # calculate the classification accuracies and localization errors
    flrs_pred = list()
    coords_scaled_pred = list()
    for rss, _, _ in dataloader:
        hidden = model.initHidden()

        # move data to GPU if available
        hidden = hidden.to(device, non_blocking=True)
        rss = rss.to(device, non_blocking=True)

        # run the model recursively twice for floor and location
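        # (presumably, the first pass produces a floor estimate that is fed back via
        #  the extra input feature noted above ("+ 1 for floor index"), and the
        #  second pass refines both the floor and coordinates outputs)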
        for _ in range(2):
            output_floor, output_coord, hidden = model(rss, hidden)
        if device == torch.device("cuda"):
            output_floor = output_floor.detach().cpu().clone().numpy()
            output_coord = output_coord.detach().cpu().clone().numpy()
        else:
            output_floor = output_floor.detach().clone().numpy()
            output_coord = output_coord.detach().clone().numpy()
        flrs_pred.append(output_floor)
        coords_scaled_pred.append(output_coord)

    flrs_pred = np.argmax(np.vstack(flrs_pred), axis=1)
    flrs = flrs[:flrs_pred.shape[0]]
    flr_acc = accuracy_score(flrs, flrs_pred)
    coords_scaled_pred = np.vstack(coords_scaled_pred)
    coords_est = coord_scaler.inverse_transform(
        coords_scaled_pred)  # inverse-scaling
    coords = coords[:coords_est.shape[0], :]

    # calculate 2D localization errors
    dist_2d = norm(coords - coords_est, axis=1)
    mean_error_2d = dist_2d.mean()
    median_error_2d = np.median(dist_2d)

    # calculate 3D localization errors
    flr_diff = np.absolute(flrs - flrs_pred)
    z_diff_squared = (flr_height**2) * np.square(flr_diff)
    dist_3d = np.sqrt(
        np.sum(np.square(coords - coords_est), axis=1) + z_diff_squared)
    mean_error_3d = dist_3d.mean()
    median_error_3d = np.median(dist_3d)

    LocalizationResults = namedtuple('LocalizationResults', [
        'flr_acc', 'mean_error_2d', 'median_error_2d', 'mean_error_3d',
        'median_error_3d', 'elapsedTime'
    ])
    return LocalizationResults(flr_acc=flr_acc,
                               mean_error_2d=mean_error_2d,
                               median_error_2d=median_error_2d,
                               mean_error_3d=mean_error_3d,
                               median_error_3d=median_error_3d,
                               elapsedTime=elapsedTime)
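The helpers build_fnn, SimoRnnFnn, TutDataset, and sdae_pt used above come from the surrounding project and are not shown in this excerpt. Purely as a hypothetical sketch of what the call sites assume build_fnn to produce (a plain Linear/ReLU/Dropout stack ending in a linear output layer), it might look roughly like this:

import torch.nn as nn

def build_fnn(input_size, hidden_size, num_layers, output_size, dropout):
    # hypothetical sketch only, not the project's actual implementation
    layers = []
    in_features = input_size
    for _ in range(num_layers):
        layers += [nn.Linear(in_features, hidden_size),
                   nn.ReLU(),
                   nn.Dropout(dropout)]
        in_features = hidden_size
    layers.append(nn.Linear(in_features, output_size))  # no final activation
    return nn.Sequential(*layers)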
    print("\nPart 1: loading data ...")

    if dataset == 'uji':
        from ujiindoorloc import UJIIndoorLoc
        ujiindoorloc = UJIIndoorLoc(
            '../data/ujiindoorloc',
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical')
        training_df, training_data, testing_df, testing_data = ujiindoorloc.load_data()
    elif dataset == 'tut':
        from tut import TUT
        tut = TUT(
            path='../data/tut',
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical')
        training_df = tut.training_df
        training_data = tut.training_data
        testing_df = tut.testing_df
        testing_data = tut.testing_data
    elif dataset == 'tut2':
        from tut import TUT2
        tut2 = TUT2(
            path='../data/tut',
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical',
            testing_split=0.2)
        training_df = tut2.training_df
Example #6
def simo_classification_tut(
        gpu_id: int, dataset: str, frac: float, validation_split: float,
        preprocessor: str, grid_size: float, batch_size: int, epochs: int,
        optimizer: str, dropout: float, corruption_level: float,
        num_neighbors: int, scaling: float, dae_hidden_layers: list,
        sdae_hidden_layers: list, cache: bool, common_hidden_layers: list,
        floor_hidden_layers: list, location_hidden_layers: list,
        floor_weight: float, location_weight: float, verbose: int):
    """Multi-floor indoor localization based on floor and coordinates classification
    using a single-input and multi-output (SIMO) deep neural network (DNN) model
    and TUT datasets.

    Keyword arguments:

    """

    ### initialize numpy, random, TensorFlow, and keras
    np.random.seed()  # based on current time or OS-specific randomness source
    rn.seed()  #  "
    tf.set_random_seed(rn.randint(0, 1000000))
    if gpu_id >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)

    ### load datasets after scaling
    print("Loading data ...")
    if dataset == 'tut':
        from tut import TUT
        tut = TUT(cache=cache,
                  frac=frac,
                  preprocessor=preprocessor,
                  classification_mode='hierarchical',
                  grid_size=0)
    elif dataset == 'tut2':
        from tut import TUT2
        tut = TUT2(cache=cache,
                   frac=frac,
                   preprocessor=preprocessor,
                   classification_mode='hierarchical',
                   grid_size=0,
                   testing_split=0.2)
    elif dataset == 'tut3':
        from tut import TUT3
        tut = TUT3(cache=cache,
                   frac=frac,
                   preprocessor=preprocessor,
                   classification_mode='hierarchical',
                   grid_size=0)
    else:
        print("'{0}' is not a supported data set.".format(dataset))
        sys.exit(0)
    flr_height = tut.floor_height
    training_df = tut.training_df
    training_data = tut.training_data
    testing_df = tut.testing_df
    testing_data = tut.testing_data

    ### build and train a SIMO model
    print("Building and training a SIMO model for classification ...")
    rss = training_data.rss_scaled
    coord = training_data.coord_scaled
    coord_scaler = training_data.coord_scaler  # for inverse transform
    labels = training_data.labels
    input = Input(shape=(rss.shape[1], ), name='input')  # common input

    # (optional) build deep autoencoder or stacked denoising autoencoder
    if dae_hidden_layers != '':
        print("- Building a DAE model ...")
        model = deep_autoencoder(dataset=dataset,
                                 input_data=rss,
                                 preprocessor=preprocessor,
                                 hidden_layers=dae_hidden_layers,
                                 cache=cache,
                                 model_fname=None,
                                 optimizer=optimizer,
                                 batch_size=batch_size,
                                 epochs=epochs,
                                 validation_split=validation_split)
        x = model(input)
    elif sdae_hidden_layers != '':
        print("- Building an SDAE model ...")
        model = sdae(dataset=dataset,
                     input_data=rss,
                     preprocessor=preprocessor,
                     hidden_layers=sdae_hidden_layers,
                     cache=cache,
                     model_fname=None,
                     optimizer=optimizer,
                     corruption_level=corruption_level,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_split=validation_split)
        x = model(input)
    else:
        x = input

    # common hidden layers
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(dropout)(x)
    if common_hidden_layers != '':
        for units in common_hidden_layers:
            x = Dense(units)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Dropout(dropout)(x)
    common_hl_output = x

    # floor classification output
    if floor_hidden_layers != '':
        for units in floor_hidden_layers:
            x = Dense(units)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Dropout(dropout)(x)
    x = Dense(labels.floor.shape[1])(x)
    x = BatchNormalization()(x)
    floor_output = Activation('softmax', name='floor_output')(
        x)  # no dropout for an output layer

    # location classification output
    if location_hidden_layers != '':
        for units in location_hidden_layers:
            x = Dense(units)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Dropout(dropout)(x)
    x = Dense(labels.location.shape[1])(x)
    x = BatchNormalization()(x)
    location_output = Activation('softmax', name='location_output')(
        x)  # no dropout for an output layer

    # build model
    model = Model(inputs=input, outputs=[floor_output, location_output])

    # for stage-wise training with floor information only
    model.compile(
        optimizer=optimizer,
        loss=['categorical_crossentropy', 'categorical_crossentropy'],
        loss_weights={
            'floor_output': 1.0,
            'location_output': 0.0
        },
        metrics={
            'floor_output': 'accuracy',
            'location_output': 'accuracy'
        })
    weights_file = os.path.expanduser("~/tmp/best_weights.h5")
    checkpoint = ModelCheckpoint(weights_file,
                                 monitor='val_loss',
                                 save_best_only=True,
                                 verbose=0)
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=10,
                               verbose=0)

    print("- Stage-wise training with floor information ...", end='')
    startTime = timer()
    f_history = model.fit(x={'input': rss},
                          y={
                              'floor_output': labels.floor,
                              'location_output': labels.location
                          },
                          batch_size=batch_size,
                          epochs=epochs,
                          verbose=verbose,
                          callbacks=[checkpoint, early_stop],
                          validation_split=validation_split,
                          shuffle=True)
    elapsedTime = timer() - startTime
    print(" completed in {0:.4e} s".format(elapsedTime))
    model.load_weights(weights_file)  # load weights from the best model

    # for stage-wise training with both floor and location information
    model.compile(
        optimizer=optimizer,
        loss=['categorical_crossentropy', 'categorical_crossentropy'],
        loss_weights={
            'floor_output': 1.0,
            'location_output': 1.0
        },
        metrics={
            'floor_output': 'accuracy',
            'location_output': 'accuracy'
        })
    weights_file = os.path.expanduser("~/tmp/best_weights.h5")
    checkpoint = ModelCheckpoint(weights_file,
                                 monitor='val_loss',
                                 save_best_only=True,
                                 verbose=0)
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=10,
                               verbose=0)

    print("- Stage-wise training with both floor and location information ...",
          end='')
    startTime = timer()
    fl_history = model.fit(x={'input': rss},
                           y={
                               'floor_output': labels.floor,
                               'location_output': labels.location
                           },
                           batch_size=batch_size,
                           epochs=epochs,
                           verbose=verbose,
                           callbacks=[checkpoint, early_stop],
                           validation_split=validation_split,
                           shuffle=True)
    elapsedTime = timer() - startTime
    print(" completed in {0:.4e} s".format(elapsedTime))
    model.load_weights(weights_file)  # load weights from the best model

    ### evaluate the model
    print("Evaluating the model ...")
    rss = testing_data.rss_scaled
    labels = testing_data.labels
    blds = labels.building
    flrs = labels.floor
    coord = testing_data.coord  # original coordinates
    x_col_name = 'X'
    y_col_name = 'Y'

    # calculate the classification accuracies and localization errors
    flrs_pred, locs_pred = model.predict(rss, batch_size=batch_size)
    flr_results = (np.equal(np.argmax(flrs, axis=1),
                            np.argmax(flrs_pred, axis=1))).astype(int)
    flr_acc = flr_results.mean()

    # calculate positioning error based on locations
    n_samples = len(flrs)
    n_locs = locs_pred.shape[1]  # number of locations (reference points)
    # (unsorted) indexes of up to num_neighbors nearest neighbors
    idxs = np.argpartition(locs_pred, -num_neighbors)[:, -num_neighbors:]
    threshold = scaling * np.amax(locs_pred, axis=1)
    training_labels = np.concatenate(
        (training_data.labels.floor, training_data.labels.location), axis=1)
    training_coord_avg = training_data.coord_avg
    coord_est = np.zeros((n_samples, 2))
    coord_est_weighted = np.zeros((n_samples, 2))
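    # for each test sample, keep the up-to-num_neighbors highest-scoring reference
    # locations whose scores reach the threshold, look up their training coordinates
    # on the sample's floor, and average them (plain and score-weighted); if no
    # candidate qualifies, fall back to the stored average coordinates for that
    # building/floor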
    for i in range(n_samples):
        xs = []
        ys = []
        ws = []
        for j in idxs[i]:
            if locs_pred[i][j] >= threshold[i]:
                loc = np.zeros(n_locs)
                loc[j] = 1
                rows = np.where((training_labels == np.concatenate(
                    (flrs[i], loc))).all(axis=1))  # tuple of row indexes
                if rows[0].size > 0:
                    xs.append(training_df.loc[training_df.index[rows[0][0]],
                                              x_col_name])
                    ys.append(training_df.loc[training_df.index[rows[0][0]],
                                              y_col_name])
                    ws.append(locs_pred[i][j])
        if len(xs) > 0:
            coord_est[i] = np.array((xs, ys)).mean(axis=1)
            coord_est_weighted[i] = np.array(
                (np.average(xs, weights=ws), np.average(ys, weights=ws)))
        else:
            if rows[0].size > 0:
                key = str(np.argmax(blds[i])) + '-' + str(np.argmax(flrs[i]))
            else:
                key = str(np.argmax(blds[i]))
            coord_est[i] = coord_est_weighted[i] = training_coord_avg[key]

    # calculate 2D localization errors
    dist_2d = norm(coord - coord_est, axis=1)
    dist_weighted_2d = norm(coord - coord_est_weighted, axis=1)
    mean_error_2d = dist_2d.mean()
    mean_error_weighted_2d = dist_weighted_2d.mean()
    median_error_2d = np.median(dist_2d)
    median_error_weighted_2d = np.median(dist_weighted_2d)

    # calculate 3D localization errors
    flr_diff = np.absolute(
        np.argmax(flrs, axis=1) - np.argmax(flrs_pred, axis=1))
    z_diff_squared = (flr_height**2) * np.square(flr_diff)
    dist_3d = np.sqrt(
        np.sum(np.square(coord - coord_est), axis=1) + z_diff_squared)
    dist_weighted_3d = np.sqrt(
        np.sum(np.square(coord - coord_est_weighted), axis=1) + z_diff_squared)
    mean_error_3d = dist_3d.mean()
    mean_error_weighted_3d = dist_weighted_3d.mean()
    median_error_3d = np.median(dist_3d)
    median_error_weighted_3d = np.median(dist_weighted_3d)

    LocalizationResults = namedtuple('LocalizationResults', [
        'flr_acc', 'mean_error_2d', 'mean_error_weighted_2d',
        'median_error_2d', 'median_error_weighted_2d', 'mean_error_3d',
        'mean_error_weighted_3d', 'median_error_3d',
        'median_error_weighted_3d', 'elapsedTime'
    ])
    return LocalizationResults(
        flr_acc=flr_acc,
        mean_error_2d=mean_error_2d,
        mean_error_weighted_2d=mean_error_weighted_2d,
        median_error_2d=median_error_2d,
        median_error_weighted_2d=median_error_weighted_2d,
        mean_error_3d=mean_error_3d,
        mean_error_weighted_3d=mean_error_weighted_3d,
        median_error_3d=median_error_3d,
        median_error_weighted_3d=median_error_weighted_3d,
        elapsedTime=elapsedTime)
Example #7
def simo_hybrid_tut(
        gpu_id: int, dataset: str, frac: float, validation_split: float,
        preprocessor: str, batch_size: int, epochs: int, optimizer: str,
        dropout: float, corruption_level: float, dae_hidden_layers: list,
        sdae_hidden_layers: list, cache: bool, common_hidden_layers: list,
        floor_hidden_layers: list, coordinates_hidden_layers: list,
        floor_weight: float, coordinates_weight: float, verbose: int):
    """Multi-floor indoor localization based on hybrid floor classification and
    coordinates regression using a single-input and multi-output (SIMO) deep
    neural network (DNN) model and TUT datasets.

    Keyword arguments:

    """

    ### initialize numpy, random, TensorFlow, and keras
    np.random.seed()  # based on current time or OS-specific randomness source
    rn.seed()  #  "
    tf.set_random_seed(rn.randint(0, 1000000))
    if gpu_id >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)

    ### load datasets after scaling
    print("Loading data ...")
    if dataset == 'tut':
        from tut import TUT
        tut = TUT(cache=cache,
                  frac=frac,
                  preprocessor=preprocessor,
                  classification_mode='hierarchical',
                  grid_size=0)
    elif dataset == 'tut2':
        from tut import TUT2
        tut = TUT2(cache=cache,
                   frac=frac,
                   preprocessor=preprocessor,
                   classification_mode='hierarchical',
                   grid_size=0,
                   testing_split=0.2)
    elif dataset == 'tut3':
        from tut import TUT3
        tut = TUT3(cache=cache,
                   frac=frac,
                   preprocessor=preprocessor,
                   classification_mode='hierarchical',
                   grid_size=0)
    else:
        print("'{0}' is not a supported data set.".format(dataset))
        sys.exit(0)
    flr_height = tut.floor_height
    training_df = tut.training_df
    training_data = tut.training_data
    testing_df = tut.testing_df
    testing_data = tut.testing_data

    ### build and train a SIMO model
    print(
        "Building and training a SIMO model for hybrid classification and regression ..."
    )
    rss = training_data.rss_scaled
    coord = training_data.coord_scaled
    coord_scaler = training_data.coord_scaler  # for inverse transform
    labels = training_data.labels
    input = Input(shape=(rss.shape[1], ), name='input')  # common input

    # (optional) build deep autoencoder or stacked denoising autoencoder
    if dae_hidden_layers != '':
        print("- Building a DAE model ...")
        model = deep_autoencoder(dataset=dataset,
                                 input_data=rss,
                                 preprocessor=preprocessor,
                                 hidden_layers=dae_hidden_layers,
                                 cache=cache,
                                 model_fname=None,
                                 optimizer=optimizer,
                                 batch_size=batch_size,
                                 epochs=epochs,
                                 validation_split=validation_split)
        x = model(input)
    elif sdae_hidden_layers != '':
        print("- Building an SDAE model ...")
        model = sdae(dataset=dataset,
                     input_data=rss,
                     preprocessor=preprocessor,
                     hidden_layers=sdae_hidden_layers,
                     cache=cache,
                     model_fname=None,
                     optimizer=optimizer,
                     corruption_level=corruption_level,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_split=validation_split)
        x = model(input)
    else:
        x = input
    # common hidden layers
    # x = BatchNormalization()(x)
    # x = Activation('relu')(x)
    # x = Dropout(dropout)(x)
    # if common_hidden_layers != '':
    #     for units in common_hidden_layers:
    #         x = Dense(units)(x)
    #         x = BatchNormalization()(x)
    #         x = Activation('relu')(x)
    #         x = Dropout(dropout)(x)
    #
    # common_hl_output = x
    # print(x)
    #John
    # common hidden layers

    #1D_CNN by John
    x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
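    # (the Lambda above reshapes the (batch, features) RSS input to
    #  (batch, features, 1) so the following Conv1D stack can process it as a
    #  single-channel sequence)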

    x = Conv1D(filters=99, kernel_size=22, activation='relu')(x)
    x = Dropout(dropout)(x)
    # x = Conv1D(filters=128, kernel_size=10, activation='relu')(x)

    # x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=66, kernel_size=22, activation='relu')(x)

    # x = MaxPooling1D(pool_size=2)(x)

    x = Conv1D(filters=33, kernel_size=22, activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Flatten()(x)
    #1D_CNN by John
    # if common_hidden_layers != '':
    #     for units in common_hidden_layers:
    #
    #         x = Dense(units, activation='relu')(x)
    #
    #         x = Dropout(dropout)(x)
    # x = Dense(labels.floor.shape[1])(x)
    # floor_output = Activation(
    #     'softmax', name='floor_output')(x)
    # x = Dense(coord.shape[1], kernel_initializer='normal')(x)
    # coordinates_output = Activation(
    #     'softmax', name='coordinates_output')(x)
    #John
    # build pooling layer 1
    # x = MaxPooling1D(pool_size=4)(x)
    # x = Conv1D(filters=10,
    #              kernel_size=25,
    #              padding='same',
    #              activation='relu')(x)
    # # build pooling layer 2
    # x = MaxPooling1D(pool_size=4)(x)
    # x = Dropout(0.25)(x)
    # # build flatten layer
    # x = Flatten()(x)
    # # build hidden layer
    # x = Dense(128, activation='relu')(x)
    # x = Dropout(0.5)(x)
    # x = Dense(10, activation='softmax')(x)
    #1DCNN by John
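    # (the flattened CNN features in x serve as the shared representation for both
    #  the floor classification and coordinates regression heads below)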
    n = x
    x = Dense(labels.floor.shape[1])(x)
    # x = BatchNormalization()(x)
    floor_output = Activation('softmax', name='floor_output')(x)

    common_hl_output = n
    x = common_hl_output
    x = Dense(coord.shape[1], kernel_initializer='normal')(x)
    # x = BatchNormalization()(x)
    coordinates_output = Activation('linear', name='coordinates_output')(x)
    #1DCNN by John

    #John

    # floor classification output
    # if floor_hidden_layers != '':
    #     for units in floor_hidden_layers:
    #
    #         x = Dense(units)(x)
    #         x = BatchNormalization()(x)
    #         x = Activation('relu')(x)
    #         x = Dropout(dropout)(x)
    # x = Dense(labels.floor.shape[1])(x)
    # x = BatchNormalization()(x)
    # floor_output = Activation(
    #     'softmax', name='floor_output')(x)  # no dropout for an output layer

    # John
    # x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = MaxPooling1D(pool_size=5)(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = MaxPooling1D(pool_size=5)(x)
    #
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = Dropout(dropout)(x)
    # x = Flatten()(x)
    #
    # x = Dense(labels.floor.shape[1])(x)
    # x = BatchNormalization()(x)
    # floor_output = Activation('softmax', name='floor_output')(x)
    #John

    # coordinates regression output
    # x = common_hl_output
    # for units in coordinates_hidden_layers:
    #     x = Dense(units, kernel_initializer='normal')(x)
    #     x = BatchNormalization()(x)
    #     x = Activation('relu')(x)
    #     x = Dropout(dropout)(x)
    # x = Dense(coord.shape[1], kernel_initializer='normal')(x)
    # x = BatchNormalization()(x)
    #
    # coordinates_output = Activation(
    #     'linear', name='coordinates_output')(x)  # 'linear' activation

    #John
    # x = common_hl_output
    # x = Lambda(lambda x:K.expand_dims(x,axis=-1))(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = MaxPooling1D(pool_size=5)(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = MaxPooling1D(pool_size=5)(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    # x = Dropout(dropout)(x)
    # x = Flatten()(x)
    # x = Dense(coord.shape[1],kernel_initializer='normal')(x)
    # x = BatchNormalization()(x)
    # coordinates_output = Activation(
    #     'linear', name='coordinates_output')(x)

    #2D_CNN by John
    # print(x)
    #
    # x = Lambda(lambda x:K.expand_dims(x,axis=0))(x)
    # x = Lambda(lambda x:K.expand_dims(x, axis=3))(x)
    #
    #
    # x = Conv2D(filters=128, kernel_size=(5,5), activation='relu')(x)
    # print(x)
    # x = Dropout(dropout)(x)
    #
    # x = Conv2D(filters=64, kernel_size=(5,5), activation='relu')(x)
    #
    # x = Conv2D(filters=32, kernel_size=(5,5), activation='relu')(x)
    # x = MaxPooling2D(pool_size=(2,2))(x)
    # x = Flatten()(x)
    # n = x
    # x = Dense(labels.floor.shape[1])(x)
    #
    # floor_output = Activation('softmax', name='floor_output')(x)
    #
    # common_hl_output = n
    # x = common_hl_output
    # x = Dense(coord.shape[1], kernel_initializer='normal')(x)
    # coordinates_output = Activation(
    #     'linear', name='coordinates_output')(x)
    # 2D_CNN by John

    model = Model(inputs=input, outputs=[floor_output, coordinates_output])
    model.compile(optimizer=optimizer,
                  loss=['categorical_crossentropy', 'mean_squared_error'],
                  loss_weights={
                      'floor_output': floor_weight,
                      'coordinates_output': coordinates_weight
                  },
                  metrics={
                      'floor_output': 'accuracy',
                      'coordinates_output': 'mean_squared_error'
                  })
    weights_file = os.path.expanduser("~/tmp/best_weights.h5")
    checkpoint = ModelCheckpoint(weights_file,
                                 monitor='val_loss',
                                 save_best_only=True,
                                 verbose=0)
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=10,
                               verbose=0)

    print("- Training a hybrid floor classifier and coordinates regressor ...",
          end='')
    startTime = timer()
    history = model.fit(x={'input': rss},
                        y={
                            'floor_output': labels.floor,
                            'coordinates_output': coord
                        },
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=verbose,
                        callbacks=[checkpoint, early_stop],
                        validation_split=validation_split,
                        shuffle=True)
    elapsedTime = timer() - startTime
    print(" completed in {0:.4e} s".format(elapsedTime))
    model.load_weights(weights_file)  # load weights from the best model

    ### evaluate the model
    print("Evaluating the model ...")
    rss = testing_data.rss_scaled
    labels = testing_data.labels
    flrs = labels.floor
    coord = testing_data.coord  # original coordinates

    # calculate the classification accuracies and localization errors
    flrs_pred, coords_scaled_pred = model.predict(rss, batch_size=batch_size)
    flr_results = (np.equal(np.argmax(flrs, axis=1),
                            np.argmax(flrs_pred, axis=1))).astype(int)
    flr_acc = flr_results.mean()
    coord_est = coord_scaler.inverse_transform(
        coords_scaled_pred)  # inverse-scaling

    # calculate 2D localization errors
    dist_2d = norm(coord - coord_est, axis=1)
    mean_error_2d = dist_2d.mean()
    median_error_2d = np.median(dist_2d)

    # calculate 3D localization errors
    flr_diff = np.absolute(
        np.argmax(flrs, axis=1) - np.argmax(flrs_pred, axis=1))
    z_diff_squared = (flr_height**2) * np.square(flr_diff)
    dist_3d = np.sqrt(
        np.sum(np.square(coord - coord_est), axis=1) + z_diff_squared)
    mean_error_3d = dist_3d.mean()
    median_error_3d = np.median(dist_3d)

    LocalizationResults = namedtuple('LocalizationResults', [
        'flr_acc', 'mean_error_2d', 'median_error_2d', 'mean_error_3d',
        'median_error_3d', 'elapsedTime'
    ])
    return LocalizationResults(flr_acc=flr_acc,
                               mean_error_2d=mean_error_2d,
                               median_error_2d=median_error_2d,
                               mean_error_3d=mean_error_3d,
                               median_error_3d=median_error_3d,
                               elapsedTime=elapsedTime)