def test_model_ensemble(model, sub_path, SRC_DIR, config):
    os.chdir(SRC_DIR)
    test_dataloader = DataSetParser(stable_mode=False,
                                    read_test_only=True,
                                    config=config)

    X_test = test_dataloader.get_dataset_test_allsnr()
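    # X_test holds every test segment across both SNR types ('LowSNR' and 'HighSNR')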

    # swap axes for sequential data
    if bool(re.search('LSTM', config.exp_name, re.IGNORECASE)) or bool(
            re.search('tcn', config.exp_name, re.IGNORECASE)):
        X_test = X_test.swapaxes(1, 2)
    else:
        X_test = np.expand_dims(X_test, axis=-1)

    # Creating DataFrame with the probability prediction for each segment
    submission = pd.DataFrame()
    submission['segment_id'] = test_dataloader.test_data[1]['segment_id']

    predictions = []
    for x, snr_type in zip(X_test, test_dataloader.test_data[1]['snr_type']):
        # route each segment to the model trained on its SNR type
        x = np.expand_dims(x, axis=0)
        if snr_type == 'LowSNR':
            predictions.append(model['LowSNR'].predict(x).flatten()[0])
        elif snr_type == 'HighSNR':
            predictions.append(model['HighSNR'].predict(x).flatten()[0])

    submission['prediction'] = predictions
    submission['prediction'] = submission['prediction'].astype('float')
    # Save submission
    submission.to_csv(sub_path, index=False)
def visualize_filters(model, config):
    stable_mode = config.get('stable_mode')
    testset_size = config.get('N_test')
    print('Loading data set')
    data_parser = DataSetParser(stable_mode=stable_mode, config=config)
    X = data_parser.train_data[0][0]
    print(X.shape)
    X = np.expand_dims(X, axis=-1)
    print(X.shape)
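    # plot the intermediate feature maps the model produces for these training samples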
    plot_feature_map(model, X)
Example #3
def test_model(model, SRC_DIR, config):
    os.chdir(SRC_DIR)
    test_dataloader = DataSetParser(stable_mode=False, read_test_only=True)
    src_path = Path(SRC_DIR)
    parent_path = str(src_path.parent)
    os.chdir(parent_path)
    X_test = test_dataloader.get_dataset_test_allsnr()
    # swap axes for sequential data
    if config.exp_name == "LSTM":
        X_test = X_test.swapaxes(1, 2)
    else:
        X_test = np.expand_dims(X_test, axis=-1)

    # Creating DataFrame with the probability prediction for each segment
    submission = pd.DataFrame()
    submission['segment_id'] = test_dataloader.test_data[1]['segment_id']
    submission['prediction'] = model.predict(X_test).flatten().tolist()
    submission['prediction'] = submission['prediction'].astype('float')
    # Save submission
    submission.to_csv('submission.csv', index=False)
Example #4
def test_model(model, sub_path, SRC_DIR, config, BEST_RESULT_DIR):
    os.chdir(SRC_DIR)
    test_dataloader = DataSetParser(stable_mode=False, read_test_only=True, config=config)
    X_test = test_dataloader.get_dataset_test_allsnr()
    if config.with_rect_augmentation or config.with_preprocess_rect_augmentation:
        X_augmented_test = expand_test_by_sampling_rect(data=X_test, config=config)
    # swap axes for sequential data
    elif bool(re.search('LSTM', config.exp_name, re.IGNORECASE)) or bool(re.search('tcn', config.exp_name, re.IGNORECASE)):
        X_test = X_test.swapaxes(1, 2)
    else:
        X_test = np.expand_dims(X_test, axis=-1)

    result_list = []
    segment_list = []
    result_list_temp = []
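    # segment_list and result_list_temp are filled per segment below; result_list holds
    # the final scores after the optional background-class correction.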
    submission = pd.DataFrame()

    # Creating DataFrame with the probability prediction for each segment
    if config.snr_type == 'all':
        segment_list = test_dataloader.test_data[1]['segment_id']
        if config.with_rect_augmentation or config.with_preprocess_rect_augmentation:
            for sampled_list_x, test_index in zip(X_augmented_test, range(len(X_augmented_test))):
                sample_result_list = []
                prev_gap_doppler_burst = config.rect_augment_gap_doppler_burst_from_edge
                while not sampled_list_x:
                    # We did not manage to sample a rectangle with the current
                    # doppler burst gap, so shrink the gap and retry.
                    config.rect_augment_gap_doppler_burst_from_edge -= 1
                    print('Reducing the doppler burst gap for test_index sample {}'.format(test_index))
                    sampled_list_x = Sample_rectangle_from_spectrogram(X_test[test_index], config)

                config.rect_augment_gap_doppler_burst_from_edge = prev_gap_doppler_burst
                print('Sampled {} rectangles for test_index sample {}'.format(len(sampled_list_x), test_index))
                sampled_list_x = np.array(sampled_list_x)
                x = np.expand_dims(sampled_list_x, axis=-1)
                sample_result_list.extend(model.predict(x, batch_size=x.shape[0]).flatten().tolist())
                # result_list.append(np.mean(sample_result_list))
                result_list_temp.append(np.mean(sample_result_list))
        else:
            # result_list = model.predict(X_test).flatten().tolist()
            result_list_temp = model.predict(X_test).flatten().tolist()

    elif config.snr_type == 'low':
        if config.with_rect_augmentation or config.with_preprocess_rect_augmentation:
            for sampled_list_x, snr_type, segment_id, test_index in zip(
                    X_augmented_test,
                    test_dataloader.test_data[1]['snr_type'],
                    test_dataloader.test_data[1]['segment_id'],
                    range(len(X_augmented_test))):
                if snr_type == 'LowSNR':
                    # keep the segment id aligned with the prediction appended below
                    segment_list.append(segment_id)
                    sample_result_list = []
                    prev_gap_doppler_burst = config.rect_augment_gap_doppler_burst_from_edge
                    while not sampled_list_x:
                        # We did not manage to sample a rectangle with the current
                        # doppler burst gap, so shrink the gap and retry.
                        config.rect_augment_gap_doppler_burst_from_edge -= 1
                        print('Reducing the doppler burst gap for test_index sample {}'.format(test_index))
                        sampled_list_x = Sample_rectangle_from_spectrogram(X_test[test_index], config)

                    config.rect_augment_gap_doppler_burst_from_edge = prev_gap_doppler_burst
                    print('Sampled {} rectangles for test_index sample {}'.format(len(sampled_list_x), test_index))
                    sampled_list_x = np.array(sampled_list_x)
                    x = np.expand_dims(sampled_list_x, axis=-1)
                    sample_result_list.extend(model.predict(x, batch_size=x.shape[0]).flatten().tolist())
                    # result_list.append(np.mean(sample_result_list))
                    result_list_temp.append(np.mean(sample_result_list))
        else:
            low_snr_list = []
            for x, snr_type, segment_id in zip(X_test,
                                               test_dataloader.test_data[1]['snr_type'],
                                               test_dataloader.test_data[1]['segment_id']):
                if snr_type == 'LowSNR':
                    low_snr_list.append(x)
                    segment_list.append(segment_id)
            sampled_list_x = np.array(low_snr_list)
            x = np.expand_dims(sampled_list_x, axis=-1)
            # result_list = model.predict(x, batch_size=x.shape[0]).flatten().tolist()
            result_list_temp = model.predict(x, batch_size=x.shape[0]).flatten().tolist()
    else:
        # High SNR run
        if config.with_rect_augmentation or config.with_preprocess_rect_augmentation:
            for sampled_list_x, snr_type, segment_id, test_index in zip(
                    X_augmented_test,
                    test_dataloader.test_data[1]['snr_type'],
                    test_dataloader.test_data[1]['segment_id'],
                    range(len(X_augmented_test))):
                if snr_type == 'HighSNR':
                    # keep the segment id aligned with the prediction appended below
                    segment_list.append(segment_id)
                    sample_result_list = []
                    prev_gap_doppler_burst = config.rect_augment_gap_doppler_burst_from_edge
                    while not sampled_list_x:
                        # We did not manage to sample a rectangle with the current
                        # doppler burst gap, so shrink the gap and retry.
                        config.rect_augment_gap_doppler_burst_from_edge -= 1
                        print('Reducing the doppler burst gap for test_index sample {}'.format(test_index))
                        sampled_list_x = Sample_rectangle_from_spectrogram(X_test[test_index], config)

                    config.rect_augment_gap_doppler_burst_from_edge = prev_gap_doppler_burst
                    print('Sampled {} rectangles for test_index sample {}'.format(len(sampled_list_x), test_index))
                    sampled_list_x = np.array(sampled_list_x)
                    x = np.expand_dims(sampled_list_x, axis=-1)
                    sample_result_list.extend(model.predict(x, batch_size=x.shape[0]).flatten().tolist())
                    # result_list.append(np.mean(sample_result_list))
                    result_list_temp.append(np.mean(sample_result_list))
        else:
            high_snr_list = []
            for x, snr_type, segment_id in zip(X_test,
                                               test_dataloader.test_data[1]['snr_type'],
                                               test_dataloader.test_data[1]['segment_id']):
                if snr_type == 'HighSNR':
                    high_snr_list.append(x)
                    segment_list.append(segment_id)
            sampled_list_x = np.array(high_snr_list)
            x = np.expand_dims(sampled_list_x, axis=-1)
            # result_list = model.predict(x, batch_size=x.shape[0]).flatten().tolist()
            result_list_temp = model.predict(x, batch_size=x.shape[0]).flatten().tolist()


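    # When the model was trained with an extra background class it emits three scores
    # per segment; fold them back into a single binary probability (column order
    # assumed here, matching the arithmetic below: [class_0, class_1, background]).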
    if config.learn_background:
        result_list_temp = np.array(result_list_temp).reshape((-1, 3))
        if config.background_implicit_inference:
            y_pred_2 = np.array([[y[0], y[1] + y[2]] for y in result_list_temp])
        else:
            y_pred_2 = np.array([[y[0] / (1 - y[2]), y[1] / (1 - y[2])] for y in result_list_temp])
        y_pred_2 = np.array([y / (y[0] + y[1]) if y[0] + y[1] > 1 else y for y in y_pred_2]) # numeric correction
        result_list = [y[0] if y[0] > y[1] else 1 - y[1] for y in y_pred_2]
    else:
        result_list = result_list_temp

    submission['segment_id'] = segment_list
    submission['prediction'] = result_list
    submission['prediction'] = submission['prediction'].astype('float')
    # Save submission
    submission.to_csv(sub_path, index=False)
Example #5
def main():
    # capture the config path from the run arguments
    # then process configuration file
    SRC_DIR = os.getcwd()
    RADAR_DIR = os.path.join(SRC_DIR, os.pardir)
    config = preprocess_meta_data(SRC_DIR)
    exp_name = config.exp_name

    # for graph_dir and log file
    now = datetime.now()
    date = now.strftime("%Y_%m_%d_%H_%M_%S")
    exp_name_time = '{}_{}'.format(exp_name, date)
    # visualize training performance
    graph_path = os.path.join(RADAR_DIR, 'graphs', exp_name_time)
    if not os.path.exists(graph_path):
        os.makedirs(graph_path)
    LOG_DIR = os.path.join(RADAR_DIR, 'logs')
    if not os.path.exists(LOG_DIR):
        os.makedirs(LOG_DIR)
    log_path = '{}/{}.log'.format(LOG_DIR, exp_name_time)

    config = adjust_input_size(config)

    # assert configurations
    assert not (config.learn_background and config.with_rect_augmentation)
    # assert not(config.background_implicit_inference)
    assert not (config.load_complete_model_from_file
                and config.load_model_weights_from_file)
    assert config.load_complete_model_from_file or config.load_model_weights_from_file
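    # Exactly one loading mode must be selected: either a complete saved model file
    # or a model rebuilt from the config with weights loaded from file.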

    if config.load_model_weights_from_file:
        # build the model
        print('CURRENT DIR: {}'.format(os.getcwd()))
        adjust_input_size(config)
        model_dict = build_model(config)
        model_dict['train'].load_weights(config.model_weights_file)
        model = model_dict['train']
        model.compile(optimizer=Adam(learning_rate=config.learning_rate),
                      loss=BinaryCrossentropy(),
                      metrics=['accuracy', AUC()])
        # model_name = 'full_test_auc_95_0168'
        # print('saveing model to: {}/{}'.format(os.getcwd(),model_name))
        # model.save(model_name)
    elif config.load_complete_model_from_file:
        model = tf.keras.models.load_model(config.complete_model_file)
    else:
        raise Exception('Invalid Configuration...')

    # evaluate model
    if config.use_public_test_set:
        print(40 * '#')
        print('Model evaluation on FULL public test set:')
        os.chdir(SRC_DIR)
        eval_dataparser = DataSetParser(stable_mode=False,
                                        read_validation_only=True,
                                        config=config)
        X_valid, labels_valid = eval_dataparser.get_dataset_by_snr(
            dataset_type='validation', snr_type=config.snr_type)
        y_valid = np.array(labels_valid['target_type'])
        if config.with_rect_augmentation:
            X_augmented_test = expand_test_by_sampling_rect(data=X_valid,
                                                            config=config)
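            # average the prediction over all rectangles sampled from each validation segment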
            y_pred = []
            for sampled_list_x, test_index in zip(X_augmented_test,
                                                  range(len(X_augmented_test))):
                sample_result_list = []
                sampled_list_x = np.array(sampled_list_x)
                x = np.expand_dims(sampled_list_x, axis=-1)
                sample_result_list.extend(
                    model.predict(x, batch_size=x.shape[0]).flatten().tolist())
                y_pred.append(np.mean(sample_result_list))
            # raise Exception('Currently not supported')
            y_pred = np.array(y_pred)
        else:
            X_valid = np.expand_dims(X_valid, axis=-1)
            y_pred = model.predict(X_valid)
            res = model.evaluate(X_valid, y_valid)
        print('roc_auc_score on FULL public test: {}'.format(
            roc_auc_score(y_valid, y_pred)))
    else:
        raise Exception(
            'Invalid Configuration..., use config.use_public_test_set = True')

    SUB_DIR = os.path.join(RADAR_DIR, 'submission_files')
    BEST_RESULT_DIR = os.path.join(RADAR_DIR, 'best_preformance_history')
    if not os.path.exists(SUB_DIR):
        os.makedirs(SUB_DIR)
    sub_path = "{}/submission_{}.csv".format(SUB_DIR, exp_name_time)
    test_model(model, sub_path, SRC_DIR, config, BEST_RESULT_DIR)

    # if config.save_history_buffer is True:

    print('#' * 70)
    print('submission file is at: {}'.format(sub_path))
    print('')