Example #1
    def __init__(self, baseplate, lock, *args, **kwargs):
        super().__init__(baseplate, lock, acl=kwargs.get('acl'))
        self.insult_array = []
        # each line of the quotes list becomes one entry
        insults = h.load_file(os.path.join(LOCAL_DIR, 'lists', 'quotes.txt'),
                              raw=True)

        if insults:
            self.insult_array = insults.splitlines()
Example #2
    def __init__(self, baseplate, lock, *args, **kwargs):
        super().__init__(baseplate, lock, acl=kwargs.get('acl'))
        self.role_modifiers_list = []
        self.roles_list = []
        self.companies_list = []
        role_modifiers = h.load_file(os.path.join(LOCAL_DIR, 'lists',
                                                  'role_modifiers.txt'),
                                     raw=True)
        roles = h.load_file(os.path.join(LOCAL_DIR, 'lists', 'roles.txt'),
                            raw=True)
        companies = h.load_file(os.path.join(LOCAL_DIR, 'lists',
                                             'companies.txt'),
                                raw=True)

        # populate only if all three list files loaded successfully
        if role_modifiers and roles and companies:
            self.role_modifiers_list = role_modifiers.splitlines()
            self.roles_list = roles.splitlines()
            self.companies_list = companies.splitlines()
Example #3
def search(collection: str, query: str, populate: bool) -> list:
    query = remove_accents(query)
    model = load_file(f"{collection}.json", model_selector(collection))

    def find_match(city: Model):
        # case-insensitive match against the accent-stripped city name
        return re.search(query, remove_accents(city.name),
                         re.IGNORECASE | re.MULTILINE) is not None

    res = list(filter(find_match, model))

    if populate:
        res = populate_collection(res, collection)

    return res
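
A hypothetical call (the collection name and query string are illustrative, not from the source); populate=True additionally enriches the matches via populate_collection:

    matches = search("cities", "sao paulo", populate=True)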
Example #4
    def __init__(self, baseplate, lock, *args, **kwargs):
        super().__init__(baseplate, lock, acl=kwargs.get('acl'))

        self._set_bot_thread()
        self._set_defaults(kwargs)

        # set tx locations and load/initialize db
        tables = {}
        table_dir = os.path.join(LOCAL_DIR, 'tables')
        for _file in os.listdir(table_dir):
            path = os.path.join(table_dir, _file)
            if os.path.isfile(path):
                # each table file is expected to yield a dict of table definitions
                tables.update(h.load_file(path))

        self._init_tx_db(tables, kwargs.get('seeds', {}))
Example #5
    def process_seeds(self, seeds):
        insert = None
        if seeds:
            for table_name, info in seeds.items():
                if table_name in self.tables:
                    table = getattr(self, table_name)

                    # only seed tables that are still empty
                    if table.count() == 0:
                        if isinstance(info, list):
                            insert = table.bulk_insert(info)
                        elif isinstance(info, dict):
                            if 'file' in info:
                                load_kwargs = info.get('load_kwargs', {})
                                data = load_file(info['file'], **load_kwargs)
                            else:
                                data = info.get('data', [])

                            transform = info.get('transform')
                            if transform:
                                data = jsearch(transform, data)

                            insert = table.bulk_insert(data)

        return insert
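
To make the accepted seed shapes concrete, here is a hypothetical seeds argument (table and file names are made up; jsearch is assumed to be a JMESPath search helper, so transform would be a JMESPath expression):

    seeds = {
        # a plain list is bulk-inserted as-is
        'quotes': [{'text': 'hello'}, {'text': 'world'}],
        # a dict may point at a file and optionally reshape the loaded data
        'users': {
            'file': 'seed_users.json',       # loaded with load_file(file, **load_kwargs)
            'load_kwargs': {},               # extra keyword arguments for load_file
            'transform': '[].{name: name}',  # applied as jsearch(transform, data)
        },
    }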
Example #6
print('Getting model...')
with open(os.path.join(model_path, 'train.pickle'), 'rb') as f:
    result = pickle.load(f)

# unpack the trained model components
label_probs = result[0]
probs_per_label = result[1]
words = result[2]
labels = result[3]

predictor = partial(helpers.predict,
                    label_probs, probs_per_label, words, labels)
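# predictor(tokens) is now equivalent to
# helpers.predict(label_probs, probs_per_label, words, labels, tokens)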

if os.path.isdir(input_path):
    print('Loading dataset...')
    test_target, test_data = helpers.load_dataset(input_path)

    print('Testing dataset...')
    print("============= RESULT ===============")
    accuracy = helpers.get_accuracy(test_data, test_target, predictor)
    print('Accuracy: {0:.2f}%'.format(accuracy * 100))
    print('=====================================')
else:
    print('Testing...')
    label, tokens = helpers.load_file(input_path)
    predict_label = predictor(tokens)
    print('================= RESULT ================')
    print('Expected label for the text: {}'.format(label))
    print('Predicted label for the text: {}'.format(predict_label))
    print('==========================================')
Example #7
def _load_results(example_name,
                  wl_method,
                  evaluation_metric,
                  fold,
                  study,
                  alpha=None,
                  inferred_predicates_file='inferred-predicates.txt'):
    # directory containing this file
    dirname = os.path.dirname(__file__)

    # read the inferred predicates (the predictions)
    if study == "sampling_study":
        tuffy_experiment_directory = "{}/../../results/weightlearning/tuffy/sampling_study/{}/{}/{}/{}/{}".format(
            dirname, example_name, wl_method, evaluation_metric, alpha, fold)
    else:
        tuffy_experiment_directory = "{}/../../results/weightlearning/tuffy/{}/{}/{}/{}/{}".format(
            dirname, study, example_name, wl_method, evaluation_metric, fold)

    results_path = os.path.join(tuffy_experiment_directory,
                                inferred_predicates_file)
    print(results_path)
    results_tmp = load_file(results_path)
    results = []

    targets_path = os.path.join(tuffy_experiment_directory, 'query.db')

    for result in results_tmp:
        if len(result) == 1:
            # discrete mode: every output in this file is "true", i.e. 1
            atom = result[0][result[0].find("(") + 1:result[0].find(")")]
            predicate = atom.replace(' ', '').split(',')
            predicate.append(1.0)
        else:
            # marginal mode: the marginal probability precedes the ground atom
            atom = result[1][result[1].find("(") + 1:result[1].find(")")]
            predicate = atom.replace(' ', '').split(',')
            predicate.append(float(result[0]))
        results.append(predicate)

    # complete the predictions for discrete mode: any target missing from the results was predicted 0
    targets_tmp = load_file(targets_path)
    targets = []
    for target in targets_tmp:
        atom = target[0][target[0].find("(") + 1:target[0].find(")")]
        predicate = atom.replace(' ', '').split(',')
        predicate.append(0.0)
        targets.append(predicate)

    # append the targets that were not in the inferred predicates
    results_dict = {(result[0], result[1]): result[2] for result in results}
    targets_dict = {(target[0], target[1]): target[2] for target in targets}
    diff = set(targets_dict.keys()) - set(results_dict.keys())
    for target in diff:
        results.append(
            [target[0], target[1], targets_dict[(target[0], target[1])]])

    return results
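
As a concrete illustration of the string handling above (the predicate and constants are made up): in marginal mode a row like ['0.87', 'Knows(alice, bob)'] parses to ['alice', 'bob', 0.87]:

    row = ['0.87', 'Knows(alice, bob)']
    atom = row[1][row[1].find("(") + 1:row[1].find(")")]  # 'alice, bob'
    parsed = atom.replace(' ', '').split(',') + [float(row[0])]
    # parsed == ['alice', 'bob', 0.87]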
Example #8
    def _load_moined(self):
        path = os.path.join(self.tx_dir, 'moined.json')
        if os.path.isfile(path):
            return h.load_file(path)

        # fall back to an empty list when the file is missing
        return []
Example #9
def main():
    st.set_option('deprecation.showfileUploaderEncoding', False)

    def _max_width_():
        max_width_str = f"max-width: 1000px;"
        st.markdown(
            f"""
        <style>
        .reportview-container .main .block-container{{
            {max_width_str}
        }}
        </style>
        """,
            unsafe_allow_html=True,
        )

    # Hide the Streamlit header and footer
    def hide_header_footer():
        hide_streamlit_style = """
                    <style>
                    footer {visibility: hidden;}
                    </style>
                    """
        st.markdown(hide_streamlit_style, unsafe_allow_html=True)

    # increases the width of the text and tables/figures
    _max_width_()

    # hide the footer and, optionally, the Streamlit menu in the top-right corner, which is unrelated to our app
    hide_header_footer()

    # show the intro page
    text_markdown.intro_page()

    # load the data, currently allows for csv and excel imports
    st.sidebar.title(":floppy_disk: Upload Your File")
    filename = st.sidebar.file_uploader("Choose a file", type=["xlsx", "csv"])

    delim = st.sidebar.selectbox("In case of a CSV file, pick the delimiter.",
                                 [",", ";", "|"])

    if filename:
        df = helpers.load_file(filename, delim)
    else:
        # no upload: fall back to the default dataset
        df = pd.read_excel("titanic.xlsx")

    # space between sections
    helpers.betweensection_space()
    helpers.sidebar_space()

    # Ensures navigation between pages
    df = eda.first_inspection(df)
    eda.visuals(df)
    helpers.betweensection_space()
    helpers.sidebar_space()
    preprocessing.preprocess(df)

    # bottom line and GitHub logo
    st.markdown("---")