def get_language_file_name(app_id=None):
    """Return the path of the per-app language-detection JSON file.

    Defaults to the Artifact app when no app_id is supplied.
    """
    if app_id is None:
        app_id = get_artifact_app_id()

    # Layout: <language_folder>/<app_id>.json
    return '{}{}{}'.format(get_language_folder_name(), app_id, '.json')
def main():
    """Run the review-filtering pipeline for the Artifact app."""
    # One-off pre-processing step: detect the language of every review.
    apply_language_detector()

    # Filter the Artifact reviews down to usable English ones.
    # (Result intentionally discarded here; filtering also prints counts.)
    filter_reviews(get_artifact_app_id())

    return
# Esempio n. 3
# 0
# NOTE(review): the two lines above are stray snippet-site captions
# (example number / vote count) left over from scraping, not code;
# commented out so the file parses.
def download_app_details(app_id=None):
    """Download the store details (including description) for app_id.

    Falls back to the Artifact app when no id is given.
    """
    if app_id is None:
        app_id = get_artifact_app_id()

    # steampi expects the app id as a string.
    app_details, _, _ = steampi.api.load_app_details(str(app_id))

    return app_details
def filter_reviews(app_id=None):
    """Return English reviews for app_id, dropping short and mis-flagged ones.

    Prints the review count after each filtering pass.
    """
    if app_id is None:
        app_id = get_artifact_app_id()

    reviews = load_english_reviews(app_id)

    # First pass: drop reviews below the 150-character threshold.
    reviews = filter_out_short_reviews(reviews, length_threshold=150)
    print('#reviews = {}'.format(len(reviews)))

    # Second pass: keep only reviews whose detected language is English.
    detected_languages = detect_review_language(app_id)
    reviews = filter_out_reviews_not_detected_as_english(reviews,
                                                        detected_languages)
    print('#reviews = {}'.format(len(reviews)))

    return reviews
def main():
    """Write the concatenated English reviews of each app to a text file."""
    # Start with the Artifact app, then append SteamSpy's current top 100.
    app_ids = [get_artifact_app_id()]

    data = steamspypi.download({'request': 'top100in2weeks'})
    app_ids.extend(data.keys())

    for app_id in app_ids:
        reviews = filter_reviews(app_id)

        large_text = concatenate_reviews_as_a_large_text(reviews)

        # One output file per app; print() appends the trailing newline.
        with open(get_output_file_name(app_id), 'w', encoding='utf8') as f:
            print(large_text, file=f)

    return
        except FileNotFoundError:
            print('Model not found. Setting initial epoch to 0.')
            initial_epoch = 0

    model.fit(x,
              y,
              batch_size=128,
              epochs=num_epochs,
              initial_epoch=initial_epoch,
              callbacks=[print_callback, save_callback])

    return model


if __name__ == '__main__':
    app_id = get_artifact_app_id()
    text = read_input(get_output_file_name(app_id))

    maxlen = 20
    params = get_params(maxlen)

    text = trim_text(text, params['chars'])

    model = train_model(text,
                        maxlen,
                        num_epochs=20,
                        full_model_filename=None,
                        initial_epoch=0)

    start_index = random.randint(0, len(text) - maxlen - 1)
    for diversity in [0.2, 0.5, 1.0, 1.2]: