def store_google_reviews_and_processed_text(gtable, target_table, city):
    """Extract every review from the Google places in *gtable* and insert it,
    together with its LDA-preprocessed text, into *target_table*.

    Parameters
    ----------
    gtable : str
        Source table of matched Google places (e.g. "matched_google_ams").
        Each row is expected to expose "id", "name", "type", "point" and a
        "json" column holding the raw Places API payload — TODO confirm
        against postgis_functions.get_rows_from_table.
    target_table : str
        Destination table (e.g. "matched_google_reviews_ams"); its ORM class
        is created by pois_storing_functions.setup_db.
    city : str
        City code; "ams" additionally prepares Dutch ("nl") stop/exclude
        lists, "ath" prepares Greek ("el"). English is always prepared.

    Notes
    -----
    Reviews in any other language are still inserted, but without a
    "processedtextlda" value. Failed inserts are rolled back and logged,
    then processing continues with the next review.
    """
    gpoints = postgis_functions.get_rows_from_table(gtable)
    session, RTable = pois_storing_functions.setup_db(target_table, "notused",
                                                      "reviews")
    # English resources are always needed; the second language depends on city.
    eng_stop, eng_exclude = setup_for_topic_modeling("en")
    if city == "ams":
        nl_stop, nl_exclude = setup_for_topic_modeling("nl")
    elif city == "ath":
        gr_stop, gr_exclude = setup_for_topic_modeling("el")
    lemma = WordNetLemmatizer()

    for g in gpoints:
        gjson = json.loads(g["json"])
        for review in gjson.get("reviews", []):
            # Build a fresh dict per review. The original reused one dict
            # across iterations, so a review in an unsupported language
            # silently inherited the previous review's "processedtextlda".
            rev = {
                "id": g["id"] + "_" + review["author_name"],
                "placesid": g["id"],
                "name": g["name"],
                "type": g["type"],
                "point": g["point"],
                "lang": review["language"],
                "text": review["text"],
            }
            print(rev["lang"])
            if rev["lang"] == "en":
                rev["processedtextlda"] = clean(review["text"], eng_stop,
                                                eng_exclude, lemma)
            elif rev["lang"] == "nl" and city == "ams":
                rev["processedtextlda"] = clean(review["text"], nl_stop,
                                                nl_exclude, lemma)
            elif rev["lang"] == "el" and city == "ath":
                rev["processedtextlda"] = clean(review["text"], gr_stop,
                                                gr_exclude, lemma)

            print(
                "############################################################"
            )
            try:
                session.add(RTable(**rev))
                session.commit()
                print(rev["name"], " INSERTED!")
            except Exception as err:
                # Roll back so the session stays usable for the next insert.
                session.rollback()
                print("# NOT INSERTED: ", err)
            print(
                "############################################################"
            )
def add_matched_placesid_from_fsqid(store_table, ftable, gtable):
    """Backfill *store_table*'s "placesid" column from the fsq→google match.

    For each Foursquare row in *ftable*, look up the Google places id it was
    matched to (via *gtable*) and write it into *store_table*, keyed on the
    Foursquare "id".
    """
    for row in postgis_functions.get_rows_from_table(ftable):
        fsq_id = row["id"]
        matched_id = postgis_functions.get_matched_placesid_from_fsqid(
            fsq_id, ftable, gtable)
        postgis_functions.update_column_to_table_by_key(
            store_table, matched_id, "placesid", "id", fsq_id)
    print("Places ids added !")
def update_language_from_langid():
    tweets = postgis_functions.get_rows_from_table("matched_twitter_ams")
    for t in tweets:
        lang = get_text_language(t["text"])
        # lang = json.loads(t["json"])["lang"]
        postgis_functions.update_tweets_language("matched_twitter_ams", lang, t["id"])
        lan = "nl"
    else:
        lan = "el"
    ftable = "matched_places_fsq_" + city
    gtable = "matched_places_google_" + city
    tweet_table = "matched_places_twitter_" + city
    store_table = "matched_places_text_features_10_25_" + city + "_2"
    num_topics_small = 10
    num_topics_big = 25
    session, TFTable = pois_storing_functions.setup_db_text(store_table, "twitter",
                                                            num_topics_small = 10,
                                                            num_topics_big = 25, lan=lan)
    format_str = "%Y-%m-%d %H:%M:%S"

    # get places
    fpoints = postgis_functions.get_rows_from_table(ftable)

    #add_matched_placesid_from_fsqid(store_table, ftable, gtable)
    #fpoints = postgis_functions.get_rows_from_id_not_in_table(ftable, store_table, "id")

    # ADD processed tweets for lda to db
    add_processed_lda_text_tweets(tweet_table, city)
    # get processed tweets for lda per language
    print("LOADING Tweets....")
    eng_tweets, other_tweets = get_processed_lda_tweets_from_db(tweet_table, load=True, city=city)
    print("TRAINING Model...")
    # train or load models
    lda_eng_5, lda_other_5, eng_dict, other_dict = get_lda_models(eng_tweets, other_tweets,
                                                        ntopics=num_topics_small, passes=20, load=True, evaluate=False, city=city)

    lda_eng_10, lda_other_10, eng_dict, other_dict = get_lda_models(eng_tweets, other_tweets,