def unwanted_copies(path):
    """Remove duplicated evolution rows from the test evolutions table.

    Evolutions sharing the same (ba_id, initialdate, finaldate, area_ha)
    are considered copies; the first id of each group is kept and every
    other id is deleted from the table.

    Parameters
    ----------
    path : str
        Working directory where the intermediate CSV files are written.

    Returns
    -------
    int
        Always 0.
    """
    os.chdir(path)
    table_name = 'gw_burntarea_effis.rob_ba_evo_test'
    df, gdf = db_connection(table_name, use='r')
    print('The number of evolutions to check is {}\n'.format(gdf.shape[0]))
    print(type(df['initialdate'][0]))
    # Normalise timestamps to plain dates so grouping ignores the time part.
    df['initialdate'] = df['initialdate'].apply(lambda x: x.date())
    # nduplicates: count of evolutions sharing the same key columns.
    nduplicates = df.groupby(['ba_id', 'initialdate', 'finaldate', 'area_ha'
                              ])['id'].nunique().sort_values(ascending=False)
    nduplicates = pd.DataFrame(nduplicates)
    # Round-trip through CSV to flatten the grouped index back into columns.
    nduplicates.to_csv('duplicated_evolutions_number.csv')
    nduplicates = pd.read_csv('duplicated_evolutions_number.csv')
    # evo: the single id to keep for each duplicated group.
    evo = df.groupby(['ba_id', 'initialdate', 'finaldate', 'area_ha'])['id'].first().sort_values(ascending=False)
    evo = pd.DataFrame(evo)
    evo.to_csv('correct_evolutions.csv')
    evo = pd.read_csv('correct_evolutions.csv')
    evo_merge = evo.merge(nduplicates[['ba_id', 'id']], how='inner',
                          left_on='ba_id', right_on='ba_id')
    evo_merge.to_csv('evo_merge.csv')
    evo_merge = pd.read_csv('evo_merge.csv')
    evo_merge = evo_merge.rename(columns={"id_x": "evo_id", "id_y": "count"})
    # Every id that is not a kept id is redundant and will be deleted.
    id_redundant = set(df.id) - set(evo_merge.evo_id)
    # Build the SQL "IN (...)" list with a join instead of manual
    # concatenation (the original also produced a malformed ")" when the
    # redundant set was empty).
    sql_list = '({})'.format(', '.join(str(i) for i in id_redundant))
    print(sql_list)
    if gdf.shape[0] - evo.shape[0] != 0 and id_redundant:
        print('The number of cleaned up evolutions is {}. '
              '{} duplicates will be eliminated.\n'
              .format(len(set(evo_merge.evo_id)), len(id_redundant)))
        writing_table_name = 'gw_burntarea_effis.rob_ba_evo_test'
        sql_query = 'DELETE FROM {} WHERE id IN {}'.format(
            writing_table_name, sql_list)
        print(sql_query)
        db_connection(writing_table_name, use='w', statement=sql_query)
    else:
        print('No update needed on the evolutions table')
    return 0
def get_all_users(lower_bound=0, upper_bound=1):
    """Fetch the names of unprocessed influenze-project users whose id lies
    strictly between *lower_bound* and *upper_bound*.

    The bounds are sent as bind parameters instead of being concatenated
    into the SQL text (avoids injection and quoting issues).

    Returns a list of (user_name,) tuples, highest id first.
    """
    mydb = db_connection()
    cursor = mydb.cursor()
    stmt = ("SELECT user_name from user where processed = 0 "
            "AND influenze_project = 1 AND user_id is not null "
            "AND id > %s AND id < %s order by id desc")
    cursor.execute(stmt, (lower_bound, upper_bound))
    return cursor.fetchall()
def main():
    """Run every chapter of the advance report in sequence."""
    root = 'C:/Users/piermaio/Documents/gisdata/jrc/advance_report/'
    # PM local path to the corine geotif
    corine_path = 'C:\\Users\\piermaio\\Documents\\gisdata\\jrc\\BAmapping\\Corine\\raster\\Corine_globcover_MA_TU_ukraine.tif'
    if not os.path.exists(root):
        os.makedirs(root)
    os.chdir(root)
    # Pull every dataset the chapters need with a single connection to the
    # yearly burnt-area export table.
    table_name = 'gw_burntarea_effis.ba_oracle_export_year'
    (df_sql, gdf_sql, nat2k_year, nat2kweek, df_nations, nat2k_by_country,
     nat2k_areas, df_nat2k_countries, df_nat2k_sort_countries,
     df_nat2k_sort_area) = db_connection.db_connection(table_name)
    print('-- Processing chapter 1\n')
    ba_ref = ba_and_number_by_country.main(df_sql, df_nations)
    print('-- Processing chapter 2\n')
    natura2000_protected_areas.main(df_nat2k_countries)
    print('-- Processing chapter 3\n')
    monthly_ba_and_number.main(df_sql, df_nations)
    print('-- Processing chapter 4\n')
    ba_by_fire_class_in_eu.main(df_sql, df_nations)
    print('-- Processing chapter 5\n')
    list_eu_fires_gt_500ha.sum_count(df_sql, df_nations)
    print('-- Processing chapter 6\n')
    weekly_evolutions.main(df_sql, df_nations)
    # Chapter 8 runs before chapter 7: it produces a table chapter 7 needs.
    print('-- Processing chapter 8\n')
    df_tab_corine = corine_landuse_stats_by_country.main(
        ba_ref, gdf_sql, corine_path)
    print('-- Processing chapter 7\n')
    # df_tab_corine = pd.read_csv('temp_corine.csv')
    landcover_by_country_and_comparison_with_history.main(df_tab_corine)
    # Chapter 9 is still under construction (see sql_queries); it needs a
    # new ba/nat2k intersection followed by zonal stats over the corine.
    print('-- Processing chapter 9\n')
    print(
        'Not available until we define the right natura2k layer used, also it requires a new intersection between ba and nat2k followed by zonal stats over the corine'
    )
    # natura2000_corinelandcover_stats_by_country.main()
    print('-- Processing chapter 10\n')
    natura2000_sites_list_by_country.main(df_nat2k_sort_countries,
                                          df_nat2k_sort_area)
    print('-- End of processing --\n')
    return 0
def get_if_product_exists_in_specific_table(table, column_name, product_name):
    """Return 1 when the id of *product_name* appears in ``table.column_name``,
    otherwise 0.

    NOTE(review): the query is assembled by string concatenation; the id
    comes from get_product_id_by_name, but do not pass untrusted table or
    column names here.
    """
    product_id = str(get_product_id_by_name(product_name))
    db = db_connection.db_connection()
    db.connect()
    cursor = db.query("SELECT " + column_name + " FROM " + table +
                      " WHERE " + column_name + "=" + '"' + product_id + '"')
    # A single matching row is enough to declare existence.
    return 1 if int(cursor.rowcount) > 0 else 0
def set_processed(username, code=1):
    """Set the processed flag of *username* to *code* (default 1)."""
    query = "UPDATE `user` SET processed = %s where user_name = %s"
    connection = db_connection()
    try:
        cur = connection.cursor()
        cur.execute(query, (code, username))
        connection.commit()
    except mysql.connector.Error as err:
        # Report the failure but do not propagate it to the caller.
        print("Something went wrong: {}".format(err))
def get_score(student_id):
    """Return every exam row for *student_id* as a list of dicts.

    Each dict maps the column names in *header* to the corresponding
    values of one result row.

    NOTE(review): *student_id* is concatenated straight into the SQL text;
    callers must not pass untrusted input until db.query grows
    bind-parameter support.
    """
    db = db_connection.db_connection()
    data = db.query("SELECT student.student_id, student.student_name, Exam.subject_id, subject.subject_name, Exam.exam_id, exam_name, exam_heso, exam_score, exam_date FROM Student, student_subject, subject, Exam WHERE student.student_id = student_subject.student_id and student_subject.subject_id = Subject.subject_id and subject.subject_id = Exam.subject_id and student.student_id = '" + student_id + "'")
    header = ["student_id", "student_name", "subject_id", "subject_name",
              "exam_id", "exam_name", "exam_heso", "exam_score", "exam_date"]
    # zip() pairs each column name with its value -- clearer than the
    # original index-driven inner loop (and drops the stray ";").
    return [dict(zip(header, row)) for row in data]
def get_subject():
    """Return every Subject row as a JSON array of {"id", "name"} objects."""
    db = db_connection.db_connection()
    data = db.query("SELECT * FROM Subject")
    header = ["id", "name"]
    # Same dict(zip(...)) idiom as get_score, replacing the index loop.
    return jsonify([dict(zip(header, row)) for row in data])
def main(access_token, general_configurations):
    """Point the DB layer at the local "great_library" database, then start
    the bot with the given token and configuration."""
    database_name = "great_library"
    hostname = "localhost"
    username = "******"
    password = "******"
    db = db_connection.db_connection()
    db.set_database_parameters(database_name, hostname, username, password)
    BotActions(access_token, general_configurations)
def login():
    # Login view: GET renders the form, POST checks the submitted credentials.
    if request.method == 'GET':
        return render_template("login.html")
    else:
        # NOTE(review): id and password are concatenated straight into the
        # SQL text and the password is compared in plain text -- SQL
        # injection and credential-storage risks; switch to bind parameters
        # and hashed passwords.
        id = request.form['id']
        password = request.form['password']
        db = db_connection.db_connection()
        data = db.query("SELECT * FROM `user` WHERE user_id = \"" + id + "\" and user_password = \"" + password + "\"")
        # NOTE(review): the failure branch does nothing and no success
        # branch exists -- this function looks unfinished; confirm against
        # the full file.
        if len(data) == 0:
            pass
def single_value_select(table, column_needed, condition_column, condition_value):
    """Return the first *column_needed* value from *table* where
    *condition_column* equals *condition_value*, or -1 when no row matches.

    NOTE(review): the query is built by concatenation -- only call this
    with trusted identifiers and values.
    """
    db = db_connection.db_connection()
    db.connect()
    cursor = db.query("SELECT " + column_needed + " FROM " + table + " WHERE " + condition_column + "=" + '"' + str(condition_value) + '"')
    # The original looped over rowcount but always returned on the first
    # iteration; fetch the single row directly instead.
    if int(cursor.rowcount) > 0:
        row = cursor.fetchone()
        return row[0]
    return -1
def add_user(user_name, user_id):
    """Insert a new (user_id, user_name) row into the user table.

    Returns 0 on success; returns None (after printing the error) when the
    insert fails.
    """
    print("adding user now.")
    connection = db_connection()
    insert_stmt = "INSERT INTO user (user_id, user_name) VALUES (%s, %s)"
    try:
        cur = connection.cursor()
        cur.execute(insert_stmt, (user_id, user_name))
        connection.commit()
        return 0
    except mysql.connector.Error as err:
        print("Something went wrong: {}".format(err))
def main():
    """Tweet a birthday wish for every due target, recording the year of
    the wish so it is not repeated."""
    conn, cur = db_connection()
    api = get_api()
    for wish in users_to_tweet(conn, cur):
        status = f'Happy birthday from {wish.wisher_displayed_name} @{wish.target_username}, time flies {wish.target_name}!'
        print(f'Trying to tweet this: {status}')
        api.update_status(status=status)
        # Remember that this year's wish has been sent for this target.
        update_sql = (
            "UPDATE targets SET last_wish_year = date_part('year', CURRENT_DATE)\n"
            "WHERE username = %s"
        )
        cur.execute(update_sql, [wish.target_username])
        conn.commit()
        print("Successfully tweeted!")
def add_single_entry_single_value_no_dup(table_name, column_name, value):
    """Insert *value* into ``table_name.column_name`` unless a product with
    that name already exists.

    Returns 0 on success, -1 when the product already exists, -3 when
    *value* is empty/missing.

    NOTE(review): *value* is spliced into the SQL text -- callers must not
    pass untrusted strings until the db layer supports bind parameters.
    """
    if not value:
        return -3
    # A known product id means the value would be a duplicate.
    if get_product_id_by_name(value) != -1:
        return -1
    query = "INSERT INTO " + table_name + " (" + column_name + ") VALUES ('" + value + "')"
    db = db_connection.db_connection()
    db.connect()
    db.query(query)
    return 0
def add_follower(follower_id, followee_id):
    """Insert a follower -> followee relationship into the follow table.

    Returns 1 when the insert fails (matching the original error path);
    returns None on success.
    """
    mydb = db_connection()
    stmt1 = "INSERT INTO follow (follower_id, followee_id) VALUES (%s, %s)"
    try:
        cursor = mydb.cursor()
        cursor.execute(stmt1, (follower_id, followee_id))
        mydb.commit()
    except Exception as err:
        # Was a bare "except:" -- narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed, and the actual error is now reported.
        # (Also dropped the dead 'stmt2 = ""' assignment from the try body.)
        print("Exception adding Follower Relationship")
        print(err)
        return 1
def add_pair_data_entry_single_value_no_dup(table_name, first_column, first_value, second_column, second_value):
    """Insert (product-id-of-first_value, second_value) into *table_name*.

    Returns 0 on success, -3 when *first_value* is empty/missing.

    NOTE(review): values are spliced into the SQL text -- do not pass
    untrusted input until the db layer supports bind parameters.
    """
    if not first_value:
        return -3
    product_id = str(get_product_id_by_name(first_value))
    # Fixed: the original emitted "', ' " before the second value, storing
    # it with a stray leading space (e.g. " 42" instead of "42").
    query = ("INSERT INTO " + table_name + " (" + first_column + ", " + second_column +
             ") VALUES ('" + product_id + "', '" + str(second_value) + "')")
    db = db_connection.db_connection()
    db.connect()
    db.query(query)
    return 0
def update_user(user_name, user_id):
    """Store *user_id* for *user_name*.

    Returns 0 on success and 1 on a database error. A zero *user_id* is
    treated as "no id available": only the processed flag is set to 2 and
    None is returned.
    """
    connection = db_connection()
    if user_id == 0:
        # Nothing to store -- just mark the user accordingly.
        set_id_processed(user_name, 2)
        return
    try:
        update_stmt = "UPDATE `user` SET user_id = %s where user_name = %s"
        cur = connection.cursor()
        cur.execute(update_stmt, (user_id, user_name))
        connection.commit()
        set_id_processed(user_name, 1)
        return 0
    except mysql.connector.Error as err:
        print("Something went wrong: {}".format(err))
        return 1
def friends():
    """Return the authenticated wisher's birthday targets as JSON."""
    headers = request.headers
    conn, cur = db_connection()
    try:
        wisher_id = get_id_by_username_and_password(headers, cur)
    except KeyError:
        # Credential headers missing.
        return Response(status=400)
    except TypeError:
        # Credentials did not match any user.
        return Response(status=401)
    cur.execute(
        "SELECT name, username, birthday, wisher_displayed_name FROM targets \
        WHERE wisher_id = %s", [wisher_id])
    rows = cur.fetchall()
    return jsonify([{
        'name': name,
        'username': username,
        'birthday': birthday.strftime('%Y-%m-%d'),
        'wisher_displayed_name': displayed_name
    } for name, username, birthday, displayed_name in rows])
def sign_up():
    """Create a new user account.

    Responds 201 on success, 400 when a required header is missing and
    409 when the username is already taken.
    """
    headers = request.headers
    conn, cur = db_connection()
    required = ('username', 'password', 'default_wisher_displayed_name')
    try:
        username, password, default_wisher_displayed_name = (
            headers[key] for key in required)
    except KeyError:
        # A required header is absent.
        return Response(status=400)
    try:
        # Only the SHA-256 hash of the password is stored.
        cur.execute(
            "INSERT INTO users (username, hashed_password, default_wisher_displayed_name) VALUES (%s, %s, %s)",
            [username, to_sha256(password), default_wisher_displayed_name])
    except pg.errors.UniqueViolation:
        return Response(status=409)
    conn.commit()
    return Response(status=201)
def update_protein_amount_by_product_name(product_name, protein_amount):
    """Set the protein amount for *product_name*, inserting a new row when
    none exists yet.

    Returns 0 on success, -2 when the product is unknown.
    """
    product_id = get_product_id_by_name(product_name)
    # Fixed: the original compared str(product_id) == -1, which can never
    # be true, so unknown products fell through to a broken UPDATE.
    if product_id == -1:
        return -2
    product_id = str(product_id)
    table = "protein_per_product"
    column_value = "value"
    value = str(protein_amount)
    product_column = "product"
    entry_exist_flag = get_if_product_exists_in_specific_table(
        "protein_per_product", product_column, product_name)
    if entry_exist_flag == 0:
        # No row yet for this product: create one instead of updating.
        add_protein_amount_by_product_name(product_name, protein_amount)
        return 0
    query = "UPDATE " + table + " SET " + column_value + " = '" + value + "' WHERE " + \
        product_column + " = '" + product_id + "'"
    db = db_connection.db_connection()
    db.connect()
    db.query(query)
    return 0
def retreive_information(key_word):
    """Look up *key_word* in the history table and, when the matching row
    carries an XML reference, return the related XML row as a string.

    Returns '' when nothing is found or a query fails.

    NOTE(review): *key_word* is formatted straight into both queries --
    do not pass untrusted input.
    """
    query = "select * from agentdb.historial where palabra_clave like '%{}%' limit 1".format(
        key_word)
    xml_query = "select * from agentdb.xml where id_xml = {}".format(key_word)
    print(query)
    conn = db_connection()
    cursor = conn.cursor()
    try:
        cursor.execute(query)
        result_set = cursor.fetchone()
        # Guard against "no row found" before indexing -- the original
        # raised a TypeError on a miss instead of returning ''.
        if result_set is not None and result_set[3] is not None:
            cursor.execute(xml_query)
            xml_file = cursor.fetchone()
            return str(xml_file)
    except Exception as error:
        print(error)
        print('No se pudo obtener historial')
    return ''
def get_user(search=None):
    """Look up users.

    - search is None: execute the all-names query and return its result
      (matching the original behaviour).
    - search is an int: look up one user by id.
    - otherwise: look up one user by user_name.

    Returns the first matching row for the parameterised queries, or 1
    when the query fails.
    """
    mydb = db_connection()
    cursor = mydb.cursor()
    # Fixed: the original tested type(search) == 'int' (a type object
    # compared to a string), which is never true, so integer lookups
    # silently used the user_name query.
    if search is None:
        stmt = "SELECT user_name FROM user"
    elif isinstance(search, int):
        stmt = "SELECT * FROM user where id = %s"
    else:
        stmt = "SELECT * FROM user WHERE user_name = %s"
    try:
        if search is None:
            return cursor.execute(stmt)
        # Bind parameters must be a sequence, not a bare value.
        cursor.execute(stmt, (search,))
        return cursor.fetchone()
    except Exception as e:
        # Fixed: Exception.message does not exist on Python 3.
        print("Error '{0}' occured. Arguments {1}.".format(e, e.args))
        return 1
def add_friend():
    """Register a new birthday target for the authenticated wisher.

    Responds 201 on success, 400 for missing headers, 401 for bad
    credentials.
    """
    headers = request.headers
    conn, cur = db_connection()
    try:
        wisher_id = get_id_by_username_and_password(headers, cur)
        target_name = headers['target_name']
        target_username = headers['target_username']
        target_birthday = headers['target_birthday']
        # Can be null if there is not a preferred name to display.
        wisher_displayed_name = headers.get('wisher_displayed_name')
    except KeyError:
        return Response(status=400)
    except TypeError:
        return Response(status=401)
    # TODO add duplicate error handling.
    row = [target_name, target_username, target_birthday, wisher_id,
           wisher_displayed_name]
    cur.execute(
        "INSERT INTO targets (name, username, birthday, wisher_id, wisher_displayed_name) \
        VALUES (%s, %s, %s, %s, %s)", row)
    conn.commit()
    return Response(status=201)
def insert():
    """Insert a hard-coded demo row into angular_login, closing the
    connection afterwards."""
    add_user = ("INSERT INTO angular_login "
                "(name, username, password) "
                "VALUES (%(name)s, %(username)s, %(password)s)")
    data_insert = {
        'name': "Raghav",
        'username': "******",
        'password': "******",
    }
    cnx, cursor = db_connection()
    print(cnx, cursor)
    # Fixed: identity comparison with None ("is not None") instead of
    # the "!= None" anti-pattern.
    if cnx is not None and cursor is not None:
        try:
            cursor.execute(add_user, data_insert)
            cnx.commit()
            print("Data Inserted")
        except Exception as e:
            print(e)
        finally:
            # Make sure data is committed to the database
            print("Close")
            cursor.close()
            cnx.close()
from db_connection import db_connection # Creacion de objeto de conexion a la db conn = db_connection() # crea cursor de tipo dict cursor = conn.cursor(dictionary=True) #funcion de consulta generica, retorna una lista def generic_query(query): try: cursor.execute(query) return cursor.fetchall() except Exception as error: print(error) print('Error al realizar la consulta ' + query) return None def generic_insert(insert): try: cursor.execute(insert) conn.commit() return 1 except Exception as error: print(error) print('Error insertando registro ' + insert) return 0 def generic_db_opperation(query):
# Twint search configuration: scrape tweets matching "vax", up to 100 per
# pass, resuming from a fixed tweet id, keeping results in memory.
c = twint.Config()
c.Limit = 100
c.Hide_output = True
c.Search = "vax"
c.Store_object = True
c.Resume = 1134879742304276480

# Counters for progress reporting and bookkeeping.
user_added = 0
user_present = 0
tweet_added = 0
tweet_present = 0
progress = 0
loops = 2
limit = c.Limit * loops
mydb = db_connection()
# NOTE(review): "loops" is never decremented in the visible code, so this
# loop cannot terminate here -- the chunk appears truncated; confirm
# against the full file.
while loops >= 1:
    try:
        twint.run.Search(c)
    except:
        # TODO: Figure out the error
        print("UNDETERMINED ERROR")
    tweets = twint.output.tweets_object
    for tweet in tweets:
        progress = 1 + progress
        print(str(progress) + "/" + str(limit))
        single_tweet = tweet
        tweet_id = single_tweet.id
#!/usr/bin/python
# -*- coding: utf-8 -*-

from db_connection import db_connection


def send_query(con, query=None):
    """Execute *query* on connection *con* and return all result rows.

    When *query* is None the user is prompted for one interactively.
    On any error the message is printed and the process exits.
    """
    try:
        # Get a cursor object from the connection.
        cur = con.cursor()
        # Ported from Python 2: raw_input() -> input().
        if not query:
            query = input('Enter query: ')
        cur.execute(query)
        # Collect and return all rows.
        results = cur.fetchall()
        return results
    except Exception as err:
        # Ported from Python 2: "print 'Error: %s' % err" was a
        # SyntaxError under the Python 3 used elsewhere in this file.
        print('Error: %s' % err)
        exit()


if __name__ == '__main__':
    env = '.env'
    con = db_connection(env)
    results = send_query(con)
class Crawler:
    # Database connection object shared by the whole class.
    conn = db_connection()
    # Dict-style cursor (rows come back as dictionaries).
    cursor = conn.cursor(dictionary=True)
    # Query constants.
    URL_QUERY = 'select * from agentdb.url where crawled_ind = 0 limit 1'
    URL_UPDATE = 'update agentdb.url set crawled_ind = {} where id_url = {}'
    THEME_INFORMATION = 'select tema from agentdb.tema where id_tema = {}'
    # Default output folder for the crawled pages.
    folder_name = 'HTML'

    def __init__(self, folder_name):
        # Store the output folder on the class and start crawling at once.
        Crawler.folder_name = folder_name
        Crawler.crawl_page('crawler_init')

    # Fetch one not-yet-crawled URL record from the url table.
    # Returns (id_url, url, id_tema, institucion) on success, ('', '') on
    # query failure; NOTE(review): returns None when the query simply
    # yields no rows (falls off the for loop) -- callers check for that.
    @staticmethod
    def fetch_url_info():
        try:
            Crawler.cursor.execute(Crawler.URL_QUERY)
            for row in Crawler.cursor:
                return row['id_url'], row['url'], row['id_tema'], row['institucion']
        except Exception as error:
            print('NO hay URLs a buscar')
            return '', ''

    # Mark the fetched URL row with *indicator* (1 = crawled, 0 = retry).
    @staticmethod
    def update_url(indicator, url_id):
        update_query = Crawler.URL_UPDATE.format(indicator, url_id)
        print(update_query)
        try:
            Crawler.cursor.execute(update_query)
            Crawler.conn.commit()
        except Exception as error:
            print('Error al actualizar la url')
        finally:
            # NOTE(review): this closes the class-level cursor/connection,
            # so any later DB call through this class will fail -- confirm
            # the intended single-shot usage.
            Crawler.cursor.close()
            Crawler.conn.close()

    # Look up the theme text for the given theme id.
    @staticmethod
    def get_theme(theme_id):
        query = Crawler.THEME_INFORMATION.format(theme_id)
        try:
            Crawler.cursor.execute(query)
            for row in Crawler.cursor:
                return row['tema']
        except Exception as error:
            print(error)
            print('No hay temas a buscar')
            return '', ''

    # Fetch the next URL, crawl it, and persist the findings; on failure
    # the URL is reset so it can be retried.
    @staticmethod
    def crawl_page(thread_name):
        url_info = Crawler.fetch_url_info()
        if url_info is not None:
            if url_info[0] and url_info[1]:
                url_to_crawl = url_info[1]
                url_id = url_info[0]
                theme_id = url_info[2]
                url_institution = url_info[3]
                try:
                    str_theme = Crawler.get_theme(theme_id)
                    Crawler.crawl_page_for_search(url_to_crawl,str_theme, Crawler.folder_name)
                    Crawler.update_url(1, url_id)
                except Exception as e:
                    print(str(e))
                    # Roll the row back to "not crawled" after a failure.
                    Crawler.update_url(0, url_id)
            else:
                pass
        else:
            print('Error al obtener url')

    # Download one page and save its content under a name derived from the
    # key word and the institution the URL belongs to.
    @staticmethod
    def crawl_page_for_search(url_to_crawl, key_word, folder_name):
        try:
            # Decide which institution the URL belongs to.
            if 'aleph' in str(url_to_crawl):
                url_institution = 'POLIJIC'
                # GET request for this institution.
                webpage = requests.get(url_to_crawl, verify=False)
            elif 'tdea' in str(url_to_crawl):
                url_institution = 'TDA'
                # NOTE(review): a POST (not GET) is issued here and below.
                webpage = requests.post(url_to_crawl, verify=False)
            else:
                url_institution = 'COLMA'
                webpage = requests.post(url_to_crawl, verify=False)
            content = webpage.text
            file_name = key_word+'-'+url_institution
            create_data_files(folder_name, file_name, content)
        except Exception as e:
            print(str(e))

    # Crawl forever, pausing five seconds between pages.
    @staticmethod
    def work():
        while True:
            Crawler.crawl_page('crawler_init')
            time.sleep(5)