Example 1
import os
import time

import connect_db
import link_pbs_server
import stat_job


def start_nomad(URL, T_SLEEP, STATE):
    os.system("clear")
    
    print "***************************************************************"
    print "*                        n.o.m.a.d.                           *" 
    print "*    DEMONE DI MONITORAGGIO AUTOMATICO NETWORK - ORIENTED     *"    
    print "***************************************************************"
    # Obtain the ID of the connection opened with the server
    ID = link_pbs_server.connect(URL)
    print "\nConnection with " + URL + " [OK]"
    print "Monitor started"
    print "---------------------------------------------------------------\n"
    
    idconn = connect_db.connect("localhost","root","eclipse88","nomad")
    # explore_cluster(ID,idconn)
    if ID != 0:
        while 1:
            jobs = stat_job.job_list(ID,STATE)
            date_time = time.asctime()
            n_jobs = len(jobs)
            print "[" + str(date_time) + "] Jobs in Wait state: [" + str(n_jobs) + "]"
            if n_jobs != 0:
                for job in jobs:
                    report_job_problem(idconn, ID, job, STATE)
                #TODO
                # Initialize the database connection
                # Update the database
            time.sleep(T_SLEEP)
    else:
        print "Connection with " + URL + " [FAILED]"
        print "------------------------------------------------------------"
    connect_db.close(idconn)
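
A minimal sketch of the connect_db helper the snippet above assumes, matching the connect(host, user, password, db) and close(conn) calls; MySQLdb is an assumed driver, not confirmed by the source:

import MySQLdb

def connect(host, user, password, db):
    # Return an open DB-API connection to the given database
    return MySQLdb.connect(host=host, user=user, passwd=password, db=db)

def close(conn):
    # Commit pending work and release the connection
    conn.commit()
    conn.close()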
Example 2
def saveToDB(rest_data, grid, hash):
	client = db.connect()
	for cell in grid:
		src = grid[cell]
		sql_query = "insert into Grid CONTENT "
		for rest in rest_data:
			dst = rest_data[rest][1]
Example 3
    def __init__(self):
        self.emulator = RestEmulator()
        try:
            self.conn = connect()
            self.cursor = self.conn.cursor(buffered=True)
        except Error as e:
            print(e)
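
The buffered cursor is a mysql.connector feature, so Error here is presumably mysql.connector.Error; a hedged sketch of the assumed connect() helper (host and credentials are placeholders, not taken from the source):

import mysql.connector
from mysql.connector import Error

def connect():
    # Placeholder connection parameters; the real values are not in the source
    return mysql.connector.connect(
        host="localhost", user="user", password="password", database="db"
    )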
Example 4
def sub_runner_checker(subname):
    try:
        connection = connect_db.connect()
        cur = connection.cursor()
        # Check whether the sub name exists, then grab its most recent row
        try:
            command = "SELECT pubsub_name, dates, on_sale, price, image FROM {table} WHERE pubsub_name = '{name}' ORDER BY dates DESC LIMIT 1"
            cur.execute(
                command.format(table=connect_db.get_table(), name=subname)
            )
        except Exception:
            return (
                "Unfortunately, we do not have deal data available on "
                + subname.replace("-", " ")
                + " sub at this time."
            )

        # Fetch all rows; the last row fetched provides the fields below
        records = cur.fetchall()
        if not records:
            return (
                "Unfortunately, we do not have deal data available on "
                + subname.replace("-", " ")
                + " sub at this time."
            )
        for row in records:
            last_on_sale = row[1]
            on_sale = row[2]
            price = row[3]
            image = row[4]
        print(records)
        # Creates a dictionary
        data = {}

        # Create a primary category
        data["sub_names"] = []

        # Create a default JSON structure
        data["sub_names"].append(
            {
                "sub_name": subname.lower(),
                "last_sale": last_on_sale,
                "status": on_sale,
                "price": price,
                "image": image,
            }
        )

        sub_info = json.dumps(data["sub_names"], indent=2)
        return sub_info
    except Exception:
        return abort(404)
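
Interpolating subname with .format() is injection-prone; a sketch of the same query with driver-side binding, assuming a psycopg2-style %s paramstyle like the INSERT in Example 16:

command = ("SELECT pubsub_name, dates, on_sale, price, image "
           "FROM {table} WHERE pubsub_name = %s "
           "ORDER BY dates DESC LIMIT 1").format(table=connect_db.get_table())
cur.execute(command, (subname,))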
Example 5
def on_sale_check():
    connection = connect_db.connect()
    cur = connection.cursor()
    """
    Queries if there are no none entires
    """
    query = "SELECT pubsub_name, dates, on_sale, price, image FROM {table} where pubsub_name is NOT NULL"
    cur.execute(query.format(table=connect_db.get_table()))
    sub_name = []
    last_on_sale = []
    on_sale = []
    price = []
    image = []

    records = cur.fetchall()
    """
    Loops through all the columns and rows
    """
    for i in range(len(records)):
        sub_name.append(records[i][0])
        last_on_sale.append(records[i][1])
        on_sale.append(records[i][2])
        price.append(records[i][3])
        image.append(records[i][4])
    sub_name = [x for x in sub_name if x is not None]
    original_name = [w.replace("-", " ") for w in sub_name]
    data = {}
    # Create a primary category
    data["all_subs"] = []
    # Build the default JSON structure
    for i in range(len(records)):
        data["all_subs"].append({
            "name": original_name[i],
            "on_sale": on_sale[i],
            "image": image[i],
            "last_on_sale": last_on_sale[i],
            "price": price[i],
            "query_name": sub_name[i],
        })
    response = jsonify(data["all_subs"])
    response.headers.add("Access-Control-Allow-Origin", "*")
    return response
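
For reference, the five parallel lists can be avoided by unpacking each row directly; a sketch producing the same JSON shape:

subs = []
for name, last_sale, sale, price, image in records:
    if name is None:
        continue
    subs.append({
        "name": name.replace("-", " "),
        "on_sale": sale,
        "image": image,
        "last_on_sale": last_sale,
        "price": price,
        "query_name": name,
    })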
Example 6
def maxWalkingTime(query=''):
	walk_time = 20
	eating_time = 30
	total_time = eating_time + (2*walk_time)
	if (total_time - 40) < 30:
		max_walk = 15*60
	else:
		max_walk = 20*60
	info = {}
	results = defaultdict(dict)
	cl = db.connect()
	if query == '':
		sql_query = "SELECT FROM Restaurant"
	else:
		sql_query = "SELECT FROM Restaurant WHERE cuisines LIKE " + "\'%" +query.lower() + "%\'" + " OR name LIKE " + "\'%" + query.title() + "%\'" + " OR address LIKE " + "\'%" + query.title() + "%\'" 
	res = cl.command(sql_query)
	print "Query executed " + sql_query
	for item in res:
		if getDuration(item.address) <= max_walk:
			info[item.rid] = getDuration(item.address) #Will be changed to health score later
		else:
			continue
		#print item.name
		results[item.rid]['distance'] = info[item.rid]
                results[item.rid]['address'] = item.address
                results[item.rid]['latitude'] = item.latitude
                results[item.rid]['longitude'] = item.longitude
                results[item.rid]['mobile_url'] = item.mobile_url
                results[item.rid]['rating'] = item.rating
                results[item.rid]['name'] = item.name
                results[item.rid]['res_id'] = item.res_id
                results[item.rid]['review_count'] = item.review_count
                results[item.rid]['cuisines'] = item.cuisines
                #results[item.rid]['health_index'] = "{0:.2f}".format(item.health_index*10)
                results[item.rid]['health_option'] = item.health_option
                results[item.rid]['image_url'] = item.image_url
	rids = sorted(info, key=info.get)[:5]
	result = defaultdict(dict)
	for key in rids:
		result[key] = results[key]
	result = sorted(result.values(), key=lambda x:int(x["health_option"]), reverse=True)
	print result			
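
The hand-concatenated WHERE clause above is easier to read (and to audit) when built with str.format; an equivalent sketch:

sql_query = (
    "SELECT FROM Restaurant WHERE cuisines LIKE '%{q}%' "
    "OR name LIKE '%{t}%' OR address LIKE '%{t}%'"
).format(q=query.lower(), t=query.title())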
Example 7
def random_subs():
    try:
        connection = connect_db.connect()
        cur = connection.cursor()
        # Grab one random non-NULL row from the table
        command = "SELECT pubsub_name, dates, on_sale, price, image FROM {table} WHERE pubsub_name is NOT NULL ORDER BY random() LIMIT 1"
        cur.execute(command.format(table=connect_db.get_table()))

        # Fetches us all the rows so we can grab data from each
        records = cur.fetchall()

        for row in records:
            subname = row[0]
            last_on_sale = row[1]
            on_sale = row[2]
            price = row[3]
            image = row[4]

        # Creates a dictionary
        data = {}

        # Create a primary category
        data["random_sub"] = []

        # Create a default JSON structure
        data["random_sub"].append({
            "sub_name": subname.lower(),
            "last_sale": last_on_sale,
            "status": on_sale,
            "price": price,
            "image": image,
        })

        sub_info = jsonify(data["random_sub"])
        sub_info.headers.add("Access-Control-Allow-Origin", "*")
        return sub_info
    except Exception:
        return abort(404)
Example 8
def extractGridPoints():
	print "\nAccessing the database to retrieve the restaurant information"

	# Connect to the database and get all the restaurant data
	client = db.connect()
	sql_query = "SELECT FROM Restaurant"
	result = client.command(sql_query)
	
	# Extract the points that form the grid
	grid_points = defaultdict(list)
	for restaurant in result:
		# Convert Address for Google API readiness
		# Ex - 201+North+Goodwin+Avenue+Urbana+IL+61801
		address = restaurant.address.replace(', ',' ').replace(',',' ').replace(' ','+')
		# Store the lat long as a tuple
		lat_long = (restaurant.latitude, restaurant.longitude)
		
		# Insert into a dictionary of grid points
		grid_points[restaurant.res_id] = [address,lat_long]

	# Return a dictionary mapping restaurant IDs to address and coordinates
	print "\nFinished processing and extracting the coordinates"
	return grid_points
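
A quick check of the address conversion described in the comment above:

addr = "201 North Goodwin Avenue, Urbana, IL 61801"
print addr.replace(', ', ' ').replace(',', ' ').replace(' ', '+')
# -> 201+North+Goodwin+Avenue+Urbana+IL+61801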
Example 9
def all_subs_data():
    connection = connect_db.connect()
    cur = connection.cursor()

    query = (
        "SELECT pubsub_name FROM {table} WHERE pubsub_name is not NULL ORDER BY on_sale"
    )
    cur.execute(query.format(table=connect_db.get_table()))

    records = cur.fetchall()

    data = {}
    # Create a primary category
    data["all_subs"] = []
    # Build the default JSON structure
    for sub in records:
        data["all_subs"].append({"name": sub[0]})
    response = jsonify(data["all_subs"])
    response.headers.add("Access-Control-Allow-Origin", "*")
    return response
Example 10
def saveToCSV(rest_data, grid, hash):
	client = db.connect()
Example 11
if __name__ == "__main__":
    # Call options:
    # 1. python main_topic_change.py 1950-01-01 1955-01-01 five
    # 2. python main_topic_change.py 1950-01-01 1955-01-01 decade
    if len(sys.argv)>1:
        date = sys.argv[1]
        period = sys.argv[3]
        date2 = sys.argv[2]
        print "Arguments given: ",date,period,date2
        # Check if aggregate file exists or user wants it, and perform aggregation:
        filename = os.path.join(cs.path_aggr_five,date+cs.postfix_aggr_five)
        ch = "n"
        if os.path.isfile(filename):
            ch = raw_input("Do you want to perform aggregation? (y/n)")
        if not os.path.isfile(filename) or ch == "y":
            db = cdb.connect()
            all_cases = cdb.extract_cases(db,date,date2)
            at.aggregate_all_text(date,all_cases)

        if period == "five":
            # Option 1
            # Perform Word2Vec:
            entire_text = w2v.CSV_to_list(date)
            w2v.convert_text_to_vector(entire_text,date,period)

            # Perform TF:
            entire_text = tf.CSV_to_list(date)
            tf.calculate_TF(entire_text,date,period)
        else:
            # Option 2
            # Perform Word2Vec:
Example 12
import connect_db
import psutil
import time


db = connect_db.connect()
time.sleep(1)

while(1):
	cpu = psutil.cpu_percent()
	ram = psutil.virtual_memory().percent
	disk = psutil.disk_usage('/').percent
	date_time = time.strftime("%Y-%m-%d  %H:%M:%S", time.localtime())
	# print str(cpu) + " : " + str(ram) + " : " + str(disk)
	cursor = db.cursor()
	sql = "INSERT INTO resource_monitor (cpu, ram, disk, time) VALUES ("+ str(cpu) + ","  + str(ram) + "," + str(disk) + "," + "'"  + str(date_time) + "'" + ")"
	try:
		cursor.execute(sql)
		db.commit()
	except Exception, e:
		db.rollback()
		print e
	time.sleep(1)
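
The INSERT above is assembled by string concatenation; a sketch of the same statement with driver-side parameter binding, assuming a MySQLdb-style %s paramstyle:

sql = "INSERT INTO resource_monitor (cpu, ram, disk, time) VALUES (%s, %s, %s, %s)"
cursor.execute(sql, (cpu, ram, disk, date_time))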
Example 13
import sys
import scrape
import connect_db
db = connect_db.connect()
cur = connect_db.get_cursor(db)

# RETRIEVE THE HTML FOR THE JOB ID...
try:
	id = str(int(sys.argv[1]))  # integer cast also guards the query below
	html = 'Job ' + id + ' not found'
	try:
		sqlTxt = "SELECT html FROM tbl_jobs WHERE ID=" + id
		cur.execute(sqlTxt)
		row = cur.fetchone()
		if row:
			html = row[0]
			if not html:
				html = 'Job ' + id + ' not yet completed'
			else:
				html = scrape.decodeHTMLEntities(html)

	finally:
		print html

except (IndexError, ValueError):
	print 'Please give me an id'
Example 14
def create_csv(table_name,verbosity,file_location="./",output_limiter="none"):
	"create formatted CSV file based on arguments provided "

	SQL = assign_sql(table_name,output_limiter)

	if output_limiter == "none":
		filename=  str(table_name) +".csv"
	else :
		filename=  str(table_name) +'_'+ output_limiter +".csv"	
	
	try:
		os.chdir(file_location)
	except OSError:
		print "Directory does not exist; creating it"
		os.makedirs(file_location, 0777)
		os.chdir(file_location)

	FILE = open(filename, "w")
	output = csv.writer(FILE)

	connection, cursor = connect_db.connect()
	cursor.execute("select count(*) from "+ str(table_name))
	Number_of_records = cursor.fetchone()[0]


	cursor.execute(SQL)
	#print Number_of_records
	
	
	if table_name == "inconsistent_blocks":
		output.writerow(generate_header(cursor))

		output.writerow(["# Rows in this document:" + str( Number_of_records) ])
		
		if verbosity:
			for row in cursor:
				row = rearrange(row)
				print row
				output.writerow(row)
		else:
		
			for row in cursor:
				row = rearrange(row)
				output.writerow(row)

	elif table_name == "inconsistent_files":
		
		file_header = []
		
		for i in range(1,11):
			file_header.append(cursor.description[i][0])
		output.writerow(file_header)
		
		output.writerow(["# Rows in this document: " + str( Number_of_records) ])
		
		if verbosity:
			for row in cursor:
				print row
				output.writerow(row[1:])
		else:
		
			for row in cursor:
				output.writerow(row[1:])
		
	elif table_name == "invalid_dbs_blocks":

		file_header = []
		
		for i in range(0,4):
			file_header.append(cursor.description[i][0])
		output.writerow(file_header)
		
		output.writerow(["# Rows in this document: " + str( Number_of_records) ])
		
		if verbosity:
			for row in cursor:
				print row[:-1]
				output.writerow(row[:-1])
		else:
		
			for row in cursor:
				output.writerow(row[:-1])

	else:
		for row in cursor:
			print row
			output.writerow(row)

	cursor.close()
	connection.close()
	FILE.close()
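
The three verbosity branches above repeat the same loop; a hedged refactor sketch, a suggestion rather than the original author's structure:

def write_rows(cursor, output, transform, verbosity):
	# One shared loop; transform rearranges or slices a row per table type
	for row in cursor:
		row = transform(row)
		if verbosity:
			print row
		output.writerow(row)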
Example 15
def pnpoly(nvert, vertx, verty, testx, testy):
    # W. R. Franklin's PNPOLY ray-casting point-in-polygon test
    i = 0
    j = nvert - 1
    c = 0
    while (i < nvert):
        if (((verty[i] > testy) != (verty[j] > testy))
                and (testx < (vertx[j] - vertx[i]) * (testy - verty[i]) /
                     (verty[j] - verty[i]) + vertx[i])):
            c = not c
        j = i
        i = i + 1
    return c


_, mydb = mdb.connect()

floor = "piso1"

cursor = mydb["dangerZones"].find({"floor": floor})

for document in cursor:
    vertx = []
    verty = []
    cont = True
    line = 0
    while (cont):
        if "line_" + str(line) in document:
            vertx.append(int(document["line_" + str(line)]["start_x"]))
            verty.append(int(document["line_" + str(line)]["start_y"]))
            line = line + 1
        else:
            # No more line_N keys; stop, or the loop would never terminate
            cont = False
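
A quick sanity check of pnpoly on a unit square:

vx = [0, 1, 1, 0]
vy = [0, 0, 1, 1]
print(pnpoly(4, vx, vy, 0.5, 0.5))  # -> True (inside: odd number of crossings)
print(pnpoly(4, vx, vy, 2.0, 0.5))  # -> 0 (falsy: the point is outside)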
Example 16
    def add_entry(self):
        sub_name = self.sub_name.text()
        dates = self.date.text()
        on_sale = self.on_sale.text()
        sub_name = sub_name.replace(" ", "-").lower()
        original = self.sub_name.text().lower()
        price = self.price.text()
        image = self.image.text()

        connection = connect_db.connect()

        cur = connection.cursor()
        # Check whether a row for this sub already exists
        exist_query = (
            "select exists(select 1 from {table} where pubsub_name = %s limit 1)"
        )
        cur.execute(
            exist_query.format(table=connect_db.get_table()), (sub_name,))
        count = cur.fetchone()[0]
        # If the row exists, update its status, dates, price and image
        if count:
            print("A version of " + sub_name + " already exists, updating!")
            update_string = "UPDATE {table} SET on_sale = %s, dates = %s, price = %s, image = %s WHERE pubsub_name = %s"
            cur.execute(
                update_string.format(table=connect_db.get_table()),
                (on_sale, dates, price, image, sub_name),
            )
            # Sends an email out if a sub is now on sale
            if on_sale == "True":
                print("Sub is on sales")
                mailgun.send_email(original, dates)
                with open("webhook.json") as webhook_data:
                    data = json.load(webhook_data)
                webhook = DiscordWebhook(url=data["webhook"])
                embed = DiscordEmbed(
                    title="New sub on sale!",
                    description=":tada:  A sub is on sale!\n" + sub_name +
                    " is on sale from: " + dates + ", for the price of " +
                    price)
                embed.set_image(url=image)

                # add embed object to webhook
                webhook.add_embed(embed)

                response = webhook.execute()
        else:
            print("This sub doesn't exist, now adding!")
            # Inserts the data into each column
            cur.execute(
                "INSERT INTO " + connect_db.get_table() +
                "(pubsub_name, dates, on_sale, price, image) VALUES (%s, %s, %s, %s, %s)",
                (sub_name, dates, on_sale, price, image),
            )
            if on_sale == "True":
                print("Sub is on sale")
                mailgun.send_email(original, dates)
                with open("webhook.json") as webhook_data:
                    data = json.load(webhook_data)
                webhook = DiscordWebhook(url=data["webhook"])
                embed = DiscordEmbed(
                    title="New sub on sale!",
                    description=":tada:  A sub is on sale!\n" + sub_name +
                    " is on sale from: " + dates + ", for the price of " +
                    price)
                embed.set_image(url=image)

                # add embed object to webhook
                webhook.add_embed(embed)

                response = webhook.execute()

        connect_db.close(connection)
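
If connect_db.close() does not commit, the INSERT/UPDATE above could be rolled back when the connection closes; a hedged addition, assuming a non-autocommit DB-API connection:

        connection.commit()  # persist the insert/update before connect_db.close(connection)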
Example 18
def sub_runner(subname):
    sub = connect_db.connect(subname)
    return sub
Example 19
def insert_ingredient(r_id, step, text):
    text = text.replace('"', "'").replace("'", "''")
    sql = '''
        INSERT INTO `ingredient` (`id`, `recipe_id`, `text`, `step`)
        VALUES (NULL, '{}', "{}", {})
    '''.format(r_id, text, step)
    cur.execute(sql)

def insert_instruction(r_id, step, text):
    text = text.replace('"', "'").replace("'", "''")
    sql = '''
        INSERT INTO `instruction` (`id`, `recipe_id`, `text`, `step`)
        VALUES (NULL, '{}', "{}", {})
    '''.format(r_id, text, step)
    cur.execute(sql)

if __name__ == '__main__':
    con.connect()
    cur = con.db.cursor()

    # === test for add user ===
    # cur.execute('''
    #     INSERT INTO `account` (`user_id`, `email`, `password`, `name`, `gender`, `pic_path`)
    #     VALUES (NULL, '*****@*****.**', '', 'admin', 'none', '');
    # ''')

    # === insert recipe ===
    with open('./json/recipes_out.json') as json_f:
        recipes = json.loads(json_f.read())
    with open('./json/img_imgur.json') as json_f:
        img_imgur = json.loads(json_f.read())

    for rec in recipes:
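
For reference, the quote handling in these insert helpers maps every quote to a doubled single quote before interpolation:

text = 'add 1/2" of "fresh" cream'
print(text.replace('"', "'").replace("'", "''"))
# -> add 1/2'' of ''fresh'' cream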
Example 20
                print "[+] event_info"
                for key in event_info:
                    if key[0] == 'detail':
                        print "\t[+]", key[0]
                        print "\t\t[-]", key[1]
                    else:
                        print "\t[+]", key[0], ":", str(key[1])[:32]
                print "-------------------------"

        # except Exception, e:
        #      raise e
        # pass


if __name__ == '__main__':
    import os
    # sys.path.insert(0,"/home/ubuntu/anhtvd/waf-log-monitor/") # uncomment
    mysql = connect_db.connect()
    # mysql = mysql.cursor()
    if os.path.isfile(audit_log_path):
        parser = Parser(mysql, audit_log_path)
        parser.start()

    # parser = Parser(mysql, audit_log_path)
    # f = open('./modsec_audit.log')
    # logs = f.read()
    # f.close()
    # # print logs
    # parser.parser_log(logs)
Example 21
import connect_db
cur = connect_db.connect()

cur.execute("SELECT * FROM tbl_jobs")
for row in cur.fetchall():
	print row[0], '-', row[1]