コード例 #1
0
ファイル: mdbNew.py プロジェクト: dhruv4/DataCanopySQL
def createDCTableLeveln(table, levels, numChunks, numCols, numRows):
	"""Fill in levels >= 3 of the data canopy table dc_<table>.

	For every chunk and every column-combination id whose nodes were not
	already built at level 1 or level 2, look up the stored level-2
	statistic of each column pair inside the combination and insert an
	aggregated row into dc_<table>.

	Fix: the connection is now closed in a ``finally`` block instead of
	being leaked when a query raises.
	"""
	conn = mdb.connect(username="******", password="******", database="test")
	try:
		cur = conn.cursor()

		for c in range(numChunks):
			for i in range(1, 2**numCols):
				# level-1 and level-2 ids were already populated elsewhere
				if(checkLevel1(i) == 1 or checkLevel2(i) == 1):
					continue

				vals = []
				for x in range(numCols):
					if((i >> x) & 1 == 1):
						for y in range(x+1, numCols):
							if((i >> y) & 1 == 1):
								# fetch the stored level-2 statistic for the pair (x, y)
								cur.execute("SELECT col1 FROM dc_" + table + " WHERE col0 = "
									+ str(idChunkCombine(2**x + 2**y, c, numChunks)))

								vals.append(cur.fetchone()[0])

				# placeholder aggregate of the child statistics
				# (the +42 offset matches the original behavior)
				correlation = sum(vals) + 42

				cur.execute("INSERT INTO dc_" + table + " (col0, col1) VALUES (%s, %s)",
					[idChunkCombine(i, c, numChunks), correlation])

		conn.commit()
	finally:
		conn.close()
コード例 #2
0
ファイル: mdbperf.py プロジェクト: dhruv4/dc-demo
def demo():
	"""End-to-end demo: create a random table, build its full data canopy
	(levels 1..n), then drop both tables.

	Command line: argv[1]=numRows, argv[2]=numCols, argv[3]=numChunks.
	"""
	numRows = int(sys.argv[1])
	numCols = int(sys.argv[2])
	numChunks = int(sys.argv[3])

	# random suffix so concurrent demo runs don't collide on table names
	name = "demop" + str(random.randint(0, 12412099999999))

	conn = mdb.connect(username="******", password="******", database="test")
	cur = conn.cursor()

	createTable(cur, conn, name, numCols)
	insertRandData(cur, conn, name, numRows)
	conn.commit()

	# NOTE: the `levels` argument is passed as numCols (canopy depth == column count)
	createDCTableSetup(name, numCols, numChunks, numCols, numRows)
	#print("setup done")
	nodeCount = createDCTableLevel1(name, numCols, numChunks, numCols, numRows)
	#print("level 1 made")
	nodeCount = createDCTableLevel2(name, numCols, numChunks, numCols, numRows, nodeCount)
	#print("level 2 made")
	createDCTableLeveln(name, numCols, numChunks, numCols, numRows, nodeCount)
	#print("done")

	conn.commit()

	#drop table here?

	print("done")
	#print(time.time() - startTime)

	# clean up: drop both the base table and its canopy table
	cur.execute("DROP TABLE " + name)
	cur.execute("DROP TABLE dc_" + name)
	conn.commit()
コード例 #3
0
ファイル: mdbNew.py プロジェクト: dhruv4/DataCanopySQL
def createDCTableLevel1(table, levels, numChunks, numCols, numRows):
	"""Build level 1 of the data canopy for `table`.

	For every chunk and every column, computes AVG / STDDEV_SAMP / VAR_SAMP
	over the chunk's rows (windowed with ROW_NUMBER) and inserts them into
	dc_<table>.  Median and mode are currently stubbed to 0.

	Bug fix: the chunk window used ``rnum < (c+1)*sizeChunk``, which
	silently dropped the last row of every chunk; it now uses ``<=`` so
	each chunk contains exactly sizeChunk rows (except possibly the last).
	"""
	conn = mdb.connect(username="******", password="******", database="test")
	cur = conn.cursor()

	cur.execute("SELECT * FROM " + table)
	colList = [x[0] for x in cur.description]

	sizeChunk = math.ceil(numRows/numChunks)

	for c in range(numChunks):
		for i in range(numCols):
			# chunk c covers 1-based row numbers c*sizeChunk+1 .. (c+1)*sizeChunk
			cur.execute("SELECT AVG(" + colList[i] + "), STDDEV_SAMP(" + colList[i] + "), VAR_SAMP(" + colList[i] + ") FROM (SELECT " + colList[i] + ", ROW_NUMBER() OVER() as rnum FROM "
				+ table + ") as foo WHERE rnum > " + str(c*sizeChunk) + " AND rnum <= " + str(sizeChunk + c*sizeChunk))

			avg, std, var = cur.fetchone()

			med = 0  # median not computed (kept 0 for schema compatibility)
			mod = 0  # mode not computed (kept 0 for schema compatibility)

			# canopy node id: bit i marks the column, combined with the chunk index
			ID = idChunkCombine(1 << i, c, numChunks)

			cur.execute("INSERT INTO dc_" + table + " (col0, col1, col2, col3, col4, col5) VALUES (%s, %s, %s, %s, %s, %s)",
				[ID, avg, std, var, med, mod])

	conn.commit()
コード例 #4
0
ファイル: mdbTest.py プロジェクト: dhruv4/DataCanopySQL
def main():
	"""CLI dispatcher: argv[1] selects the operation (get / insert / graph /
	create / createdc); the remaining argv entries are its arguments."""

	#DC INFO
	numChunks = 5
	numCols = 5
	numRows = 100
	levels = numCols

	conn = mdb.connect(username="******", password="******", database="test")
	cur = conn.cursor()

	if(sys.argv[1] == "get"):
		getAllData(cur, conn, sys.argv[2])
	elif(sys.argv[1] == "insert"):
		insertRandData(cur, conn, sys.argv[2], sys.argv[3])
	elif(sys.argv[1] == "graph"):
		graphData(cur, conn, sys.argv[2], sys.argv[3])
	elif(sys.argv[1] == "create"):
		createTable(cur, conn, sys.argv[2], sys.argv[3])
	elif(sys.argv[1] == "createdc"):
		createDCTable(cur, conn, sys.argv[2], levels, numChunks, numCols, numRows)

	conn.commit()
	cur.close()
	conn.close()
	# startTime is a module-level timestamp set at import time
	print("Run time: ", time.time() - startTime, " seconds")
コード例 #5
0
ファイル: mdbCache.py プロジェクト: dhruv4/DataCanopySQL
def createDCTableLevel2(table, levels, numChunks, numCols, numRows):
	"""Build level 2 of the data canopy: pairwise correlations per chunk.

	For every unordered column pair (i, j) and every chunk, a temporary
	MonetDB SQL function GET_CHUNK() is created to slice the chunk's rows
	(LIMIT/OFFSET), CORR is computed over it, the result is inserted into
	dc_<table>, and the function is dropped again.
	"""
	conn = mdb.connect(username="******", password="******", database="test")
	cur = conn.cursor()

	maxRows = (2**numCols - 1)*numChunks
	sizeChunk = math.floor(numRows/numChunks)
	#sizeChunk = math.ceil(numRows/numChunks)

	# SELECT * is issued only to obtain column names via cursor.description
	cur.execute("SELECT * FROM " + table)
	colList = [x[0] for x in  cur.description]

	print("reached 2")

	#level 2
	# NOTE(review): i, j run over 1..numCols and index colList directly —
	# this assumes colList[0] is a non-data (id) column; confirm the schema.
	for i, j in itertools.combinations(range(1, numCols+1), 2):
		for c in range(numChunks):
			# MonetDB does not parameterize LIMIT/OFFSET inside functions,
			# hence a fresh single-use function per chunk.
			cur.execute("CREATE FUNCTION GET_CHUNK() RETURNS TABLE (cl1 bigint, cl2 bigint) "
			+ "BEGIN RETURN SELECT " + colList[i] + "," + colList[j] + " FROM " + table 
			+ " LIMIT " + str(sizeChunk) + " OFFSET " + str(c*sizeChunk) + "; END;")
			
			cur.execute("SELECT CORR(cl1, cl2) FROM GET_CHUNK()")

			# ids wider than 32 bits are stored as bit strings, otherwise as ints
			if(numCols + math.ceil(math.log(numChunks, 2)) >= 32):
				cur.execute("INSERT INTO dc_" + table + " (col0, col1) VALUES (%s, %s)", 
					[recToBinTrans([i, j], c, numCols, numChunks), cur.fetchone()[0]])
			else:
				cur.execute("INSERT INTO dc_" + table + " (col0, col1) VALUES (%s, %s)", 
					[int(recToBinTrans([i, j], c, numCols, numChunks), 2), cur.fetchone()[0]])

			cur.execute("DROP FUNCTION GET_CHUNK()")

	conn.commit()
コード例 #6
0
ファイル: mdbCache.py プロジェクト: dhruv4/DataCanopySQL
def createDCTableLeveln(table, levels, numChunks, numCols, numRows, two=0):
	"""Build levels 3..`levels` of the data canopy from lower-level nodes.

	For each i-column combination, the child statistics are read back from
	dc_<table>: pairwise (size-2) children when ``two == 1``, otherwise the
	(i-1)-sized sub-combinations.  Each combination/chunk row is inserted
	with a placeholder aggregate (sum of children + 42).  Commits once per
	combination.
	"""
	conn = mdb.connect(username="******", password="******", database="test")
	cur = conn.cursor()

	for i in range(3, levels+1):
		#print("reached", i)
		comb = list(itertools.combinations(range(1, numCols + 1), i))
		for j in comb: #create combinations of cols
			if(two == 1):
				comb2 = list(itertools.combinations(j, 2))
			else:
				comb2 = list(itertools.combinations(j, i-1))
			for c in range(numChunks):
				vals = []
				for k in comb2:
					# ids wider than 32 bits are stored as bit strings, otherwise as ints
					if(numCols + math.ceil(math.log(numChunks, 2)) >= 32):
						cur.execute("SELECT col1 FROM dc_" + table + " WHERE col0='" 
							+ recToBinTrans(k, c, numCols, numChunks) + "'")
					else:
						cur.execute("SELECT col1 FROM dc_" + table + " WHERE col0='" 
							+ str(int(recToBinTrans(k, c, numCols, numChunks), 2)) + "'")

					vals.append(cur.fetchone()[0])

				# placeholder aggregate of the child statistics
				correlation = sum(vals) + 42

				if(numCols + math.ceil(math.log(numChunks, 2)) >= 32):
					cur.execute("INSERT INTO dc_" + table + " (col0, col1) VALUES (%s, %s)", 
						[str(recToBinTrans(j, c, numCols, numChunks)), correlation])
				else:
					cur.execute("INSERT INTO dc_" + table + " (col0, col1) VALUES (%s, %s)", 
						[str(int(recToBinTrans(j, c, numCols, numChunks), 2)), correlation])
			conn.commit()
コード例 #7
0
ファイル: mdbperf.py プロジェクト: dhruv4/dc-demo
def createDCTableLevel2(table, levels, numChunks, numCols, numRows, nodeCount):
	"""Build level 2 of the data canopy (pairwise CORR per chunk), emitting
	progress markers on stdout.

	Takes the running `nodeCount` from level 1, increments it per inserted
	node, and returns it for the level-n pass.

	Bug fix: the chunk window used ``rnum < (c+1)*sizeChunk``, which
	silently dropped the last row of every chunk; it now uses ``<=``.
	"""
	conn = mdb.connect(username="******", password="******", database="test")
	cur = conn.cursor()

	# SELECT * is issued only to obtain column names via cursor.description
	cur.execute("SELECT * FROM " + table)
	colList = [x[0] for x in cur.description]

	sizeDC = numChunks * (2**numCols - 1)
	prevPercent = findPercent(nodeCount, sizeDC)
	sizeChunk = math.ceil(numRows/numChunks)

	for c in range(numChunks):
		for i in range(numCols - 1):
			for j in range(i+1, numCols):

				# chunk c covers 1-based row numbers c*sizeChunk+1 .. (c+1)*sizeChunk
				cur.execute("SELECT CORR(cl1, cl2) FROM (SELECT " + colList[i] + " as cl1," + colList[j] + " as cl2, ROW_NUMBER() OVER() as rnum FROM " 
					+ table + ") as foo WHERE rnum > " + str(c*sizeChunk) + " AND rnum <= " + str(sizeChunk + c*sizeChunk))

				cur.execute("INSERT INTO dc_" + table + " (col0, col1) VALUES (%s, %s)", 
					[idChunkCombine(2**i + 2**j, c, numChunks),float(cur.fetchone()[0])])

				nodeCount+=1

				# emit a progress tick at most every 5 percentage points
				p = findPercent(nodeCount, sizeDC)
				if(p - prevPercent >= 5):
					print(str(random.randint(23,28123)) + "|" + str(p) + "&", sep="")
					prevPercent = p
					sys.stdout.flush()

	conn.commit()
	return nodeCount
コード例 #8
0
ファイル: testCache.py プロジェクト: dhruv4/DataCanopySQL
def main():
	"""CLI dispatcher: argv[1] picks the backend ("pg" or "mdb"), argv[2]
	picks the operation; remaining argv entries are its arguments.

	NOTE(review): if argv[1] is neither "pg" nor "mdb", `conn`/`cur` are
	never bound and the first use below raises NameError — confirm whether
	an explicit usage error is wanted.
	"""

	if(sys.argv[1] == "pg"):

		conn = pg.connect(dbname="postgres")
		cur = conn.cursor()

	elif(sys.argv[1] == "mdb"):

		conn = mdb.connect(username="******", password="******", database="test")
		cur = conn.cursor()

	if(sys.argv[2] == "get"):
		getAllData(cur, conn, sys.argv[3])
	elif(sys.argv[2] == "insert"):
		insertRandData(cur, conn, sys.argv[3], sys.argv[4], sys.argv[1])
	elif(sys.argv[2] == "graph"):
		graphData(cur, conn, sys.argv[3], sys.argv[4])
	elif(sys.argv[2] == "create"):
		createTable(cur, conn, sys.argv[3], sys.argv[4])
	elif(sys.argv[2] == "createdc"):
		createDCTable(cur, conn, sys.argv[3])

	conn.commit()
	cur.close()
	conn.close()
	# startTime is a module-level timestamp set at import time
	print("Run time: ", time.time() - startTime, " seconds")
コード例 #9
0
ファイル: mdbNew.py プロジェクト: dhruv4/DataCanopySQL
def test():
	"""Build a full data canopy over the pre-existing table 'testa' and
	print the elapsed time since module start."""
	numChunks, numCols, numRows = 10, 10, 1000000

	numChunks = int(numChunks)
	print(numChunks)

	conn = mdb.connect(username="******", password="******", database="test")
	cur = conn.cursor()

	# Table creation is assumed to have been done in an earlier run:
	# createTable(cur, conn, "testa", numCols)
	# insertRandData(cur, conn, "testa", numRows)
	# conn.commit()

	createDCTableSetup("testa", numCols, numChunks, numCols, numRows)
	print("setup done")
	createDCTableLevel1("testa", numCols, numChunks, numCols, numRows)
	print("level 1 made")
	createDCTableLevel2("testa", numCols, numChunks, numCols, numRows)
	print("level 2 made")
	createDCTableLeveln("testa", numCols, numChunks, numCols, numRows)
	print("done")

	conn.commit()
	print(time.time() - startTime)
コード例 #10
0
ファイル: skymodel.py プロジェクト: mfkiwl/lofar-1
    def go(self):
        """Query the global sky model database for sources around the
        pointing and write them out as a skymodel file.

        Returns 0 on success, 1 on database or file-write failure.
        """
        self.logger.info("Building sky model")
        super(skymodel, self).go()

        # search box is a square of +/- search_size around (ra, dec)
        ra_min = self.inputs['ra'] - self.inputs['search_size']
        ra_max = self.inputs['ra'] + self.inputs['search_size']
        dec_min = self.inputs['dec'] - self.inputs['search_size']
        dec_max = self.inputs['dec'] + self.inputs['search_size']

        try:
            # closing() guarantees connection/cursor cleanup on any exit path
            with closing(
                    db.connect(
                        hostname=self.inputs["db_host"],
                        port=int(self.inputs["db_port"]),
                        database=self.inputs["db_dbase"],
                        username=self.inputs["db_user"],
                        password=self.inputs["db_password"])) as db_connection:
                with closing(db_connection.cursor()) as db_cursor:
                    db_cursor.execute(query_central % (float(
                        self.inputs['ra']), float(self.inputs['dec']), "VLSS"))
                    central_source = db_cursor.fetchone()
                    if central_source:
                        self.outputs["source_name"], self.outputs[
                            "source_flux"] = central_source
                    else:
                        raise PipelineException(
                            "Error reading central source from database; got %s"
                            % str(central_source))
                    self.logger.info("Central source is %s; flux %f" %
                                     (self.outputs["source_name"],
                                      self.outputs["source_flux"]))
                    db_cursor.execute(query_skymodel % (
                        4,
                        4,  # Only using VLSS for now
                        float(ra_min),
                        float(ra_max),
                        float(dec_min),
                        float(dec_max),
                        float(self.inputs['min_flux'])))
                    results = db_cursor.fetchall()

        except db.Error as my_error:
            self.logger.warn("Failed to build sky model: %s " % (my_error))
            return 1

        try:
            with open(self.inputs['skymodel_file'], 'w') as file:
                file.write(header_line)
                # each DB row becomes one comma-terminated skymodel line
                file.writelines(", ".join(line) + ",\n" for line in results)
        except Exception as e:
            self.logger.warn("Failed to write skymodel file")
            self.logger.warn(str(e))
            return 1

        return 0
コード例 #11
0
ファイル: mdbperf.py プロジェクト: dhruv4/dc-demo
def createDCTableSetup(table, levels, numChunks, numCols, numRows):
	"""Create the (empty) data canopy table dc_<table> with 6 columns.

	If the node id needs 32 or more bits (columns plus chunk bits), the id
	column is created in the wide (bit-string) form.
	"""
	conn = mdb.connect(username="******", password="******", database="test")
	cur = conn.cursor()

	needs_wide_ids = numCols + math.ceil(math.log(numChunks, 2)) >= 32
	if needs_wide_ids:
		createTable(cur, conn, 'dc_' + table, 6, 1, 1)
	else:
		createTable(cur, conn, 'dc_' + table, 6, 1)

	conn.commit()
コード例 #12
0
ファイル: mdbNew.py プロジェクト: dhruv4/DataCanopySQL
def banana():
	"""Create demo table 'banana' with 10 columns and fill it with 10000
	random rows."""
	cols = 10
	rows = 10000
	numChunks = 10  # unused here; kept for parity with the other demos

	connection = mdb.connect(username="******", password="******", database="test")
	cursor = connection.cursor()

	createTable(cursor, connection, "banana", cols)
	insertRandData(cursor, connection, "banana", rows)

	connection.commit()
コード例 #13
0
ファイル: db.py プロジェクト: sirrice/dbwipes_src
def connect(dbname, engine='pg'):
    """Return a database handle for `dbname`.

    MonetDB (via msql) for the 'intel'/'med' databases when engine is
    'monet'; otherwise a SQLAlchemy engine for local PostgreSQL.
    Exits the process on connection failure.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt
    and SystemExit) now catches Exception only, and the process exits
    with status 1 instead of 0 on failure.
    """
    try:
        if engine == 'monet' and dbname in ('intel', 'med'):
            db = msql.connect(user='******', password='******', hostname='localhost', database=dbname)
        else:
            conn = "postgresql://sirrice@localhost:5432/%s" % dbname
            db = create_engine(conn)
    except Exception:
        sys.stderr.write("couldn't connect\n")
        sys.exit(1)
    return db
コード例 #14
0
ファイル: testCache.py プロジェクト: dhruv4/DataCanopySQL
def runAccessExperiment():
	"""Time data-canopy node lookups at each level and graph the results.

	argv[1] chooses the backend ("pg" or "mdb").  Builds a 16-column test
	table plus its canopy, then for every level averages the lookup time of
	`numTrials` random node accesses.
	"""

	numTrials = 100
	numChunks = 5
	numCols = 16
	numLevels = numCols
	numRows = 1000

	#test access
	if(sys.argv[1] == "pg"):

		conn = pg.connect(dbname="postgres")
		cur = conn.cursor()
		pgTest.createTable(cur, conn, 'exp', numCols + 1)
		pgTest.insertRandData(cur, conn, 'exp', numRows)
		pgTest.createDCTable(cur, conn, 'exp', numLevels, numChunks, numCols, numRows)

	elif(sys.argv[1] == "mdb"):

		conn = mdb.connect(username="******", password="******", database="test")
		cur = conn.cursor()
		mdbTest.createTable(cur, conn, 'exp', numCols + 1)
		mdbTest.insertRandData(cur, conn, 'exp', numRows)
		mdbTest.createDCTable(cur, conn, 'exp', numLevels, numChunks, numCols, numRows)
	
	conn.commit()	

	timing = []

	for x in range(numLevels):
		timeSum = 0
		for i in range(numTrials):
			startTime = time.time()

			# draw x+1 distinct column indices for the node to look up
			# NOTE(review): randint(1, numLevels+1) can return numLevels+1,
			# one past the highest column index — confirm intended range.
			randList = []

			while(len(randList) < x+1):
				num = random.randint(1, numLevels+1)
				if(num not in randList):
					randList.append(num)

			if(sys.argv[1] == "mdb"):
				cur.execute("SELECT * FROM dc_exp WHERE col0 = " + str(mdbTest.recToBinTrans(randList, random.randint(0, numChunks))))
			else:
				cur.execute("SELECT * FROM dc_exp WHERE col0 = cast('" + pgTest.recToBinTrans(randList, random.randint(0, numChunks)) + "', as varbit)")

			timeSum += time.time() - startTime

		timing.append(timeSum/numTrials)

	graph(range(numLevels), timing, "levels", "AccessTest", sys.argv[1])
コード例 #15
0
 def _create_monet_db_connection(self, hostname, database, username,
                                 password, port):
     """
     Create and return a MonetDB connection.

     On failure the db.Error is logged and re-raised (this method never
     returns None).

     Bug fix: the connection was created but never returned, so callers
     always received None; `return conn` added.
     """
     try:
         conn = db.connect(hostname=hostname, database=database,
                                    username=username, password=password,
                                    port=port)
     except db.Error, dberror:
         self.logger.error("Failed to create a monetDB connection: "
                           "{0}".format(str(dberror)))
         raise dberror
     return conn
コード例 #16
0
 def _create_monet_db_connection(self, hostname, database, username,
                                 password, port):
     """
     Create and return a MonetDB connection.

     On failure the db.Error is logged and re-raised (this method never
     returns None).

     Bug fix: the connection was created but never returned, so callers
     always received None; `return conn` added.
     """
     try:
         conn = db.connect(hostname=hostname, database=database,
                                    username=username, password=password,
                                    port=port)
     except db.Error, dberror:
         self.logger.error("Failed to create a monetDB connection: "
                           "{0}".format(str(dberror)))
         raise dberror
     return conn
コード例 #17
0
ファイル: skymodel.py プロジェクト: jjdmol/LOFAR
    def go(self):
        """Query the global sky model database for sources around the
        pointing (Python 2 variant).

        Returns 1 on database failure; on success execution continues
        past this excerpt.
        """
        self.logger.info("Building sky model")
        super(skymodel, self).go()

        # search box is a square of +/- search_size around (ra, dec)
        ra_min = self.inputs['ra'] - self.inputs['search_size']
        ra_max = self.inputs['ra'] + self.inputs['search_size']
        dec_min = self.inputs['dec'] - self.inputs['search_size']
        dec_max = self.inputs['dec'] + self.inputs['search_size']

        try:
            # closing() guarantees connection/cursor cleanup on any exit path
            with closing(
                db.connect(
                    hostname=self.inputs["db_host"],
                    port=int(self.inputs["db_port"]),
                    database=self.inputs["db_dbase"],
                    username=self.inputs["db_user"],
                    password=self.inputs["db_password"]
                )
            ) as db_connection:
                with closing(db_connection.cursor()) as db_cursor:
                    db_cursor.execute(
                        query_central % (float(self.inputs['ra']), float(self.inputs['dec']), "VLSS")
                    )
                    central_source = db_cursor.fetchone()
                    if central_source:
                        self.outputs["source_name"], self.outputs["source_flux"] = central_source
                    else:
                        raise PipelineException(
                            "Error reading central source from database; got %s" %
                            str(central_source)
                        )
                    self.logger.info("Central source is %s; flux %f" %
                        (self.outputs["source_name"], self.outputs["source_flux"])
                    )
                    db_cursor.execute(
                        query_skymodel % (
                            4, 4, # Only using VLSS for now
                            float(ra_min),
                            float(ra_max),
                            float(dec_min),
                            float(dec_max),
                            float(self.inputs['min_flux'])
                        )
                    )
                    results = db_cursor.fetchall()

        except db.Error, my_error:
            self.logger.warn("Failed to build sky model: %s " % (my_error))
            return 1
コード例 #18
0
ファイル: mdbTest.py プロジェクト: dhruv4/DataCanopySQL
def test():
	"""Create a random 11-column table 'test' and time building its data
	canopy."""
	chunks = 5
	cols = 10
	rows = 10000
	depth = cols

	conn = mdb.connect(username="******", password="******", database="test")
	cur = conn.cursor()

	createTable(cur, conn, "test", cols + 1)
	insertRandData(cur, conn, "test", rows)
	conn.commit()

	timing = createDCTable(cur, conn, "test", depth, chunks, cols, rows, 0)
	print(timing)
コード例 #19
0
ファイル: mdbCache.py プロジェクト: dhruv4/DataCanopySQL
def createDCTableLevel1(table, levels, numChunks, numCols, numRows):
	"""Build level 1 of the data canopy using per-chunk SQL functions.

	For every column (skipping colList[0]) and every chunk, a temporary
	GET_CHUNK() function slices the chunk's rows, AVG/STDDEV_SAMP/VAR_SAMP
	are computed over it and inserted into dc_<table>.  Median and mode
	are currently stubbed to 0.
	"""
	conn = mdb.connect(username="******", password="******", database="test")
	cur = conn.cursor()

	maxRows = (2**numCols - 1)*numChunks
	sizeChunk = math.floor(numRows/numChunks)
	#sizeChunk = math.ceil(numRows/numChunks)

	# SELECT * is issued only to obtain column names via cursor.description
	cur.execute("SELECT * FROM " + table)
	colList = [x[0] for x in  cur.description]

	#level 1
	# NOTE(review): the loop starts at index 1, skipping colList[0] —
	# presumably an id column; confirm against the table schema.
	for i in range(1, len(colList)):
		for x in range(numChunks):

			#cur.execute("CREATE FUNCTION GET_CHUNK(lim INT, off INT, tbl varchar(32), col varchar(32)) RETURNS TABLE (clm integer)"
			#	+" RETURN SELECT col FROM tbl LIMIT lim OFFSET off; END;")
			##^^This is the statement that SHOULD work but doesn't because monetdb doesn't recognize the variables like "col", "lim"
			
			cur.execute("CREATE FUNCTION GET_CHUNK() RETURNS TABLE (clm integer) "
				+"BEGIN RETURN SELECT " + colList[i] + " FROM " + table + " LIMIT " + str(sizeChunk) + " OFFSET " + str(x*sizeChunk) + "; END;")
			
			#cur.execute("SELECT AVG(clm), STDDEV_SAMP(clm), VAR_SAMP(clm), MEDIAN(clm) FROM GET_CHUNK()")

			#removed median for consistency

			cur.execute("SELECT AVG(clm), STDDEV_SAMP(clm), VAR_SAMP(clm) FROM GET_CHUNK()")

			#avg, std, var, med = cur.fetchone()
			avg, std, var = cur.fetchone()

			med = 0

			#cur.execute("SELECT TOP 1 COUNT( ) val, freq FROM " + table + " GROUP BY " + colList[i] + " ORDER BY COUNT( ) DESC")
			#mod = int(cur.fetchone()[0])
			mod = 0

			# ids wider than 32 bits are stored as bit strings, otherwise as ints
			if(numCols + math.ceil(math.log(numChunks, 2)) >= 32):
				cur.execute("INSERT INTO dc_" + table + " (col0, col1, col2, col3, col4, col5) VALUES (%s, %s, %s, %s, %s, %s)", 
					[recToBinTrans([i], x, numCols, numChunks), avg, std,var,med,mod])
			else:
				cur.execute("INSERT INTO dc_" + table + " (col0, col1, col2, col3, col4, col5) VALUES (%s, %s, %s, %s, %s, %s)", 
					[int(recToBinTrans([i], x, numCols, numChunks), 2), avg, std,var,med,mod])
			cur.execute("DROP FUNCTION GET_CHUNK()")

	conn.commit()
コード例 #20
0
 def _cursor(self):
     """Return a cursor, lazily opening the MonetDB connection on first use."""
     if not self.connection:
         settings = self.settings_dict
         # map Django settings keys onto MonetDB connect() keyword names
         params = {}
         for settings_key, connect_kwarg in (('USER', 'username'),
                                             ('NAME', 'database'),
                                             ('PASSWORD', 'password'),
                                             ('HOST', 'hostname')):
             if settings[settings_key]:
                 params[connect_kwarg] = settings[settings_key]
         if settings['PORT']:
             params['port'] = int(settings['PORT'])
         self.connection = Database.connect(**params)
     cursor = self.connection.cursor()
     # larger fetch batches make row retrieval noticeably faster
     cursor.arraysize = 1000
     return cursor
コード例 #21
0
ファイル: mdbperf.py プロジェクト: dhruv4/dc-demo
def createDCTableLevel1(table, levels, numChunks, numCols, numRows):
	"""Build level 1 of the data canopy for `table`, emitting progress
	markers on stdout.

	For every chunk and every column, computes AVG / STDDEV_SAMP /
	VAR_SAMP over the chunk's rows and inserts them into dc_<table>.
	Median and mode are currently stubbed to 0.  Returns the number of
	nodes inserted so later levels can continue the progress count.

	Bug fix: the chunk window used ``rnum < (c+1)*sizeChunk``, which
	silently dropped the last row of every chunk; it now uses ``<=``.
	"""
	conn = mdb.connect(username="******", password="******", database="test")
	cur = conn.cursor()

	# SELECT * is issued only to obtain column names via cursor.description
	cur.execute("SELECT * FROM " + table)
	colList = [x[0] for x in cur.description]

	sizeDC = numChunks * (2**numCols - 1)
	nodeCount = 0
	prevPercent = 0
	sizeChunk = math.ceil(numRows/numChunks)

	for c in range(numChunks):
		for i in range(numCols):

			# chunk c covers 1-based row numbers c*sizeChunk+1 .. (c+1)*sizeChunk
			cur.execute("SELECT AVG(" + colList[i] + "), STDDEV_SAMP(" + colList[i] + "), VAR_SAMP(" + colList[i] + ") FROM (SELECT " + colList[i] + ", ROW_NUMBER() OVER() as rnum FROM " 
				+ table + ") as foo WHERE rnum > " + str(c*sizeChunk) + " AND rnum <= " + str(sizeChunk + c*sizeChunk))

			avg, std, var = cur.fetchone()

			med = 0  # median not computed (kept 0 for schema compatibility)
			mod = 0  # mode not computed (kept 0 for schema compatibility)

			# canopy node id: bit i marks the column, combined with the chunk index
			ID = idChunkCombine(1 << i, c, numChunks)

			cur.execute("INSERT INTO dc_" + table + " (col0, col1, col2, col3, col4, col5) VALUES (%s, %s, %s, %s, %s, %s)",
				[ID, avg, std, var, med, mod])

			nodeCount+=1

			# emit a progress tick at most every 5 percentage points
			p = findPercent(nodeCount, sizeDC)
			if(p - prevPercent >= 5):
				print(str(random.randint(23,28123)) + "|" + str(p) + "&", sep="")
				prevPercent = p
				sys.stdout.flush()

	conn.commit()
	return nodeCount
コード例 #22
0
 def __init__(self, database="test", use_monet=True):
     """Open a handle to `database`, via MonetDB or local PostgreSQL."""
     self.monet = use_monet
     db_host = "localhost"
     db_dbase = database
     self.database = database
     db_user = "******"
     db_passwd = "monetdb"
     if use_monet:
         # MonetDB listens on 50000; autocommit so DDL takes effect at once
         self.conn = db.connect(hostname=db_host,
                                database=db_dbase,
                                username=db_user,
                                password=db_passwd,
                                port=50000,
                                autocommit=True)
     else:
         # NOTE(review): in this branch self.conn is a psycopg2 *cursor*,
         # not a connection object as in the MonetDB branch — confirm
         # that callers cope with the asymmetry.
         connect = psycopg2.connect(host=db_host, user=db_user,
                                    password=db_passwd, database=db_dbase)
         connect.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
         self.conn = connect.cursor()
コード例 #23
0
    def _cursor(self):
        """Return a cursor, lazily opening the database connection on first
        use (legacy Django backend, Python 2)."""
        if self.connection is None:
            settings_dict = self.settings_dict
            # back-fill old-style DATABASE_* keys from new-style keys
            # NOTE(review): this mutates settings_dict in place — confirm
            # no caller relies on it staying unmodified.
            if 'NAME' in settings_dict.keys() \
               and 'DATABASE_NAME' not in settings_dict.keys():
                settings_dict['DATABASE_NAME'] = settings_dict['NAME']
            if 'OPTIONS' in settings_dict.keys() \
               and 'DATABASE_OPTIONS' not in settings_dict.keys():
                settings_dict['DATABASE_OPTIONS'] = settings_dict['OPTIONS']

            if not settings_dict['DATABASE_NAME']:
                from django.core.exceptions import ImproperlyConfigured
                raise ImproperlyConfigured, "Please fill out DATABASE_NAME in the settings module before using the database."
            kwargs = {
                'database': settings_dict['DATABASE_NAME'],
                #'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
            }
            # user-supplied options override/extend the defaults
            kwargs.update(settings_dict['DATABASE_OPTIONS'])
            self.connection = Database.connect(**kwargs)
            # Register extract, date_trunc, and regexp functions.
        return self.connection.cursor()  #factory=MonetdbCursorWrapper)
コード例 #24
0
ファイル: base.py プロジェクト: marcoq/monetdb-python
    def _cursor(self):
        """Return a cursor, lazily opening the database connection on first
        use (legacy Django backend, Python 2)."""
        if self.connection is None:
            settings_dict = self.settings_dict
            # back-fill old-style DATABASE_* keys from new-style keys
            # NOTE(review): this mutates settings_dict in place — confirm
            # no caller relies on it staying unmodified.
            if 'NAME' in settings_dict.keys() \
               and 'DATABASE_NAME' not in settings_dict.keys():
                settings_dict['DATABASE_NAME'] = settings_dict['NAME']
            if 'OPTIONS' in settings_dict.keys() \
               and 'DATABASE_OPTIONS' not in settings_dict.keys():
                settings_dict['DATABASE_OPTIONS'] = settings_dict['OPTIONS']

            if not settings_dict['DATABASE_NAME']:
                from django.core.exceptions import ImproperlyConfigured
                raise ImproperlyConfigured, "Please fill out DATABASE_NAME in the settings module before using the database."
            kwargs = {
                'database': settings_dict['DATABASE_NAME'],
                #'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
            }
            # user-supplied options override/extend the defaults
            kwargs.update(settings_dict['DATABASE_OPTIONS'])
            self.connection = Database.connect(**kwargs)
            # Register extract, date_trunc, and regexp functions.
        return self.connection.cursor() #factory=MonetdbCursorWrapper)
コード例 #25
0
ファイル: mdbperf.py プロジェクト: dhruv4/dc-demo
def createDCTableLeveln(table, levels, numChunks, numCols, numRows, nodeCount):
	"""Fill in levels >= 3 of the data canopy, emitting progress markers.

	For every chunk and every column-combination id not already built at
	level 1 or 2, the stored level-2 statistic of each column pair inside
	the combination is read back and their sum is inserted as the node's
	(placeholder) aggregate.

	Fix: removed the dead local list `kids`, which was appended to on
	every set bit but never read.
	"""
	conn = mdb.connect(username="******", password="******", database="test")
	cur = conn.cursor()

	sizeDC = numChunks * (2**numCols - 1)
	prevPercent = findPercent(nodeCount, sizeDC)

	for c in range(numChunks):
		for i in range(1, 2**numCols):
			# level-1 and level-2 ids were already populated elsewhere
			if(checkLevel1(i) == 1 or checkLevel2(i) == 1):
				continue

			vals = []
			for x in range(numCols):
				if((i >> x) & 1 == 1):
					for y in range(x+1, numCols):
						if((i >> y) & 1 == 1):
							# fetch the stored level-2 statistic for the pair (x, y)
							cur.execute("SELECT col1 FROM dc_" + table + " WHERE col0 = " 
								+ str(idChunkCombine(2**x + 2**y, c, numChunks)))

							vals.append(cur.fetchone()[0])

			# placeholder aggregate of the child statistics
			correlation = sum(vals)

			cur.execute("INSERT INTO dc_" + table + " (col0, col1) VALUES (%s, %s)", 
				[idChunkCombine(i, c, numChunks), correlation])

			nodeCount+=1

			# emit a progress tick at most every 5 percentage points
			p = findPercent(nodeCount, sizeDC)
			if(p - prevPercent >= 5):
				print(str(random.randint(23,28123)) + "|" + str(p) + "&", sep="")
				prevPercent = p
				sys.stdout.flush()

	conn.commit()
コード例 #26
0
ファイル: mdbNew.py プロジェクト: dhruv4/DataCanopySQL
def createDCTableLevel2(table, levels, numChunks, numCols, numRows):
	"""Build level 2 of the data canopy: pairwise CORR per chunk.

	Columns are CAST to bigint before CORR is applied; results go into
	dc_<table> keyed by the pair's bitmask id combined with the chunk.

	Bug fix: the chunk window used ``rnum < (c+1)*sizeChunk``, which
	silently dropped the last row of every chunk; it now uses ``<=``.
	"""
	conn = mdb.connect(username="******", password="******", database="test")
	cur = conn.cursor()

	# SELECT * is issued only to obtain column names via cursor.description
	cur.execute("SELECT * FROM " + table)
	colList = [x[0] for x in cur.description]

	sizeChunk = math.ceil(numRows/numChunks)

	for c in range(numChunks):
		for i in range(numCols - 1):
			for j in range(i+1, numCols):

				# chunk c covers 1-based row numbers c*sizeChunk+1 .. (c+1)*sizeChunk
				cur.execute("SELECT CORR(cl1, cl2) FROM (SELECT CAST(" + colList[i] + " as bigint) as cl1, CAST(" + colList[j] + " as bigint) as cl2, ROW_NUMBER() OVER() as rnum FROM " 
					+ table + ") as foo WHERE rnum > " + str(c*sizeChunk) + " AND rnum <= " + str(sizeChunk + c*sizeChunk))

				cur.execute("INSERT INTO dc_" + table + " (col0, col1) VALUES (%s, %s)", 
					[idChunkCombine(2**i + 2**j, c, numChunks),float(cur.fetchone()[0])])

	conn.commit()
コード例 #27
0
ファイル: base.py プロジェクト: marcoq/monetdb-python
 def _DEPRECATED_cursor(self, settings):
     """Return a wrapped cursor, lazily opening the connection from the
     old-style settings object on first use (deprecated code path)."""
     if not self._valid_connection():
         kwargs = {
             'conv': django_conversions,
             'charset': 'utf8',
             'use_unicode': True,
         }
         # only pass settings that are actually set
         if settings.DATABASE_USER:
             kwargs['user'] = settings.DATABASE_USER
         if settings.DATABASE_NAME:
             kwargs['db'] = settings.DATABASE_NAME
         if settings.DATABASE_PASSWORD:
             kwargs['passwd'] = settings.DATABASE_PASSWORD
         # a leading '/' means the host is a unix socket path
         if settings.DATABASE_HOST.startswith('/'):
             kwargs['unix_socket'] = settings.DATABASE_HOST
         elif settings.DATABASE_HOST:
             kwargs['host'] = settings.DATABASE_HOST
         if settings.DATABASE_PORT:
             kwargs['port'] = int(settings.DATABASE_PORT)
         # user-supplied options override/extend the defaults
         kwargs.update(self.options)
         self.connection = Database.connect(**kwargs)
     cursor = CursorWrapper(self.connection.cursor())
     return cursor
コード例 #28
0
 def __init__(self, database="test", use_monet=True):
     """Open a handle to `database`, via MonetDB or local PostgreSQL."""
     self.monet = use_monet
     if use_monet:
         # MonetDB listens on 50000; autocommit so DDL takes effect at once
         db_port = 50000
         db_autocommit = True
     db_host = "localhost"
     db_dbase = database
     self.database = database
     db_user = "******"
     db_passwd = "monetdb"
     if use_monet:
         self.conn = db.connect(
             hostname=db_host,
             database=db_dbase,
             username=db_user,
             password=db_passwd,
             port=db_port,
             autocommit=db_autocommit,
         )
     else:
         # NOTE(review): in this branch self.conn is a psycopg2 *cursor*,
         # not a connection object as in the MonetDB branch — confirm
         # that callers cope with the asymmetry.
         connect = psycopg2.connect(host=db_host, user=db_user, password=db_passwd, database=db_dbase)
         connect.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
         self.conn = connect.cursor()
コード例 #29
0
 def _DEPRECATED_cursor(self, settings):
     """Return a wrapped cursor, lazily opening the connection from the
     old-style settings object on first use (deprecated code path)."""
     if not self._valid_connection():
         kwargs = {
             'conv': django_conversions,
             'charset': 'utf8',
             'use_unicode': True,
         }
         # only pass settings that are actually set
         if settings.DATABASE_USER:
             kwargs['user'] = settings.DATABASE_USER
         if settings.DATABASE_NAME:
             kwargs['db'] = settings.DATABASE_NAME
         if settings.DATABASE_PASSWORD:
             kwargs['passwd'] = settings.DATABASE_PASSWORD
         # a leading '/' means the host is a unix socket path
         if settings.DATABASE_HOST.startswith('/'):
             kwargs['unix_socket'] = settings.DATABASE_HOST
         elif settings.DATABASE_HOST:
             kwargs['host'] = settings.DATABASE_HOST
         if settings.DATABASE_PORT:
             kwargs['port'] = int(settings.DATABASE_PORT)
         # user-supplied options override/extend the defaults
         kwargs.update(self.options)
         self.connection = Database.connect(**kwargs)
     cursor = CursorWrapper(self.connection.cursor())
     return cursor
コード例 #30
0
ファイル: base.py プロジェクト: mbucc/monetdb-python
    def _cursor(self):
        """Return a CursorWrapper, lazily opening the MonetDB connection
        from the Django settings on first use.

        Fix: the original body mixed tabs and spaces for indentation,
        which is rejected (TabError) by Python 3; indentation is now
        spaces only.  Behavior is unchanged.
        """
        kwargs = {}
        if not self.connection:
            # only pass settings that are actually set
            if settings.DATABASE_USER:
                kwargs['username'] = settings.DATABASE_USER
            if settings.DATABASE_NAME:
                kwargs['database'] = settings.DATABASE_NAME
            if settings.DATABASE_PASSWORD:
                kwargs['password'] = settings.DATABASE_PASSWORD

            # Force strings to always come back as unicode.
            kwargs['use_unicode'] = True

            self.connection = Database.connect(**kwargs)

        c = self.connection.cursor()

        # fetch more rows at once, makes things faster (useable, actually)
        c.arraysize = 1000

        return CursorWrapper(c)
コード例 #31
0
ファイル: lsm.py プロジェクト: jjdmol/LOFAR
#
import sys, os, time

import monetdb
import monetdb.sql as db
import gsmutils as gsm 

db_host = "ldb002"
db_dbase = "gsm"
db_user = "******"
db_passwd = "msss"
db_port = 51000
db_autocommit = True

try:
    conn = db.connect(hostname=db_host, database=db_dbase, username=db_user, password=db_passwd, port=db_port, autocommit = db_autocommit)
except db.Error, e:
    raise

#ra_c = 289.89258333333333
#decl_c = 0.0017444444444444445

#ra_c = 287.80216666666666 
#decl_c = 9.096861111111112

#ra_c = 0.2307692222*15.
#decl_c = 90.

#Test Wouter Klijn
#ra_c=2.15373629697 
#decl_c=0.841248699461 
コード例 #32
0
def gsmMain(name, argv):
    """Select GSM sources inside a cone and write them to a sky-model file.

    NOTE(review): this variant is Python 2 only (print statements and the
    "except db.Error, e" syntax below).

    Parameters:
        name -- program name, used only in the usage message.
        argv -- argument list: [-p patchname] outfile RA DEC radius
                [vlssFluxCutoff [assocTheta]]

    Returns False on insufficient arguments; otherwise writes *outfile*
    via gsm.expected_fluxes_in_fov() and returns None.
    """

    import sys

    import monetdb
    import monetdb.sql as db
    import lofar.gsm.gsmutils as gsm
    #import gsmutils as gsm

    # Need at least outfile RA DEC radius; two extra slots when -p is given.
    if len(argv) < 4 or (argv[0] == '-p' and len(argv) < 6):
        print ''
        print 'Insufficient arguments given; run as:'
        print ''
        print '   %s [-p patchname] outfile RA DEC radius [vlssFluxCutoff [assocTheta]]' % name
        print 'to select using a cone'
        print ''
        print '   -p patchname    if given, all sources belong to this single patch'
        print '   outfile         path-name of the output file'
        print '                   It will be overwritten if already existing'
        print '   RA              cone center Right Ascension (J2000, degrees)'
        print '   DEC             cone center Declination     (J2000, degrees)'
        print '   radius          cone radius                 (degrees)'
        print '   vlssFluxCutoff  minimum flux (Jy) of VLSS sources to use'
        print '                   default = 4'
        print '   assocTheta      uncertainty in matching     (degrees)'
        print '                   default = 0.00278  (10 arcsec)'
        print ''
        return False

    # Get the arguments.
    patch = ''
    st = 0  # offset of the positional arguments; -p consumes slots 0-1
    if argv[0] == '-p':
        patch = argv[1]
        st = 2
    outfile = argv[st]
    ra = float(argv[st + 1])
    dec = float(argv[st + 2])
    radius = float(argv[st + 3])
    cutoff = 4.       # default minimum VLSS flux (Jy)
    theta = 0.00278   # default association uncertainty (degrees)
    if len(argv) > st + 4:
        cutoff = float(argv[st + 4])
    if len(argv) > st + 5:
        theta = float(argv[st + 5])

    # Hard-coded GSM database credentials.
    db_host = "ldb002"
    #db_host = "napels"
    db_dbase = "gsm"
    db_user = "******"
    db_passwd = "msss"
    #db_passwd = "gsm"
    db_port = 51000
    db_autocommit = True

    try:
        conn = db.connect(hostname=db_host,
                          database=db_dbase,
                          username=db_user,
                          password=db_passwd,
                          port=db_port,
                          autocommit=db_autocommit)
        gsm.expected_fluxes_in_fov(conn,
                                   ra,
                                   dec,
                                   radius,
                                   theta,
                                   outfile,
                                   patchname=patch,
                                   storespectraplots=False,
                                   deruiter_radius=3.717,
                                   vlss_flux_cutoff=cutoff)
    except db.Error, e:
        raise
コード例 #33
0
ファイル: gsm.py プロジェクト: mfkiwl/lofar-1
def gsmMain(name, argv):
    """Select GSM sources inside a cone and write them to a sky-model file.

    Parameters:
        name -- program name, used only in the usage message.
        argv -- argument list: [-p patchname] outfile RA DEC radius
                [vlssFluxCutoff [assocTheta]]

    RA/DEC are accepted as decimal degrees, or in the sexagesimal forms
    printed by msoverview (e.g. "08:13:36.0000 +48.13.03.0000").

    Returns False on insufficient arguments; otherwise writes *outfile*
    via gsm.expected_fluxes_in_fov() and returns None.
    """

    import sys

    import math

    import monetdb
    import monetdb.sql as db
    import lofar.gsm.gsmutils as gsm
    #import gsmutils as gsm

    # Need at least outfile RA DEC radius; two extra slots when -p is given.
    if len(argv) < 4 or (argv[0] == '-p' and len(argv) < 6):
        print('')
        print('Insufficient arguments given; run as:')
        print('')
        print(
            '   %s [-p patchname] outfile RA DEC radius [vlssFluxCutoff [assocTheta]]'
            % name)
        print('to select using a cone')
        print('')
        print(
            '   -p patchname    if given, all sources belong to this single patch'
        )
        print('   outfile         path-name of the output file')
        print('                   It will be overwritten if already existing')
        print(
            '   RA              cone center Right Ascension (J2000, degrees)')
        print(
            '   DEC             cone center Declination     (J2000, degrees)')
        print('   radius          cone radius                 (degrees)')
        print('   vlssFluxCutoff  minimum flux (Jy) of VLSS sources to use')
        print('                   default = 4')
        print('   assocTheta      uncertainty in matching     (degrees)')
        print('                   default = 0.00278  (10 arcsec)')
        print('')
        return False

    # Get the arguments.
    patch = ''
    st = 0  # offset of the positional arguments; -p consumes slots 0-1
    if argv[0] == '-p':
        patch = argv[1]
        st = 2
    outfile = argv[st]
    try:
        ra = float(argv[st + 1])
        dec = float(argv[st + 2])
    except ValueError:
        # Try to parse ra-dec as in the output of msoverview, e.g. 08:13:36.0000 +48.13.03.0000
        # NOTE(review): the HH:MM:SS value is converted to decimal *hours*,
        # not multiplied by 15 into degrees — confirm downstream expectation.
        ralst = argv[st + 1].split(':')
        ra = math.copysign(
            abs(float(ralst[0])) + float(ralst[1]) / 60. +
            float(ralst[2]) / 3600., float(ralst[0]))
        declst = argv[st + 2].split('.')
        dec = math.copysign(
            abs(float(declst[0])) + float(declst[1]) / 60. +
            float('.'.join(declst[2:])) / 3600., float(declst[0]))
    radius = float(argv[st + 3])
    cutoff = 4.       # default minimum VLSS flux (Jy)
    theta = 0.00278   # default association uncertainty (degrees)
    if len(argv) > st + 4:
        cutoff = float(argv[st + 4])
    if len(argv) > st + 5:
        theta = float(argv[st + 5])

    # Hard-coded GSM database credentials.
    db_host = "gsmdb.control.lofar"
    #db_host = "napels"
    db_dbase = "gsm"
    db_user = "******"
    db_passwd = "msss"
    #db_passwd = "gsm"
    db_port = 51000
    db_autocommit = True

    try:
        conn = db.connect(hostname=db_host,
                          database=db_dbase,
                          username=db_user,
                          password=db_passwd,
                          port=db_port,
                          autocommit=db_autocommit)
        gsm.expected_fluxes_in_fov(conn,
                                   ra,
                                   dec,
                                   radius,
                                   theta,
                                   outfile,
                                   patchname=patch,
                                   storespectraplots=False,
                                   deruiter_radius=3.717,
                                   vlss_flux_cutoff=cutoff)
    except db.Error:
        # FIX: the bound exception name was never used; a bare re-raise
        # keeps the full original traceback.
        raise
コード例 #34
0
ファイル: testCache.py プロジェクト: dhruv4/DataCanopySQL
def runExperiment():
	"""Benchmark Data Canopy construction, recording timings and cache misses.

	Reads parameters from config.ini, sweeps the configured x-axis
	(Cols, Rows or Chunks), and for each sweep point loads a CSV into
	the backend selected by sys.argv[1] ("pg" or "mdb"), runs the
	setup/level1/level2/leveln build phases as subprocesses under
	`perf stat -e cache-misses`, averages the results over numTrials,
	and finally plots them via graph().
	"""
	
	Config = configparser.ConfigParser()
	################################################
	Config.read("config.ini")
	####^^CHANGE THE CONFIG FILE TO CHANGE VARIABLES
	################################################

	numTrials = Config.getint("Experiment Config", "NumberOfTrials")

	numChunks = Config.getint("Data Canopy Config", "NumChunks")
	numRows = Config.getint("Data Set Config", "NumRows")
	numStats = Config.getint("Data Canopy Config", "NumStats")
	numCols = Config.getint("Data Set Config", "NumCols")
	numLevels = Config.getint("Data Canopy Config", "NumLevels")
	xaxis = Config.get("Experiment Config", "XAxis")

	times = []
	caches = []
	vals = []

	# Sweep range [a, r] along the configured x-axis.
	if(xaxis == "Chunks"):
		#r = int(math.ceil(math.log(numChunks, 10)))
		r = int(numChunks/10)
		a = 1
	elif(xaxis == "Cols"):
		#r = int(numCols/5)
		r = int(numCols)
		#a = 1
		a = 2
	elif(xaxis == "Rows"):
		r = int(math.ceil(math.log(numRows, 10)))
		a = 4

	for i in range(a, r+1):
		
		if(xaxis == "Cols"):
			#numCols = 5*i
			numCols = i
			numLevels = numCols
		elif(xaxis == "Rows"):
			numRows = 10**i
		elif(xaxis == "Chunks"):
			#numChunks = 10**i
			numChunks = 10*i
		
		startTime = time.time()

		# Create the experiment table and bulk-load the pre-generated CSV.
		if(sys.argv[1] == "pg"):

			conn = pg.connect(dbname="postgres")
			cur = conn.cursor()
			pgNew.createTable(cur, conn, 'exp', numCols)
			conn.commit()
			#pgNew.insertRandData(cur, conn, 'exp', numRows)
			if(xaxis == "Cols"):
				cur.execute("COPY exp FROM '/home/gupta/DataCanopySQL/test" + str(numCols) + ".csv' DELIMITER ',' CSV")
			elif(xaxis == "Rows"):
				cur.execute("COPY exp FROM '/home/gupta/DataCanopySQL/test" + str(numRows) + ".csv' DELIMITER ',' CSV")
			elif(xaxis == "Chunks"):
				cur.execute("COPY exp FROM '/home/gupta/DataCanopySQL/test" + str(numRows) + ".csv' DELIMITER ',' CSV")

		elif(sys.argv[1] == "mdb"):

			conn = mdb.connect(username="******", password="******", database="test")
			cur = conn.cursor()
			mdbNew.createTable(cur, conn, 'exp', numCols)
			#mdbNew.insertRandData(cur, conn, 'exp', numRows)
			if(xaxis == "Cols"):
				cur.execute("COPY INTO exp FROM '/home/gupta/DataCanopySQL/test" + str(numCols) + ".csv' USING DELIMITERS ','")
			elif(xaxis == "Rows"):
				cur.execute("COPY INTO exp FROM '/home/gupta/DataCanopySQL/test" + str(numRows) + ".csv' USING DELIMITERS ','")
			elif(xaxis == "Chunks"):
				cur.execute("COPY INTO exp FROM '/home/gupta/DataCanopySQL/test" + str(numRows) + ".csv' USING DELIMITERS ','")

			#cur.execute("COPY INTO exp FROM 'test" + str(numRows) + ".npy'")
		
		conn.commit()

		print("Table loaded", time.time() - startTime)

		# Per-phase accumulators, summed over trials then averaged below.
		timing = {}
		timing['setup'] = 0
		timing['level1'] = 0
		timing['level2'] = 0
		timing['leveln'] = 0
		timing['total'] = 0
		caching = {}
		caching['setup'] = 0
		caching['level1'] = 0
		caching['level2'] = 0
		caching['leveln'] = 0
		caching['total'] = 0

		for j in range(numTrials):

			if(sys.argv[1] == "pg"):

				os.system("rm -rf filenamepg.txt")

				totalStart = time.time()
				
				startTime = time.time()
				os.system("perf stat -e 'cache-misses' -x- python3 pgNew.py setup exp " + str(numLevels) + " " + str(numChunks) + " " + str(numCols) + " " + str(numRows) + " >> filenamepg.txt 2>&1")
				timing['setup'] += time.time() - startTime
				print("reached 1")
				startTime = time.time()
				os.system("perf stat -e 'cache-misses' -x- python3 pgNew.py level1 exp " + str(numLevels) + " " + str(numChunks) + " " + str(numCols) + " " + str(numRows) + " >> filenamepg.txt 2>&1")
				timing['level1'] += time.time() - startTime
				print("reached 2")
				startTime = time.time()
				os.system("perf stat -e 'cache-misses' -x- python3 pgNew.py level2 exp " + str(numLevels) + " " + str(numChunks) + " " + str(numCols) + " " + str(numRows) + " >> filenamepg.txt 2>&1")
				timing['level2'] += time.time() - startTime
				print("reached n")
				print("numLevels", numLevels)
				startTime = time.time()
				if(numLevels >= 2):
					os.system("perf stat -e 'cache-misses' -x- python3 pgNew.py leveln exp " + str(numLevels) + " " + str(numChunks) + " " + str(numCols) + " " + str(numRows) + " >> filenamepg.txt 2>&1")
				timing['leveln'] += time.time() - startTime
				'''
				startTime = time.time()
				os.system("python3 pgNew.py setup exp " + str(numLevels) + " " + str(numChunks) + " " + str(numCols) + " " + str(numRows) + " >> filenamepg.txt 2>&1")
				timing['setup'] += time.time() - startTime
				print("reached 1")
				startTime = time.time()
				os.system("python3 pgNew.py level1 exp " + str(numLevels) + " " + str(numChunks) + " " + str(numCols) + " " + str(numRows) + " >> filenamepg.txt 2>&1")
				timing['level1'] += time.time() - startTime
				print("reached 2")
				startTime = time.time()
				os.system("python3 pgNew.py level2 exp " + str(numLevels) + " " + str(numChunks) + " " + str(numCols) + " " + str(numRows) + " >> filenamepg.txt 2>&1")
				timing['level2'] += time.time() - startTime
				print("reached n")
				print("numLevels", numLevels)
				startTime = time.time()
				if(numLevels > 2):
					os.system("python3 pgNew.py leveln exp " + str(numLevels) + " " + str(numChunks) + " " + str(numCols) + " " + str(numRows) + " >> filenamepg.txt 2>&1")
				timing['leveln'] += time.time() - startTime
				'''

				timing['total'] += time.time() - totalStart

				# Parse perf's "-x-" output: the cache-miss count is the
				# field before the first '-' on the "cache-misses" line.
				lines = [line.rstrip('\n') for line in open('filenamepg.txt')]

				for line in lines:
					if(line[-12:] == "cache-misses"):
						caching['setup'] += int(line.split('-')[0])
						break

				# NOTE(review): relies on the for-loop variable leaking; if
				# no line matched, this removes the *last* line instead.
				lines.remove(line)

				for line in lines:
					if(line[-12:] == "cache-misses"):
						caching['level1'] += int(line.split('-')[0])
						break

				lines.remove(line)

				for line in lines:
					if(line[-12:] == "cache-misses"):
						caching['level2'] += int(line.split('-')[0])
						break

				lines.remove(line)

				for line in lines:
					if(line[-12:] == "cache-misses"):
						caching['leveln'] += int(line.split('-')[0])
						break

				lines.remove(line)

			elif(sys.argv[1] == "mdb"):

				os.system("rm -rf filenamemdb.txt")

				totalStart = time.time()

				startTime = time.time()

				os.system("perf stat -e 'cache-misses' -x- python3 mdbNew.py setup exp " + str(numLevels) + " " + str(numChunks) + " " + str(numCols) + " " + str(numRows) + " >> filenamemdb.txt 2>&1")
				timing['setup'] += time.time() - startTime
				print("reached 1")
				startTime = time.time()
				os.system("perf stat -e 'cache-misses' -x- python3 mdbNew.py level1 exp " + str(numLevels) + " " + str(numChunks) + " " + str(numCols) + " " + str(numRows) + " >> filenamemdb.txt 2>&1")
				timing['level1'] += time.time() - startTime
				print("reached 2")
				startTime = time.time()
				os.system("perf stat -e 'cache-misses' -x- python3 mdbNew.py level2 exp " + str(numLevels) + " " + str(numChunks) + " " + str(numCols) + " " + str(numRows) + " >> filenamemdb.txt 2>&1")
				timing['level2'] += time.time() - startTime
				print("reached n")
				print("numLevels", numLevels)
				startTime = time.time()
				if(numLevels >= 2):
					os.system("perf stat -e 'cache-misses' -x- python3 mdbNew.py leveln exp " + str(numLevels) + " " + str(numChunks) + " " + str(numCols) + " " + str(numRows) + " >> filenamemdb.txt 2>&1")
				timing['leveln'] += time.time() - startTime

				timing['total'] += time.time() - totalStart

				lines = [line.rstrip('\n') for line in open('filenamemdb.txt')]

				for line in lines:
					if(line[-12:] == "cache-misses"):
						caching['setup'] += int(line.split('-')[0])
						break
				print(lines, line)
				lines.remove(line)

				for line in lines:
					if(line[-12:] == "cache-misses"):
						caching['level1'] += int(line.split('-')[0])
						break
				print(lines, line)
				lines.remove(line)

				for line in lines:
					if(line[-12:] == "cache-misses"):
						caching['level2'] += int(line.split('-')[0])
						break
				print(lines, line)
				lines.remove(line)

				for line in lines:
					if(line[-12:] == "cache-misses"):
						caching['leveln'] += int(line.split('-')[0])
						break

				print(lines, line)
				lines.remove(line)
	
			# NOTE(review): the phase counters are cumulative across trials,
			# so adding their running sums to 'total' every trial counts
			# earlier trials repeatedly — confirm whether this is intended.
			caching['total'] += caching['setup'] + caching['level1'] + caching['level2'] + caching['leveln']
			#^SUM OF THE CACHE MISSES

			cur.execute("SELECT COUNT(*) FROM dc_exp")
			print("Size of Data Canopy: ", cur.fetchone()[0])
			print("Predicted Size of DC: ", numChunks*(2**numCols - 1))
			cur.execute("DROP TABLE dc_exp")
			conn.commit()

			print(j)

		# Average the per-trial sums and record this sweep point.
		for x in caching:
			caching[x] /= numTrials
		caches.append(caching)

		for x in timing:
			timing[x] /= numTrials
		times.append(timing)
		
		print("time: ", timing['total'], "cache misses: ", caching['total'])

		if(xaxis == "Cols"):
			vals.append(numCols)
			print("trial", numCols, "ran")
		elif(xaxis == "Rows"):
			vals.append(numRows)
			print("trial", numRows, "ran")
		elif(xaxis == "Chunks"):
			vals.append(numChunks)
			print("trial", numChunks, "ran")

		cur.execute("DROP TABLE exp")
		conn.commit()
		cur.close()
		conn.close()

		#####MOVE WRITING TO FILE TO HERE

		#Ornah?? It would give me a way to stop and start again...

	print("vals", vals)
	print("caches", caches)
	
	# Iterating the leaked loop dicts yields the phase names for graphing.
	for j in caching:

		graph(vals, [k[j] for k in caches], Config.get("Experiment Config", "XAxis"), Config.get("Experiment Config", "Title"), j, sys.argv[1], 1)

	plt.close()

	for j in caching:

		graph(vals, [k[j] for k in caches], Config.get("Experiment Config", "XAxis"), Config.get("Experiment Config", "Title"), j, sys.argv[1], 1, 1)
	
	plt.close()

	for j in timing:

		graph(vals, [k[j] for k in times], Config.get("Experiment Config", "XAxis"), Config.get("Experiment Config", "Title"), j, sys.argv[1], 0)

	plt.close()

	for j in timing:

		graph(vals, [k[j] for k in times], Config.get("Experiment Config", "XAxis"), Config.get("Experiment Config", "Title"), j, sys.argv[1], 0, 1)
	
	plt.close()
コード例 #35
0
	def connect(self):
		"""Open a connection to the MonetDB "benchmark" database."""
		credentials = {
			"username": "******",
			"password": "******",
			"hostname": "192.168.30.92",
			"database": "benchmark",
		}
		return monetdblib.connect(**credentials)
コード例 #36
0
ファイル: lsm.py プロジェクト: venkatarajasekhar/pyimager
import monetdb
import monetdb.sql as db
import gsmutils as gsm

# Module-level setup: connect to the GSM MonetDB database at import time.
db_host = "ldb002"
db_dbase = "gsm"
db_user = "******"
db_passwd = "msss"
db_port = 51000
db_autocommit = True

try:
    conn = db.connect(hostname=db_host,
                      database=db_dbase,
                      username=db_user,
                      password=db_passwd,
                      port=db_port,
                      autocommit=db_autocommit)
except db.Error:
    # BUG FIX: "except db.Error, e:" is Python-2-only syntax (SyntaxError
    # on Python 3) and never used the bound name; plain re-raise keeps
    # the original traceback.
    raise

#ra_c = 289.89258333333333
#decl_c = 0.0017444444444444445

#ra_c = 287.80216666666666
#decl_c = 9.096861111111112

#ra_c = 0.2307692222*15.
#decl_c = 90.

#Test Wouter Klijn
コード例 #37
0
 def get_new_connection(self, conn_params):
     """Open and return a new database connection built from *conn_params*."""
     return Database.connect(**conn_params)
コード例 #38
0
ファイル: test.py プロジェクト: dhruv4/DataCanopySQL
def runExperiment():
	"""Benchmark Data Canopy construction time across a configured sweep.

	Reads parameters from config.ini, sweeps the configured x-axis
	(Cols, Rows or Chunks), and for each sweep point creates a random
	table in the backend selected by sys.argv[1] ("pg" or "mdb"),
	builds the data canopy numTrials times while timing the
	setup/level1/level2/leveln phases, then plots the averaged timings
	via graph().
	"""
	
	Config = configparser.ConfigParser()
	################################################
	Config.read("config.ini")
	####^^CHANGE THE CONFIG FILE TO CHANGE VARIABLES
	################################################

	numTrials = Config.getint("Experiment Config", "NumberOfTrials")

	numChunks = Config.getint("Data Canopy Config", "NumChunks")
	numRows = Config.getint("Data Set Config", "NumRows")
	numStats = Config.getint("Data Canopy Config", "NumStats")
	numCols = Config.getint("Data Set Config", "NumCols")
	numLevels = Config.getint("Data Canopy Config", "NumLevels")
	xaxis = Config.get("Experiment Config", "XAxis")

	times = []
	vals = []

	# Sweep range [a, r] along the configured x-axis.
	if(xaxis == "Chunks"):
		r = int(math.ceil(math.log(numChunks, 10)))
		a = 1
	elif(xaxis == "Cols"):
		# BUG FIX: "numCols/5" is float division under Python 3 (this file
		# uses configparser/print(), i.e. Python 3) and range(a, r+1)
		# below requires an int; use floor division.
		r = numCols // 5
		a = 2
	elif(xaxis == "Rows"):
		r = int(math.ceil(math.log(numRows, 10)))
		a = 4

	for i in range(a, r+1):

		if(xaxis == "Cols"):
			numCols = 5*i
		elif(xaxis == "Rows"):
			numRows = 10**i
		elif(xaxis == "Chunks"):
			numChunks = 10**i
		
		numLevels = numCols

		# Create and populate the experiment table for this sweep point.
		if(sys.argv[1] == "pg"):

			conn = pg.connect(dbname="postgres")
			cur = conn.cursor()
			pgTest.createTable(cur, conn, 'exp', numCols + 1)
			pgTest.insertRandData(cur, conn, 'exp', numRows)

		elif(sys.argv[1] == "mdb"):

			conn = mdb.connect(username="******", password="******", database="test")
			cur = conn.cursor()
			mdbTest.createTable(cur, conn, 'exp', numCols + 1)
			mdbTest.insertRandData(cur, conn, 'exp', numRows)
		
		conn.commit()

		# Per-phase accumulators, summed over trials then averaged below.
		timing = {}
		timing['setup'] = 0
		timing['level1'] = 0
		timing['level2'] = 0
		timing['leveln'] = 0
		timing['total'] = 0

		for j in range(numTrials):

			startTime = time.time()

			if(sys.argv[1] == "pg"):

				s, one, two, n = pgTest.createDCTable(cur, conn, 'exp', numLevels, numChunks, numCols, numRows)

				timing['setup'] += s
				timing['level1'] += one
				timing['level2'] += two
				timing['leveln'] += n

			elif(sys.argv[1] == "mdb"):

				s, one, two, n = mdbTest.createDCTable(cur, conn, 'exp', numLevels, numChunks, numCols, numRows)
				timing['setup'] += s
				timing['level1'] += one
				timing['level2'] += two
				timing['leveln'] += n
				
			timing['total'] += time.time()-startTime
			
			cur.execute("SELECT COUNT(*) FROM dc_exp")
			print("Size of Data Canopy: ", cur.fetchone()[0])
			print("Predicted Size of DC: ", numChunks*(2**numCols -1))
			cur.execute("DROP TABLE dc_exp")
			conn.commit()

			print(j)

		# Average the per-trial sums and record this sweep point.
		for x in timing:
			timing[x] /= numTrials
		times.append(timing)
		
		if(xaxis == "Cols"):
			vals.append(numCols)
			print("trial", numCols, "ran")
		elif(xaxis == "Rows"):
			vals.append(numRows)
			print("trial", numRows, "ran")
		elif(xaxis == "Chunks"):
			vals.append(numChunks)
			print("trial", numChunks, "ran")

		cur.execute("DROP TABLE exp")
		conn.commit()
		cur.close()
		conn.close()

	print("vals", vals)
	print("times", times)
	
	# Iterating the leaked loop dict yields the phase names for graphing.
	for j in timing:

		graph(vals, [k[j] for k in times], xaxis, Config.get("Experiment Config", "Title") + j, sys.argv[1])

	plt.close()

	for j in timing:

		graph(vals, [k[j] for k in times], xaxis, Config.get("Experiment Config", "Title") + j, sys.argv[1], 1)
	
	plt.close()
コード例 #39
0
ファイル: base.py プロジェクト: bourivouh/djonet
 def get_new_connection(self, conn_params):
     """Create and hand back a fresh Database connection from *conn_params*."""
     new_connection = Database.connect(**conn_params)
     return new_connection
コード例 #40
0
 def connect(self):
     """Connect to the fixed MonetDB benchmark instance and return the handle."""
     opts = dict(username="******",
                 password="******",
                 hostname="192.168.30.92",
                 database="benchmark")
     return monetdblib.connect(**opts)