def __init__(self, remoteShell, domainAdmin="admin", domain=None):
    """Build the vastool command wrapper.

    Args:
        remoteShell: Shell object used to execute commands on the remote host.
        domainAdmin: Domain administrator account name.
        domain: Default domain handed to sub-commands that need one.
    """
    self.remoteShell = remoteShell
    # Absolute path to the vastool binary on the target host.
    self.vastoolPath = "/opt/quest/bin/vastool"
    self.domainAdmin = domainAdmin
    self.defaultDomain = domain

    # One attribute per vastool sub-command; each wraps self.run so every
    # sub-command executes through the same remote shell.
    self.info = info.info(self.run)
    self.flush = flush.flush(self.run)
    self.create = create.create(self.run, self.defaultDomain)
    self.delete = delete.delete(self.run)
    self.timesync = timesync.timesync(self.run)
    self.nss = nss.nss(self.run)
    self.group = group.group(self.run)
    self.isvas = isvas.isvas(self.run)
    self.list = list.list(self.run)
    self.auth = auth.auth(self.run, self.defaultDomain)
    self.cache = cache.cache(self.run)
    self.configure = configure.configure(self.run)
    self.configureVas = configureVas.configureVas(self.run)
    self.schema = schema.schema(self.run)
    self.merge = merge.merge(self.run)
    self.unmerge = unmerge.unmerge(self.run)
    self.user = User.user(self.run)
    self.ktutil = ktutil.ktutil(self.run)
    self.load = load.load(self.run)
    # License helpers are re-exposed as shortcuts on this object.
    self._license = License.License(self.run)
    self.License = self._license.License
    self.parseLicense = self._license.parseLicense
    self.compareLicenses = self._license.compareLicenses
    self.unconfigure = unconfigure.unconfigure(self.run)
    self.nssdiag = nssdiag(self.run)
    # NOTE(review): the original ended with ~20 bare isinstance(...) calls
    # whose results were discarded — pure no-ops, removed here.
Example #2
0
def convert_to_schema(rpc_schema):
    """Translate an rpc schema into a schema for confluo.

    Args:
        rpc_schema: The rpc schema whose columns are copied over.
    Returns:
        The equivalent confluo schema.
    """
    b = schema_builder()
    for col in rpc_schema:
        col_type = data_type(col.type_id, col.type_size)
        b.add_column(col_type, col.name)
    return schema(b.build())
Example #3
0
def main(argv):
    """Estimate the cost of one TPC-H query on a given hardware config.

    Args:
        argv: Command-line arguments (currently unused).
    """
    # Pick scale factor for the generated TPC-H data.
    scaleFactor = 10

    # Create schema (pass type and scale factor).
    s = schema('TPCH', scaleFactor)

    # Create query.
    # Test operators         : ('select', 'filter', 'join', 'aggregation', 'groupby')
    # Test mult cols         : ('joinc2',  'joinc4')
    # Test mult joins        : ('join2c2', 'join4c2')
    # Test complex joins     : ('join4c8')
    # Test (un)nest queries  : ('q2-sub-simple', 'q2-unsub-simple')
    # Actual queries         : ('q2-nested', 'q2-unnested')
    q = query('q2-sub-simple2', scaleFactor)

    # Create system configuration ('TeslaV100', 0)
    # Hardware: ('RI2')
    # GPU-DB optimizations: 0 -> no opts, 1 -> mempool, 2 -> cache, 3 -> all
    c = config('RI2', 0)

    # Build the planner and estimate the cost of q on c.
    p = planner(s, q, c)
    queryEstimation = p.estimateCost()

    # Print query estimation.
    queryEstimation.printRes()

    # Debug msg. Parenthesized so it runs on both Python 2 (statement with a
    # parenthesized expression) and Python 3 (function call) — the original
    # bare "print" statement was Python 2 only.
    print("DONE.")

    # Exit normally.
    exit(0)
Example #4
0
    def boot(self,create_indices=False):
        """Initialise the store: load the schema, set up indices, bootstrap.

        Args:
            create_indices: When True, (re)create indices and load the
                bootstrap schema for the first time; otherwise the schema
                is read back from the database.
        """
        self.schema = schema.schema(bootstrap_filenames = self.bootstrap_filenames,in_test_mode=self.in_test_mode)

        # In test mode there is no backing database to prepare.
        if self.in_test_mode:
            return

        # Create a right hand side for literal properties stored in link
        self.null_node=self.ndb.get_node(0)

        # Make sure our indices are set up
        self.init_indices(create_indices=create_indices)

        # Get our bootstrap schema
        if create_indices:
            self.schema.load_bootstrap()  # First time load
            self.log.debug(self.schema.errors)
        else:
            self.schema.load_from_neodb(self)
Example #5
0
	def __init__(self, schema_file, directory=""):
		"""Load the schema, then validate every file found in *directory*."""
		self.schema = schema(schema_file)
		self.element_list = self.schema.get_element_list("element","vip_object")
		self.addresses = self.address_data()
		# Normalise so later file lookups can assume a trailing slash.
		self.directory = directory if directory.endswith("/") else directory + "/"
		self.dir_list = listdir(self.directory)
		# Per-file and per-column validation results, filled in below.
		self.invalid_files, self.missing_files = [], []
		self.valid_files = {}
		self.invalid_columns, self.missing_columns = {}, {}
		self.valid_columns, self.duplicate_columns = {}, {}
		self.invalid_sections = []
		# A config file, when present, drives the validation pass.
		if not self.uses_config():
			self.validate()
		else:
			self.validate_with_config()
Example #6
0
    def __init__(self, schema_file, directory=""):
        """Parse the schema file and kick off validation of *directory*."""
        self.schema = schema(schema_file)
        self.element_list = self.schema.get_element_list(
            "element", "vip_object")
        self.addresses = self.address_data()
        # Later file lookups expect the directory path to end with "/".
        if not directory.endswith("/"):
            directory = directory + "/"
        self.directory = directory
        self.dir_list = listdir(directory)
        # Initialise the per-file / per-column result containers.
        for attr in ("invalid_files", "missing_files"):
            setattr(self, attr, [])
        for attr in ("valid_files", "invalid_columns", "missing_columns",
                     "valid_columns", "duplicate_columns"):
            setattr(self, attr, {})
        self.invalid_sections = []
        # Config-driven validation takes precedence when a config exists.
        if self.uses_config():
            self.validate_with_config()
        else:
            self.validate()
Example #7
0
from schema import schema
from seed import seed


# Create the database schema, then populate it with seed data.
schema()
seed()
Example #8
0
 def setUpClass(cls):
     # Runs once for the whole TestCase. Presumably schema() creates the
     # tables at DBPATH and setDB points the app at it — TODO confirm.
     schema(DBPATH)
     setDB(DBPATH)
Example #9
0
import unittest
from app.shirt import Shirts
import schema
from settings import TESTDBPATH
import sqlite3

# Build the schema in the test database and point the model at it before
# any test runs.
schema.schema(TESTDBPATH)
Shirts.dbpath = TESTDBPATH
# Row id of the fixture shirt inserted by setUp; shared across tests.
test_shirt = None
# Run with: python3 -m unittest discover test

class TestAccount(unittest.TestCase):
    """Exercises the Shirts model against the test database."""

    def setUp(self):
        # runs before every test case: wipe the shirts table and insert a
        # single known fixture row, remembering its rowid in test_shirt.
        global test_shirt
        with sqlite3.connect(Shirts.dbpath) as conn:
            cur = conn.cursor()
            SQL = f"DELETE FROM {Shirts.tablename}"
            cur.execute(SQL)
            SQL = f"""INSERT INTO {Shirts.tablename}(style, size, color)  VALUES ("casual", "L", "green")"""
            cur.execute(SQL)
            test_shirt = cur.lastrowid

    def test_dummy(self):
        '''if everything is working correctly this will pass'''
        self.assertTrue(True)

    def test_all(self):
        # setUp inserted exactly one row, so all() must return one element.
        result = Shirts.all()
        self.assertEqual(len(result), 1, "all func returns list with correct number of elements")
Example #10
0
from lxml import etree
import MySQLdb as mdb
import schema
import urllib

# This uses individual inserts for each new row into the database, which is
# done due to max insert size problems we could run into. Also, this should
# free up the database for other tasks when doing a large upload.

# Fetch the VIP XML schema that drives the element/column discovery below.
fschema = urllib.urlopen("http://election-info-standard.googlecode.com/files/vip_spec_v3.0.xsd")
#fschema = open("schema.xsd")

schema = schema.schema(fschema)

simpleAddressTypes = schema.get_elements_of_attribute("type", "simpleAddressType")
detailAddressTypes = schema.get_elements_of_attribute("type", "detailAddressType")

ELEMENT_LIST = schema.get_element_list("element","vip_object")

# Map each simpleContent element name to the parent elements it appears
# under. (Fixed: this line originally read "SIMPLECONTENTS = {)", which is
# a syntax error.)
SIMPLECONTENTS = {}
for elem in schema.schema["element"][0]["elements"]:
	for e in elem["elements"]:
		if "simpleContent" in e:
			if e["name"] in SIMPLECONTENTS:
				SIMPLECONTENTS[e["name"]]["parents"].append(elem['name'])
			else:
				SIMPLECONTENTS[e["name"]] = {"parents":[elem['name']]}

UNBOUNDEDS = {}
for elem in schema.schema["element"][0]["elements"]:
	for e in elem["elements"]:
		if "maxOccurs" in e and "simpleContent" not in e and e["maxOccurs"] == "unbounded":
Example #11
0
    def prep_db(self):
        """Populate the brand/image databases used by the front end.

        Resets the output images to a placeholder, rebuilds the schema,
        then fills the brands table from name-fragment matches, industry
        matches, and finally random rows until MAX_FILES entries exist.

        NOTE(review): the SELECT statements interpolate self.brand_name /
        self.industry via str.format — if those values ever come from user
        input this is SQL-injectable; parameterize with ? placeholders.
        NOTE(review): the image paths below are hard-coded absolute paths.
        """
        print("runs 1")
        # Reset output images img1..img9 to the placeholder image.
        counter = 1
        while counter < 10:
            shutil.copyfile(
                '/home/mbraly/python-for-byte-academy/Final_Project/Website/matapp/my-app/src/output/placeholder.jpg',
                f'/home/mbraly/python-for-byte-academy/Final_Project/Website/matapp/my-app/src/output/img{counter}.jpg'
            )
            counter += 1
        name_counter = 0
        counter = 0  # total number of brand rows inserted below
        schema.schema()
        name_sim = []
        industry_sim = []
        # Slide a 2-character window across brand_name and collect brands
        # whose name contains that fragment.
        while name_counter < len(self.brand_name) - 2:
            with sqlite3.connect(self.DBPATH) as conn:
                cur = conn.cursor()
                sql = """SELECT brand_name 
                FROM logos WHERE brand_name 
                LIKE '%{}%';""".format(
                    self.brand_name[name_counter:name_counter + 2])
                cur.execute(sql, )
                name_sim = cur.fetchall()
                name_counter += 1
                # Copy each match into the image DB; duplicates are
                # rejected by the table's integrity constraint.
                for i in name_sim:
                    with sqlite3.connect(self.IMG_DBPATH) as conn:
                        cur = conn.cursor()
                        sql = """INSERT INTO brands (brand_name)
                        VALUES (?);"""

                        try:
                            # NOTE(review): range(1) runs once — looks
                            # vestigial (cf. MULTIPLIER below).
                            for j in range(1):
                                cur.execute(sql, (i[0], ))
                                counter += 1
                        except sqlite3.IntegrityError:
                            pass

        # Add brands from the same industry, weighted by MULTIPLIER.
        if self.industry != "":
            with sqlite3.connect(self.DBPATH) as conn:
                cur = conn.cursor()
                sql = """SELECT brand_name, industry 
                FROM logos WHERE industry LIKE '%{}%';""".format(self.industry)
                cur.execute(sql, )
                industry_sim = cur.fetchall()

                for i in industry_sim:
                    with sqlite3.connect(self.IMG_DBPATH) as conn:
                        cur = conn.cursor()

                        sql = """INSERT INTO brands (brand_name)
                        VALUES (?)"""
                        try:
                            for j in range(self.MULTIPLIER):
                                cur.execute(sql, (i[0], ))
                                counter += 1
                        except sqlite3.IntegrityError:
                            pass
        # Top up with random brands until MAX_FILES rows were attempted.
        if counter < self.MAX_FILES:
            remaining_spots = self.MAX_FILES - counter
            with sqlite3.connect(self.DBPATH) as conn:
                cur = conn.cursor()
                sql = """SELECT brand_name FROM logos
                ORDER BY RANDOM() LIMIT {};""".format(remaining_spots)

                cur.execute(sql, )
                rand = cur.fetchall()

                with sqlite3.connect(self.IMG_DBPATH) as conn:
                    cur = conn.cursor()
                    for i in rand:
                        sql = """INSERT INTO brands (brand_name)
                        VALUES (?);"""
                        cur.execute(sql, (i[0], ))
 def setUp(self):
     """Runs before every test: point TodoItem at the test database,
     rebuild its schema and re-seed it."""
     TodoItem.dbpath = DBPATH
     schema(DBPATH)
     seed(DBPATH)
Example #13
0
    except Exception as e:
        return(str(e))


@app.route('/download', methods=["POST"])
@login_required
def download():
    """Export the logged-in user's bookmarks as a CSV attachment."""
    if request.method == 'POST':
        c, conn = connection()
        # DB-API parameters must be a sequence (PEP 249) — the original
        # passed a bare string / scalar, which relies on driver leniency.
        c.execute("SELECT uid FROM users WHERE username = (%s)",
                  (session['username'],))
        uid = c.fetchone()[0]

        # Fetch this user's bookmarks; drop the leading id and trailing
        # uid column from each row.
        c.execute("SELECT * FROM bookmarks WHERE uid = (%s)", (uid,))
        data = tuple(a[1:-1] for a in c.fetchall())

        heading = ((('Title', 'URL', 'Timestamp'),))  # heading row for the CSV
        output = excel.make_response_from_array(heading + data, 'csv')
        # Single-line header value — the original's backslash continuation
        # embedded a run of spaces inside the filename parameter.
        output.headers["Content-Disposition"] = "attachment; filename=export.csv"
        output.headers["Content-type"] = "text/csv"  # force download as CSV
    return output


if __name__ == "__main__":
    schema()  # ensure the database schema exists before serving requests
    app.secret_key = '12345'  # secret key used for session signing
    app.run()  # run the development server
Example #14
0
def run():
    """Create the database schema, then insert the seed data."""
    schema()
    seed()