def setupTableInitialization(o):
    """Attach a populated table and one subtable to the unittest object *o*.

    Also records the total column count in o.num_columns.
    """
    o.table = createTable(TABLE_NAME)
    num = 1  # Number of columns in o.table
    # Add the three data columns in one pass.
    for col_name, col_cells in ((COLUMN1, COLUMN1_CELLS),
                                (COLUMN2, COLUMN2_CELLS),
                                (COLUMN5, COLUMN5_CELLS)):
        new_column = cl.Column(col_name)
        new_column.addCells(col_cells)
        o.table.addColumn(new_column)
        num += 1
    # Subtable with a single data column, attached as a child of the table.
    o.subtable = createTable(SUBTABLE_NAME, is_subtable=True)
    o.subtable_column1 = cl.Column(COLUMN1)
    o.subtable_column1.addCells(COLUMN1_CELLS)
    o.subtable.addColumn(o.subtable_column1)
    o.table.addChild(o.subtable)
    num += 2  # 'row' in subtable + column1
    o.num_columns = num
def testAddCellsStr(self):
    """addCells accepts a scalar string, a list of strings, and an ndarray."""
    if IGNORE_TEST:
        return
    single_str = "cccc ccc"
    new_list_str = ["aa", "bbb bb"]
    test_array = np.array(new_list_str)
    # Same check for all three input kinds.
    for cells in (single_str, new_list_str, test_array):
        column = cl.Column(COLUMN_NAME)
        column.addCells(cells)
        self.assertTrue(compareValues(column._cells, cells))
def createTable(name, column_name=None, is_subtable=False):
    """
    :param str name: str table name
    :param str or list column_name: column(s) to create
    :param bool is_subtable: true if table is a subtable
    :return: Table object
    """
    # Normalize column_name to a (possibly empty) list.
    if column_name is None:
        names = []
    elif isinstance(column_name, list):
        names = column_name
    else:
        names = [column_name]
    table = DTTable(name)
    # Each column gets 5 cells: n * factor, where factor is its 1-based index.
    for factor, colnm in enumerate(names, start=1):
        column = cl.Column(colnm)
        column.addCells([factor * n for n in range(5)], replace=True)
        table.addColumn(column)
    # Only top-level tables are versioned and written to disk.
    if not is_subtable:
        versioned_file = VersionedFile(TABLE_FILEPATH, TEST_DIR, MAX_VERSIONS)
        table.setVersionedFile(versioned_file)
        api_util.writeObjectToFile(table, TABLE_FILEPATH)
    return table
def testAddCellsFloat(self):
    """addCells accepts a scalar float, a list of floats, and an ndarray."""
    if IGNORE_TEST:
        return
    single_float = 1.1
    list_float = [2.0, 3.0]
    test_array = np.array(list_float)
    # Scalar case, which additionally checks the resulting dtype.
    column = cl.Column(COLUMN_NAME)
    column.addCells(single_float)
    self.assertTrue(compareValues(column._cells, single_float))
    self.assertEqual(np.array(column._cells).dtype, np.float64)  # pylint: disable=E1101
    # List and ndarray cases share the same check.
    for cells in (list_float, test_array):
        column = cl.Column(COLUMN_NAME)
        column.addCells(cells)
        self.assertTrue(compareValues(column._cells, cells))
def testConstructor(self):
    """A freshly constructed Column has its name and no parent or formula."""
    if IGNORE_TEST:
        return
    new_column = cl.Column(COLUMN_NAME)
    self.assertEqual(new_column._name, COLUMN_NAME)
    self.assertIsNone(new_column._parent)
    self.assertIsNone(new_column._formula_statement.getFormula())
def analyze(data):
    """Analyze every column of a pandas dataframe.

    :param data: pandas DataFrame
    :return: list of column info objects, one per dataframe column

    Numeric columns get stats over their numeric values; string/datetime
    columns get stats over their string lengths.
    """
    # TODO: Null
    columns = []
    # finding the type of each column
    for column in data:
        # TODO: Check for changes since last analysis (if not changed, skip it)
        # Also check previously created col_obj (col_obj.disable_processing);
        # for this we should keep track of when the last analysis was.
        print("Analyzing column {}".format(column))
        col_obj = column_file.Column(column)
        column_data = data[column]
        col_obj.record_count = column_data.size
        # predict_type mutates col_obj in place (passed by reference).
        predict_type(column_data, col_obj)
        if col_obj.disable_processing:  # Can be enabled if all empty
            # Nothing useful to say about most common NaNs in an empty column.
            columns.append(col_obj)
            continue
        col_obj.common_values = analyze_most_common(column_data)
        num_analysis = False
        numerical_types = [
            type_url("bool"), type_url("int"), type_url("float")
        ]
        if col_obj.data_type in numerical_types:
            numerical_data = get_numerical_data(column_data)
            num_analysis = True
        str_types = [type_url("str"), type_url("datetime")]
        if col_obj.data_type in str_types:
            num_analysis = True
            numerical_data = get_string_lengths(column_data)
        if num_analysis:
            # Bug fix: median()/min()/max() raise on an empty sequence while
            # mean was already guarded for it — guard the whole stats block.
            if len(numerical_data) == 0:
                col_obj.mean = 0
            else:
                col_obj.mean = float(sum(numerical_data)) / len(numerical_data)
                col_obj.median = median(numerical_data)  # statistics.median
                col_obj.min = min(numerical_data)  # min value / length
                col_obj.max = max(numerical_data)  # max value / length
        columns.append(col_obj)
    return columns
def _addColumn(self, name, cells=None, formula=None):
    """Create a Column, optionally assign a formula and cells, and attach it
    to self.table. Returns the new Column."""
    new_column = cl.Column(name)
    if formula is not None:
        new_column.setFormula(formula)
    if cells is not None:
        new_column.addCells(cells)
    self.table.addColumn(new_column)
    return new_column
def addcolumn(self, uname, col, prime, type="string", min="", max="", clas="", qoute="", pos=0):
    """Add a new Column to self.columns: appended when pos == 0, otherwise
    inserted at index pos - 1 (1-based position)."""
    new_col = column.Column(uname, col, type, prime, min, max, clas, qoute)
    if pos == 0:
        self.columns.append(new_col)
    else:
        self.columns.insert(pos - 1, new_col)
def analyze(data):
    """
    The main analyze function.

    The parameter data is a pandas dataframe.
    Returns a list of columns: for each column of the data an object with
    the information attached. Also exports per-column stats to report.json.
    """
    columns = []
    data_info = dict()  # Contains the info about each column
    # finding the type of each column
    for column in data:
        print("Analyzing column {}".format(column))
        col_obj = column_file.Column(
            column
        )  # TODO: Retrieve previously created col_obj and check if we should process
        column_data = data[column]
        stats = dict()
        col_obj = predict_type(column_data, col_obj)
        if col_obj.disable_processing:  # Can be enabled if all empty
            # There is nothing useful to say about most common nan's in an empty column
            # NOTE(review): unlike the sibling analyze() variant, this skips the
            # append below, so fully-empty columns are absent from the returned
            # list and from data_info — confirm this is intended.
            continue
        col_obj.record_count = column_data.size
        col_obj.common_values = analyze_most_common(column_data)
        # Numeric columns: stats straight from pandas.
        numerical_types = [type_url(bool), type_url(int), type_url(float)]
        if col_obj.data_type in numerical_types:
            col_obj.mean = column_data.mean()
            col_obj.median = column_data.median()
            col_obj.min = column_data.min()
            col_obj.max = column_data.max()
            stats["sd"] = column_data.std()  # Standard deviation
        # String/datetime columns: stats over the string lengths.
        str_types = [type_url(str), type_url("datetime")]
        if col_obj.data_type in str_types:
            str_lengths = get_string_lengths(column_data)
            col_obj.mean = 0 if len(str_lengths) == 0 else (
                float(sum(str_lengths)) / len(str_lengths))  # the avg length
            # NOTE(review): median/min/max are unguarded and raise on an empty
            # str_lengths, while mean is guarded — confirm empties can't occur.
            col_obj.median = median(str_lengths)  # statistics.median
            col_obj.min = min(str_lengths)  # min length
            col_obj.max = max(str_lengths)  # max length
            # stats["str-data"] = analyze_string_row(column_data)  # gets already analyzed in type prediction
        # Add a timestamp for when the last update was
        stats["timestamp"] = str(datetime.now())
        data_info[column] = stats
        columns.append(col_obj)
    # pprint(data_info)
    export_json("report.json", data_info)
    return columns
def testEvaluateError(self):
    """Evaluating a table containing an invalid formula reports an error."""
    if IGNORE_TEST:
        return
    bad_column = cl.Column(COLUMN_INVALID_FORMULA)
    bad_column.setFormula(INVALID_FORMULA)
    self.table.addColumn(bad_column)
    result = TableEvaluator(self.table).evaluate(user_directory=TEST_DIR)
    self.assertIsNotNone(result)
def testAddColumn(self):
    """addColumn attaches a column once and rejects a duplicate add."""
    if IGNORE_TEST:
        return
    table = ht.createTable(ht.TABLE_NAME)
    # Add a column, then verify re-adding the same column fails.
    new_column = cl.Column(ht.COLUMN)
    new_column.addCells(ht.COLUMN1_CELLS)
    table.addColumn(new_column)
    self.assertEqual(table.getColumns()[1], new_column)
    self.assertIsNotNone(table.addColumn(new_column))
    # A freshly added data column matches the table's row count.
    table = ht.createTable(ht.TABLE_NAME)
    data_column = cl.Column(ht.COLUMN1)
    data_column.addCells(ht.LIST)
    table.addColumn(data_column)
    self.assertEqual(data_column.numCells(), table.numRows())
def __init__(self, algorithm: str):
    """Set up the pygame window, fonts, labels, and the initial columns."""
    self.run = True
    self.window = pygame.display.set_mode((WIDTH, HEIGHT))
    pygame.display.set_caption(algorithm)
    self.algorithm = algorithm
    self.result = False
    self.update_loop = False
    self.start = None
    self.time = 'Time: 0 sec'

    # Dispatch table: algorithm name -> sorting function.
    self.sorting_methods = {
        "selection-sort": selection_sort.selection_sort,
        "bubble-sort": bubble_sort.bubble_sort,
        "insertion-sort": insertion_sort.insertion_sort,
        "quick-sort": quick_sort.quick_sort,
        "heap-sort": heap_sort.heap_sort,
    }
    self.clock = pygame.time.Clock()

    # Font setup.
    pygame.font.init()
    self.font = pygame.font.SysFont(FONT, FONT_SIZE)
    title_text = self.algorithm.upper()

    # Program labels: [rendered text, position].
    self.labels = [
        [self.font.render(KEY_G, True, WHITE), (50, 25)],
        [self.font.render(KEY_S, True, WHITE), (50, 75)],
        [self.font.render(KEY_E, True, WHITE), (50, 125)],
        [self.font.render(title_text, True, WHITE), (550, 25)],
        [self.font.render(self.time, True, WHITE), (1050, 25)],
    ]

    # Generate the columns, left to right starting at x = 150.
    next_x = 150
    self.columns = []
    self.sizes = list(range(10, 410, 2))  # even sizes 10..408
    self.LENGTH = len(self.sizes)
    for _ in range(c_AMOUNT + 1):
        try:
            new_column = c.Column(next_x, self.window, self.sizes)
            self.columns.append(new_column)
        except IndexError:
            # No sizes left to draw from.
            break
        # Advance the x position and consume the used sizes.
        next_x = new_column.return_last()
        self.sizes = new_column.return_sizes()
def save_columns(self):
    """Save all the columns of the user.

    No-op (with a message) for anonymous users, i.e. when self.url is None.
    """
    # Bug fix: `print "..."` is a Python 2 statement and a syntax error on
    # Python 3; the single-argument print() call works on both.
    if self.url is None:
        print("Anonymous user, cannot save columns")
        return
    for column_url in self.get_columns_url():
        column.Column(column_url).save_all_posts()
    return
def _makeThreeLevelTable(self):
    """Updates self.table so a new subtable (NEW_SUBTABLE) hangs under the
    existing subtable, producing a three-level hierarchy."""
    parent_subtable = self.table.tableFromName(ht.SUBTABLE_NAME)
    child_subtable = ht.createTable(NEW_SUBTABLE)
    new_column = cl.Column(ht.COLUMN)
    new_column.addCells(ht.COLUMN1_CELLS)
    child_subtable.addColumn(new_column)
    parent_subtable.addChild(child_subtable)
def setUp(self):
    """Build the standard test fixture: a table with three data columns and
    one subtable child."""
    self.table = createTable(TABLE_NAME)
    column1 = cl.Column(COLUMN1)
    column1.addCells(COLUMN1_CELLS)
    self.table.addColumn(column1)
    self.column2 = cl.Column(COLUMN2)
    self.column2.addCells(COLUMN2_CELLS)
    error = self.table.addColumn(self.column2)
    if error is not None:
        # Bug fix: this previously dropped into pdb.set_trace() — leftover
        # debugging code. Fail the test explicitly instead.
        self.fail("addColumn failed in setUp: %s" % error)
    self.column5 = cl.Column(COLUMN5)
    self.column5.addCells(COLUMN5_CELLS)
    self.table.addColumn(self.column5)
    self.columns = self.table.getColumns()
    self.subtable = Table(SUBTABLE)
    self.table.addChild(self.subtable)
    self.subtable_column = self.subtable.getChildAtPosition(0)
    self.subtable_column_name = 'row'
def set_vals(self, numcolumns):
    """Reset the game state for a new round with *numcolumns* disks."""
    self.movecounter = 0
    self.update_label()
    self.C.delete(self.winner)
    self.winner = None
    self.C.delete(self.minimum_moves)
    # Minimum moves for Towers of Hanoi with n disks is 2**n - 1.
    self.minimum_moves = self.C.create_text(
        155, 0, anchor=N,
        text="Minimum Possible Moves: " + str(2**numcolumns - 1))
    # Vertical positions for the stacked disks, top of stack downward.
    tops = list(range(300, 59, -(240 // (numcolumns - 1))))
    disks = [
        column.Disk(numcolumns - i, tops[i]) for i in range(numcolumns)
    ]
    # First column holds the full stack; the other two start empty.
    self.l.append(column.Column(155, disks, self.C))
    for offset in range(2):
        self.l.append(column.Column(455 + 300 * offset, [], self.C))
    self.clicked = False
    self.pvcolumn = 0
def testAdjustColumnLength(self):
    """adjustColumnLength pads short columns to the table's row count."""
    if IGNORE_TEST:
        return
    table = ht.createTable(ht.TABLE_NAME)
    long_column = cl.Column(ht.COLUMN)
    long_column.addCells(ht.COLUMN1_CELLS)
    table.addColumn(long_column)
    # A one-cell string column gets padded with None.
    short_column = cl.Column(ht.COLUMN1)
    short_column.addCells(['aa'])
    table.addColumn(short_column)
    table.adjustColumnLength()
    self.assertEqual(short_column.numCells(), table.numRows())
    self.assertIsNone(short_column.getCells()[1])
    # A one-cell numeric column gets padded with NaN (None otherwise).
    extra_column = cl.Column("YetAnotherColumn")
    extra_column.addCells([1])
    table.addColumn(extra_column)
    table.adjustColumnLength()
    self.assertEqual(extra_column.numCells(), table.numRows())
    if extra_column.isFloats():
        self.assertTrue(np.isnan(extra_column.getCells()[1]))  # pylint: disable=E1101
    else:
        self.assertIsNone(extra_column.getCells()[1])
def createColumn(name, data=None, table=None, formula=None):
    """
    :param str name: column name
    :param data: np.ndarray data values; defaults to an empty array
    :param table: Table that references the column
    :param formula: formula in column
    :return: column object with data populated
    """
    # Bug fix: the default was `data=np.array([])`, a mutable object shared
    # across calls. Use a None sentinel and build a fresh empty array instead.
    if data is None:
        data = np.array([])
    aColumn = cl.Column(name)
    aColumn.addCells(data)
    aColumn.setTable(table)
    aColumn.setFormula(formula)
    return aColumn
Created on Fri Sep 6 13:53:17 2019

@author: rosto
"""
import sys
sys.path.append("..")  # make the sibling package importable when run from here

import form
import builder
import row
import container
import column
from size import Size

# Bootstrap grid scaffold: container > row > column that will hold the form.
cont = container.Container()
riga = row.Row()
colonna_form = column.Column()
colonna_form.add_col_size(12, Size.PICCOLO)
colonna_form.add_col_size(6, Size.MEDIO)
riga.add_child(colonna_form)
cont.add_child(riga)

# Login form with name / email / password inputs.
# NOTE(review): this chunk is truncated — the password InputField call below
# is cut off mid-arguments in the visible source.
f = form.Form(action_url="/login")
f.add_child(
    form.InputField("id_nome", label="Nome", inp_type=form.InputType.TEXT))
f.add_child(
    form.InputField("id_cognome", label="Email", inp_type=form.InputType.EMAIL))
f.add_child(
    form.InputField("id_psw",
#!/usr/bin/env python3
"""Drive the 14-segment display columns named on the command line.

Usage: script.py (off|<mode>) <column-name> [<column-name> ...]
"""
import column, multihelp, sys, json
import multiprocessing

# Values for the characters for the 14-seg display.
character_source = './data/characters.json'
# Hardware configuration source.
config = './data/hardware.json'

with open(character_source) as file:
    character_source = json.load(file)
with open(config) as file:
    config = json.load(file)

# (Column object, display name) for every column named on the command line.
columns = [(column.Column(config[col]['segments']['bus_number'],
                          config[col]['segments']['address'],
                          character_source,
                          config[col]['button'],
                          config[col]['lightline']), col)
           for col in sys.argv[2:]]

if sys.argv[1] == 'off':
    for c in columns:
        c[0].off()
        print(c[0])
else:
    # Bug fixes: the original re-created a Pool on every loop iteration,
    # *called* run_activate and passed its result to apply_async instead of
    # passing the function with its args, and never closed/joined the pool,
    # so queued tasks could be dropped at interpreter exit.
    with multiprocessing.Pool(processes=len(columns)) as pool:
        for col_obj, name in columns:
            pool.apply_async(multihelp.run_activate, (col_obj, name))
        pool.close()
        pool.join()
def saveLogic(data, type, loc, allTables):
    """Build the logical model (tables, columns, relations) from parsed XML.

    :param data: ElementTree of the conceptual model (<title>, <ent>, <rel>)
    :param type: ElementTree with type descriptions, or None
    :param loc: ElementTree with localized (name-lo) names, or None
    :param allTables: bool; forces creation of relation tables

    NOTE(review): ``type``, ``min`` and ``max`` shadow Python builtins; left
    as-is since the surrounding code depends on these names.
    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; the nesting of flagged statements should be confirmed against VCS.
    """
    print("Generating Logical Model ...")
    root = data.getroot()
    rootlo = ""
    # datenbank appears to be used as a module-level singleton: its methods
    # are invoked with the class itself as the instance argument.
    datenbank.__init__(datenbank)
    if not loc == None:
        rootlo = loc.getroot()
    # Database title / language, honoring the localization file when present.
    for title in root.findall("title"):
        if not loc == None:
            if title.get("lang"):
                datenbank.settitle(datenbank, urlify(title.get("name")))
                datenbank.setlanguage(datenbank, title.get("lang"))
        else:
            if (not (title.get("lang"))):
                datenbank.settitle(datenbank, urlify(title.get("name")))
    # One table per <ent>; localized names come from matching <entlo> nodes.
    for ent in root.findall("ent"):
        tab = table.Table()
        tab.name = ent.get("name")
        tab.setuname(ent.get("name"))
        tab.desc = "Created from <ent>"
        tab.qoute = ent.get("qoute")
        if not loc == None:
            for entlo in rootlo.findall("entlo"):
                if entlo.get("entref") == ent.get("name"):
                    tab.setuname(entlo.get("name-lo"))
        # One column per attribute of the entity.
        for attr in ent:
            col = attr.get("name")
            ucol = col
            prime = attr.get("prime")
            qoute = attr.get("qoute")
            if not loc == None:
                for entlo in rootlo.findall("entlo"):
                    if entlo.get("entref") == ent.get("name"):
                        for attrlo in entlo:
                            if col == attrlo.get("name"):
                                ucol = attrlo.get("name-lo")
            if type == None:
                tab.addcolumn(ucol, col, prime, "string", "", "", "", qoute)
            else:
                createColumn(tab, ent.get("name"), ucol, col, prime, type,
                             qoute)
        datenbank.addtable(datenbank, tab)
    findInkonsistence(root)
    # Relations: per <rel>, either add foreign keys to existing tables or
    # create a dedicated relation table.
    for rel in root.findall("rel"):
        counter = 0
        max = 0
        bsuper = False
        # counter counts "n"-cardinality parts plus attributes; max counts
        # all parts/attrs; any other tag marks a super/sub (inheritance) rel.
        for part in rel:
            if part.tag == "part" or part.tag == "attr":
                max += 1
                if part.get("max") == "n":
                    counter += 1
                if part.tag == "attr":
                    counter += 1
            else:
                bsuper = True
        if bsuper == False:
            if (counter != max and allTables) or (max > 3 and allTables):
                # Forced tables: push the main ("n"-side) table's primary keys
                # into the weak "1"-side tables.
                maintable = ""
                for part in rel:
                    if part.get("max") == "n":
                        maintable = part.get("ref")
                primarys = datenbank.getprimarycolumns(datenbank, maintable)
                group = 0
                for part in rel:
                    if part.get("max") == "1":
                        group += 1
                        for prime in primarys:
                            # NOTE(review): nesting of addrelation under the
                            # "weak" check mirrors the analogous branch below.
                            if part.get("weak") == "true":
                                datenbank.addcolumn(datenbank,
                                                    part.get("ref"), prime, 1)
                                datenbank.addrelation(
                                    datenbank, part.get("ref"), prime.uname,
                                    datenbank.tableuname(datenbank, maintable),
                                    group, len(primarys),
                                    datenbank.tabqoute(datenbank,
                                                       part.get("ref")))
            if counter == max or max > 2 or allTables:
                # m:n relation (or forced): create a dedicated relation table.
                tab = table.Table()
                tab.name = rel.get("to")
                tab.qoute = rel.get("qoute")
                tab.setuname(tab.name)
                tab.desc = "Created from a m:n relation"
                if not loc == None:
                    for rello in rootlo.findall("rello"):
                        if rello.get("relref") == tab.name:
                            tab.setuname(rello.get("name-lo"))
                group = 0
                for part in rel:
                    group += 1
                    if part.tag == "part":
                        # Copy the referenced table's primary keys in as
                        # foreign-key columns.
                        primarys = datenbank.getprimarycolumns(
                            datenbank, part.get("ref"))
                        for prime in primarys:
                            if tab.unique(
                                    datenbank.tableuname(
                                        datenbank, part.get("ref")) + "_" +
                                    prime.uname):
                                tab.addcolumn(
                                    datenbank.tableuname(
                                        datenbank, part.get("ref")) + "_" +
                                    prime.uname, prime.uname, prime.prime,
                                    prime.type, prime.min, prime.max,
                                    prime.clas, prime.qoute)
                                tab.addrelation(
                                    tab.name,
                                    datenbank.tableuname(
                                        datenbank, part.get("ref")) + "_" +
                                    prime.uname,
                                    datenbank.tableuname(
                                        datenbank, part.get("ref")), group,
                                    len(primarys),
                                    datenbank.tabqoute(datenbank,
                                                       part.get("ref")))
                            else:
                                # Name collision: disambiguate with the group
                                # index suffix.
                                tab.addcolumn(
                                    datenbank.tableuname(
                                        datenbank, part.get("ref")) + "_" +
                                    prime.uname + str(group), prime.uname,
                                    prime.prime, prime.type, prime.min,
                                    prime.max, prime.clas, prime.qoute)
                                tab.addrelation(
                                    tab.name,
                                    datenbank.tableuname(
                                        datenbank, part.get("ref")) + "_" +
                                    prime.uname + str(group),
                                    datenbank.tableuname(
                                        datenbank, part.get("ref")), group,
                                    len(primarys),
                                    datenbank.tabqoute(datenbank,
                                                       part.get("ref")))
                    else:
                        # <attr> on the relation itself becomes a plain
                        # column, localized and typed when available.
                        uname = part.get("name")
                        if not loc == None:
                            for rello in rootlo.findall("rello"):
                                for attr in rello:
                                    if part.get("name") == attr.get("name"):
                                        uname = attr.get("name-lo")
                        types = ""
                        min = ""
                        max = ""
                        clas = ""
                        qoute = ""
                        if not type == None:
                            # NOTE(review): rebinding ``root`` here clobbers
                            # the conceptual-model root used by the outer
                            # loops — confirm this is intended.
                            root = type.getroot()
                            for typdsc in root.findall("reldsc"):
                                for attr in typdsc.findall("attr"):
                                    if attr.get("name") == part.get("name"):
                                        types = attr.get("type")
                                        min = attr.get("min")
                                        max = attr.get("max")
                                        clas = attr.get("class")
                                        qoute = attr.get("qoute")
                        # Normalize missing type metadata to empty strings.
                        if min == None:
                            min = ""
                        if max == None:
                            max = ""
                        if clas == None:
                            clas = ""
                        if qoute == None:
                            qoute = ""
                        tab.addcolumn(uname, uname, part.get("prime"), types,
                                      min, max, clas, qoute)
                datenbank.addtable(datenbank, tab)
            else:
                # n:1 relation without its own table: add the main table's
                # primary keys to the weak "1"-side tables, tracking position.
                maintable = ""
                for part in rel:
                    if part.get("max") == "n":
                        maintable = part.get("ref")
                primarys = datenbank.getprimarycolumns(datenbank, maintable)
                group = 0
                pos = 0
                for part in rel:
                    if part.get("max") == "1":
                        group += 1
                        for prime in primarys:
                            if part.get("weak") == "true":
                                pos += 1
                                datenbank.addcolumn(datenbank,
                                                    part.get("ref"), prime,
                                                    pos)
                                datenbank.addrelation(
                                    datenbank, part.get("ref"), prime.uname,
                                    datenbank.tableuname(datenbank, maintable),
                                    group, len(primarys),
                                    datenbank.tabqoute(datenbank, maintable))
        else:
            # Inheritance: copy the super-table's primary keys into each sub.
            maintable = ""
            for part in rel:
                if part.tag == "super":
                    maintable = part.get("ref")
            primarys = datenbank.getprimarycolumns(datenbank, maintable)
            group = 0
            for part in rel:
                if part.tag == "sub":
                    group += 1
                    for primary in primarys:
                        temp = column.Column(primary.uname, primary.name,
                                             primary.type, primary.prime,
                                             primary.min, primary.max,
                                             primary.clas, primary.qoute)
                        datenbank.addcolumn(datenbank, part.get("ref"), temp)
                        datenbank.addrelation(
                            datenbank,
                            datenbank.tableuname(datenbank, part.get("ref")),
                            temp.name, maintable, group, len(primarys),
                            datenbank.tabqoute(datenbank, part.get("ref")))
    # Warn about tables that ended up without a primary key.
    for tablen in datenbank.tabellen:
        primarys = datenbank.getprimarycolumns(datenbank, tablen.name)
        if len(primarys) == 0:
            print(Fore.YELLOW + "->Primary key for the table " + tablen.name +
                  " is not set." + Fore.RESET)