def store_input(input_file, session):
    """Import an invoice-input python file and persist an invoice + tasks.

    Parameters
    ----------
    input_file : str
        Path understood by ``import_file``; the imported module must
        expose ``client_id``, ``tasks``, ``from_date`` and ``to_date``,
        and may expose ``contract_id`` and ``currency``.
    session : SQLAlchemy session
        Used inside a ``session.begin()`` transaction.

    Returns
    -------
    tuple
        ``(invoice, tasks, client, contract)`` — ``contract`` may be None.
    """
    input_file = import_file(input_file)
    client_id = int(input_file.client_id)
    with session.begin():
        contract = None
        contract_id = None  # keep defined for the Task rows even on failure
        try:
            contract_id = int(input_file.contract_id)
            contract = session.query(Contract).filter(Contract.id == contract_id).first()
        except (AttributeError, TypeError, ValueError):
            # contract is optional on the input module (was a bare except)
            pass
        # currency is optional too (was try/except AttributeError)
        currency = getattr(input_file, 'currency', None)
        client = session.query(Client).filter(Client.id == client_id).first()
        # BUG FIX: the original did ``date = date.today()``, making ``date``
        # a local name and raising UnboundLocalError before the call.
        today = date.today()
        # BUG FIX: the original read ``args.input.tasks``/``args.input.from_date``
        # (a module global); everything else consistently uses ``input_file``.
        total_price = sum(t['price'] for t in input_file.tasks)
        invoice = InvoiceSent(client_id=client_id,
                              date=today,
                              taxes=0,
                              total=total_price,
                              pre_tax=total_price,
                              from_date=du_parse(input_file.from_date),
                              to_date=du_parse(input_file.to_date),
                              currency=currency)
        session.add(invoice)
        tasks = [Task(client_id=client_id,
                      contract_id=contract_id,
                      invoice=invoice,
                      date=du_parse(task['date']),
                      time_amount=task['time_amount'],
                      price=task['price'],
                      detail=task['detail'])
                 for task in input_file.tasks]
        session.add_all(tasks)
    return invoice, tasks, client, contract
def literal_filter_condition(self):
    """Load this report's literal filter, if a definition file exists.

    Looks for ``<report_dir>/<id_>/filter_def.py``; when present, imports
    it and returns ``get_filter(db, model_map)``.  Returns None otherwise.
    """
    report = self.flask_report
    path = os.path.join(report.report_dir, str(self.id_), 'filter_def.py')
    if not os.path.exists(path):
        return None
    return import_file(path).get_filter(report.db, report.model_map)
def __cached_import(self, file):
    """Import ``file`` once and memoise the module in ``self.modules``.

    Subsequent calls with the same path return the cached module object
    instead of re-importing the file.
    """
    # Membership test directly on the dict (the original used
    # ``in self.modules.keys()``, which is redundant).
    if file not in self.modules:
        self.modules[file] = import_file(file)
    return self.modules[file]
def __init__(self, x, y, properties, img='char_ 44_D.png', direction = DIR_DOWN, mv_pattern = engine.mv_pattern_stationary, behavior = 'npcbehavior.py'):
    # NPC constructor: position, behaviour script, movement state and
    # sprite-sheet slicing.
    #
    # x, y       -- spawn position (also kept as start_x/start_y, presumably
    #               for patrol/return-home patterns -- TODO confirm)
    # properties -- opaque properties stored as-is
    # img        -- sprite sheet file name under spr_dir
    # direction  -- initial facing (DIR_DOWN by default)
    # mv_pattern -- movement pattern callback (stationary by default)
    # behavior   -- python file under script_dir, loaded via import_file
    self.x = x
    self.y = y
    self.start_x = x
    self.start_y = y
    self.move_speed = 2
    self.mv_pattern = mv_pattern
    # Behaviour is a dynamically imported python module.
    self.behavior = import_file(script_dir + behavior)
    self.properties = properties
    self.dir = direction
    self.radius = 3
    self.isMoving = False
    # Movement destination defaults to the spawn point.
    self.dest_x = self.x
    self.dest_y = self.y
    self.step = 0
    self.type = "npc"
    # Slice the sheet into 32x32 frames: pygame.transform.chop removes the
    # given rect, so two chained chops isolate one cell.  Each facing
    # (down/left/right/up, rows at y = 0/32/64/96) gets a standing frame
    # plus two step frames (columns) -- assumes a 128x128 4x4 sheet,
    # TODO confirm layout.
    self.spr_f = pygame.image.load(spr_dir + img)
    self.spr_down = pygame.transform.chop(pygame.transform.chop(self.spr_f, (32,0,0,0)), (32,32,128,128))
    self.spr_down_s0 = pygame.transform.chop(pygame.transform.chop(self.spr_f, (0,0,32,0)), (32,32,128,128))
    self.spr_down_s1 = pygame.transform.chop(pygame.transform.chop(self.spr_f, (0,0,96,0)), (32,32,128,128))
    self.spr_left = pygame.transform.chop(pygame.transform.chop(self.spr_f, (32,0,0,32)), (32,32,128,128))
    self.spr_left_s0 = pygame.transform.chop(pygame.transform.chop(self.spr_f, (0,0,32,32)), (32, 32, 128, 128))
    self.spr_left_s1 = pygame.transform.chop(pygame.transform.chop(self.spr_f, (0,0,96,32)), (32, 32, 128, 128))
    self.spr_right = pygame.transform.chop(pygame.transform.chop(self.spr_f, (32,0,0,64)), (32,32,128,128))
    self.spr_right_s0 = pygame.transform.chop(pygame.transform.chop(self.spr_f, (0,0,32,64)), (32, 32, 128, 128))
    self.spr_right_s1 = pygame.transform.chop(pygame.transform.chop(self.spr_f, (0,0,96,64)), (32, 32, 128, 128))
    self.spr_up = pygame.transform.chop(pygame.transform.chop(self.spr_f, (32,0,0,96)), (32,32,128,128))
    self.spr_up_s0 = pygame.transform.chop(pygame.transform.chop(self.spr_f, (0, 0,32,96)), (32,32,128,128))
    self.spr_up_s1 = pygame.transform.chop(pygame.transform.chop(self.spr_f, (0, 0,96,96)), (32,32,128,128))
    # Current visible frame; map reference filled in later.
    self.sprite = self.spr_down
    self.m = None
def proxy_filter_map(self):
    """Collect user-defined proxy filters keyed by filter name.

    Reads ``proxy_filters.py`` from this report's directory when it
    exists and maps each filter listed in its ``__all__`` by ``name``.
    Returns an empty dict when the file is absent.
    """
    mapping = {}
    path = os.path.join(self.dir, 'proxy_filters.py')
    if os.path.exists(path):
        module = import_file(path)
        mapping.update((flt.name, flt) for flt in module.__all__)
    return mapping
def query(self):
    """Build this data set's query from its ``query_def.py`` definition file."""
    path = os.path.join(self.flask_report.data_set_dir, str(self.id_), "query_def.py")
    definition = import_file(path)
    return definition.get_query(self.flask_report.db, self.flask_report.model_map)
def ImportPythonClassOrVar(sPythonFileName, sClassOrVar):  # dynamically import from a python source file
    """Import a class or variable from a python source file by path.

    Returns the requested attribute, or None (implicitly) when import or
    lookup fails; failures are printed via traceback and PrintTimeMsg.
    """
    try:
        from import_file import import_file
        imp = import_file(sPythonFileName)
        return getattr(imp, sClassOrVar)
    # BUG FIX: ``except Exception, e`` is py2-only syntax; ``as e`` works
    # on py2.6+ and py3.
    except Exception as e:
        import traceback
        traceback.print_exc()
        PrintTimeMsg('ImportPythonClassOrVar.Exception.e=(%s)' % (str(e)))
def ImportPythonClassOrVar(sPythonFileName, sClassOrVar):  # dynamically import from a python source file
    """Import a class or variable from a python source file by path.

    Returns the requested attribute, or None (implicitly) when import or
    lookup fails; failures are printed via traceback and PrintTimeMsg.
    """
    try:
        from import_file import import_file
        imp = import_file(sPythonFileName)
        return getattr(imp, sClassOrVar)
    # BUG FIX: ``except Exception, e`` is py2-only syntax; ``as e`` works
    # on py2.6+ and py3.
    except Exception as e:
        import traceback
        traceback.print_exc()
        PrintTimeMsg('ImportPythonClassOrVar.Exception.e=(%s)' % (str(e)))
def make_df_from_excel(file_name, formatting, nrows=10000):
    """Read an Excel file in chunks and make a single DataFrame.

    Parameters
    ----------
    file_name : str
        Workbook name, resolved relative to ``DATA_DIR``.
    formatting : str
        Path to a python file (loaded with ``import_file``) exposing a
        ``formatting(df)`` function applied to each sheet's data.
    nrows : int
        Number of rows to read at a time.  These Excel files are too big,
        so we can't read all rows in one go.
    """
    # BUG FIX: the original signature was
    # ``(file_name, nrows=10000, formatting)`` -- a non-default parameter
    # after a default one is a SyntaxError.  Known callers pass
    # ``formatting`` by keyword, so reordering keeps them working.
    impModule = import_file(formatting)
    file_path = os.path.abspath(os.path.join(DATA_DIR, file_name))
    xl = pd.ExcelFile(file_path)
    # Report the first worksheet name up front.
    sheetname = xl.sheet_names[0]
    print(f"Excel file: {file_name} (worksheet: {sheetname})")
    sheets = []
    for sheet in xl.sheet_names:
        chunks = []
        i_chunk = 0
        skiprows = 0
        while True:
            df_chunk = xl.parse(sheet, nrows=nrows, skiprows=skiprows,
                                ignore_index=True, header=None)
            skiprows += nrows
            # When there is no data, this sheet is exhausted.
            if not df_chunk.shape[0]:
                break
            print(f" - chunk {i_chunk} ({df_chunk.shape[0]} rows)")
            chunks.append(df_chunk)
            i_chunk += 1
        # Apply the user formatting hook per sheet, blanks filled with ''.
        sheets.append(impModule.formatting(
            pd.concat(chunks, ignore_index=True).fillna('')))
    df_chunks = pd.concat(sheets, ignore_index=True)
    df_chunks['File Name'] = basename(file_name)
    return df_chunks
def _get_report_class(self, id_, default=None):
    """Resolve the report-template class for report ``id_``.

    Prefers ``<report_dir>/<id_>/report_templates.py``, then report "0"'s
    template file; a class matching ``default.__name__`` is looked up in
    the imported file, otherwise ``default`` itself is returned.  A
    ``default`` must be supplied (ValueError otherwise).
    """
    if default is None:
        raise ValueError
    candidate = os.path.join(self.report_dir, str(id_), "report_templates.py")
    if not os.path.exists(candidate):
        candidate = os.path.join(self.report_dir, "0", "report_templates.py")
    if not os.path.exists(candidate):
        return default
    from import_file import import_file
    lib = import_file(candidate)
    return getattr(lib, default.__name__, default)
def synthetic_filter_map(self):
    """Map of synthetic (user defined) filters: keys are the filters'
    names, values are the filters themselves.

    Reads ``synthetic_filters.py`` from this report's directory when it
    exists; otherwise the map is empty.
    """
    mapping = {}
    path = os.path.join(self.dir, 'synthetic_filters.py')
    if os.path.exists(path):
        module = import_file(path)
        mapping.update((flt.name, flt) for flt in module.__all__)
    return mapping
def test():
    """Smoke-test import_file against a module copied into a nested dir."""
    try:
        os.makedirs('a/b/c')
        shutil.copy('module.py', 'a/b/c')
        assert os.path.isfile('a/b/c/module.py')
        mod = import_file('a/b/c/module.py')
        # The fixture module declares a docstring, x and __author__.
        assert mod.__doc__ == 'This is a module for testing import_file'
        assert mod.x == 5
        assert mod.__author__ == 'gigi'
        mod.prettify({'4': '5', 'a': range(5), 'b': 'c'})
    finally:
        # Always clean up the scratch tree, even on assertion failure.
        shutil.rmtree('a')
def testMain():
    # Manual smoke test: load the demo WinSV settings file and pretty-print
    # the parsed configuration for two example program groups.
    print PrettyPrintStr(LoadWinSVConfigFmFile(__file__,'demoSettingsWinSV.py','gDictConfigByGroupId', 'groupExample.programQQ,groupExample.programJC,'))
    return
    # NOTE(review): everything below is dead code (after the return) -- an
    # alternative path that loads the dict via import_file, kept for
    # manual experimentation.
    # from demoSettingsWinSV import gDictConfigByGroupId
    from import_file import import_file
    # gDictConfigByGroupId= ImportModuleClass('demoSettingsWinSV','gDictConfigByGroupId')
    imp = import_file('demoSettingsWinSV.py')
    print dir(imp)
    gDictConfigByGroupId = getattr(imp,'gDictConfigByGroupId')
    print gDictConfigByGroupId
    print PrettyPrintStr(LoadWinSVConfigFmDict(__file__,gDictConfigByGroupId, 'groupExample.programQQ,groupExample.programJC,'))
    pass
def make_df_from_excelDepen(file_name, formatting):
    """Read every worksheet of an Excel file in row-chunks and return one
    formatted DataFrame.

    formatting -- path to a python file (loaded with ``import_file``)
    exposing a ``formatting(df)`` function applied to the concatenated
    result of all sheets.
    """
    nrows = 10000  # rows per parse() call; workbooks are too big for one read
    filename = basename(file_name)
    # NOTE(review): these f-strings contain no placeholders -- the file
    # name appears to have been scrubbed from the messages; presumably
    # {filename} was intended -- confirm before changing output.
    print(f"Excel file: (unknown)")
    impModule = import_file(formatting)
    file_path = os.path.abspath(os.path.join(DATA_DIR, file_name))
    xl = pd.ExcelFile(file_path)
    sheets = []
    for i in xl.sheet_names:
        print(f"- File (unknown) - Worksheet: {i}")
        chunks = []
        i_chunk = 0
        # The first row is the header. We have already read it, so we skip it.
        skiprows = 0
        while True:
            df_chunk = xl.parse(i, nrows=nrows, skiprows=skiprows, ignore_index=True, header=None)
            skiprows += nrows
            # When there is no data, we know we can break out of the loop.
            if not df_chunk.shape[0]:
                break
            else:
                print(f" - File (unknown) - Chunk {i_chunk} ({df_chunk.shape[0]} rows)")
                chunks.append(df_chunk)
                i_chunk += 1
        sheets.append(pd.concat(chunks, ignore_index=True).fillna(''))
    print(f' - File (unknown) - Concatenating')
    xl.close()
    # The user formatting hook runs once over the concatenation of sheets.
    df_chunks = impModule.formatting(pd.concat(sheets, ignore_index=True))
    df_chunks['File Name'] = filename
    # Drop references so the large intermediates can be collected promptly.
    sheets = []
    chunks = []
    return df_chunks
def load_file(self):
    """Prompt for a python algorithm file and import it.

    Opens a wx file dialog restricted to ``*.py``; the chosen file is
    loaded with ``import_file`` and stored in the module-global
    ``latest``.  Nothing happens when the user cancels.
    """
    dialog = wx.FileDialog(None, "Open the desired algorithm", "", "",
                           "Python files (*.py)|*.py",
                           wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
    try:
        # ROBUSTNESS: the original ignored the modal result, so cancelling
        # would attempt to import an empty path.
        if dialog.ShowModal() != wx.ID_OK:
            return
        alg_path = dialog.GetPath()
        # NOTE: the original also assigned ``dialog.GetFilename`` (the bound
        # method -- missing call parens) and GetDirectory() to locals that
        # were never used; both removed.
        global latest
        latest = import_file(alg_path)
    finally:
        # Destroy the dialog even when the import raises.
        dialog.Destroy()
def _describe(self, coverage_data):
    # Build a ModuleInfo describing every class and function defined in a
    # covered module.
    #
    # coverage_data: (module_path, covered_lines) pair; the module is
    # (re-)imported from its file path via import_file.
    mod, covered_lines = coverage_data
    module = import_file(mod)
    # Lookup helper over the module's attribute *names*.
    mod_attr = partial(getattr, module)
    mod_attr_names = dir(module)
    _isclass = lambda name: inspect.isclass(mod_attr(name))
    _desc_class = lambda name: self._describe_class(mod_attr(name), covered_lines)
    _isfunction = lambda name: inspect.isfunction(mod_attr(name))
    _desc_function = lambda name: self._describe_function(mod_attr(name), covered_lines)
    # NOTE(review): under py3, map()/filter() are lazy iterators --
    # presumably ModuleInfo materialises them; confirm if migrating
    # from py2.
    classes = map(_desc_class, filter(_isclass, mod_attr_names))
    functions = map(_desc_function, filter(_isfunction, mod_attr_names))
    return ModuleInfo(classes, functions)
def testMain():
    # Manual smoke test: load the demo WinSV settings file and pretty-print
    # the parsed configuration for two example program groups.
    print PrettyPrintStr(
        LoadWinSVConfigFmFile(
            __file__, 'demoSettingsWinSV.py', 'gDictConfigByGroupId',
            'groupExample.programQQ,groupExample.programJC,'))
    return
    # NOTE(review): everything below is dead code (after the return) -- an
    # alternative path that loads the dict via import_file, kept for
    # manual experimentation.
    # from demoSettingsWinSV import gDictConfigByGroupId
    from import_file import import_file
    # gDictConfigByGroupId= ImportModuleClass('demoSettingsWinSV','gDictConfigByGroupId')
    imp = import_file('demoSettingsWinSV.py')
    print dir(imp)
    gDictConfigByGroupId = getattr(imp, 'gDictConfigByGroupId')
    print gDictConfigByGroupId
    print PrettyPrintStr(
        LoadWinSVConfigFmDict(
            __file__, gDictConfigByGroupId,
            'groupExample.programQQ,groupExample.programJC,'))
    pass
from flask import Flask, flash, render_template, session, escape, request, redirect, url_for
import db, hashlib
import import_file, logging

# Each model lives in its own file under models/ and is loaded dynamically
# with import_file (the directory is evidently not a regular importable
# package), then exposed as a module-level name.
Allowances = import_file.import_file('models/Allowances.py')
AuthorizedManHours = import_file.import_file('models/AuthorizedManHours.py')
ClientContactPersons = import_file.import_file(
    'models/ClientContactPersons.py')
Clients = import_file.import_file('models/Clients.py')
DetachmentContactPersons = import_file.import_file(
    'models/DetachmentContactPersons.py')
Detachments = import_file.import_file('models/Detachments.py')
FieldEmployees = import_file.import_file('models/FieldEmployees.py')
FieldEmployeeTypes = import_file.import_file('models/FieldEmployeeTypes.py')
HolidayMOR = import_file.import_file('models/HolidayMOR.py')
IncentiveMOR = import_file.import_file('models/IncentiveMOR.py')
Logs = import_file.import_file('models/Logs.py')
ManHourLogs = import_file.import_file('models/ManHourLogs.py')
OfficeEmployees = import_file.import_file('models/OfficeEmployees.py')
OfficeEmployeeTypes = import_file.import_file('models/OfficeEmployeeTypes.py')
PagibigCalamityLoans = import_file.import_file(
    'models/PagibigCalamityLoans.py')
PagibigSalaryLoans = import_file.import_file('models/PagibigSalaryLoans.py')
PayrollRecord = import_file.import_file('models/PayrollRecord.py')
PersonalPayables = import_file.import_file('models/PersonalPayables.py')
Rates = import_file.import_file('models/Rates.py')
RateTypes = import_file.import_file('models/RateTypes.py')
Receivables = import_file.import_file('models/Receivables.py')
SSSContributions = import_file.import_file('models/SSSContributions.py')
SSSLoans = import_file.import_file('models/SSSLoans.py')
UniformDeposits = import_file.import_file('models/UniformDeposits.py')
import MySQLdb, hashlib, cgi, cgitb; cgitb.enable()
import import_file, logging
from flask import flash

# Each model lives in its own file under models/ and is loaded dynamically
# with import_file, then exposed as a module-level name.
Allowances = import_file.import_file('models/Allowances.py')
AuthorizedManHours = import_file.import_file('models/AuthorizedManHours.py')
ClientContactPersons = import_file.import_file('models/ClientContactPersons.py')
Clients = import_file.import_file('models/Clients.py')
DetachmentContactPersons = import_file.import_file('models/DetachmentContactPersons.py')
Detachments = import_file.import_file('models/Detachments.py')
FieldEmployees = import_file.import_file('models/FieldEmployees.py')
FieldEmployeeTypes = import_file.import_file('models/FieldEmployeeTypes.py')
HolidayMOR = import_file.import_file('models/HolidayMOR.py')
IncentiveMOR = import_file.import_file('models/IncentiveMOR.py')
Logs = import_file.import_file('models/Logs.py')
ManHourLogs = import_file.import_file('models/ManHourLogs.py')
OfficeEmployees = import_file.import_file('models/OfficeEmployees.py')
OfficeEmployeeTypes = import_file.import_file('models/OfficeEmployeeTypes.py')
PagibigCalamityLoans = import_file.import_file('models/PagibigCalamityLoans.py')
PagibigSalaryLoans = import_file.import_file('models/PagibigSalaryLoans.py')
PayrollRecord = import_file.import_file('models/PayrollRecord.py')
PersonalPayables = import_file.import_file('models/PersonalPayables.py')
Rates = import_file.import_file('models/Rates.py')
RateType = import_file.import_file('models/RateType.py')
Receivables = import_file.import_file('models/Receivables.py')
SSSContributions = import_file.import_file('models/SSSContributions.py')
SSSLoans = import_file.import_file('models/SSSLoans.py')
UniformDeposits = import_file.import_file('models/UniformDeposits.py')

# Module-level DB connection/cursor shared by the whole module.
# NOTE(review): hard-coded credentials -- these should come from
# configuration, not source.
mysql = MySQLdb.connect('localhost','raymond','password','Eaglewatch')
cur = mysql.cursor()
import os import librosa import pickle import numpy as np from import_file import import_file import re import tensorflow as tf import logging import time import matplotlib.pyplot as plt model_7 = import_file( '/home/preetham/Documents/Preetham/masters-thesis/codes/grapheme-to-phoneme/luong/model_7.py' ) from model_7 import Encoder, Decoder os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' logging.getLogger('tensorflow').setLevel(logging.FATAL) physical_devices = tf.config.list_physical_devices('GPU') tf.config.experimental.set_visible_devices(physical_devices[0], 'GPU') tf.config.experimental.set_memory_growth(physical_devices[0], enable=True) mel_filter = librosa.filters.mel(sr=22050, n_fft=1024, n_mels=80, fmin=0, fmax=8000) def open_file(name): loc_to = '/home/preetham/Documents/Preetham/masters-thesis/'
import import_file db = import_file.import_file('db') SSSLoans = import_file.import_file('SSSLoans') def getAllSSSLoans(): res = db.List("SSSLoans") SSSLoansList = [] for row in res: if row is not None: SSSLoan = SSSLoans.SSSLoans( int(row[0]), int(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]) ) SSSLoansList.append(SSSLoan) row = db.cur.fetchone() return SSSLoansList def getSSSLoan(val): res = db.SubList("SSSLoans", "ID", val) for row in res: if row is not None: SSSLoan = SSSLoans.SSSLoans( int(row[0]), int(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]) ) return SSSLoans def addSSSLoan(FieldEmpID, Amount, MonthlyPay): sql = "CALL addSSSLoan( %s, %s, %s)" params = (FieldEmpID, Amount, MonthlyPay) try: db.cur.execute(sql, params) db.mysql.commit() except: print 'Error saving client' def deleteSSSLoan(ID):
import db, import_file

# Dynamically imported model module providing the Allowances row class.
Allowances = import_file.import_file('Allowances')

def getAllowances():
    # Return every Allowances row as Allowances objects.
    res = db.List("Allowances")
    AllowancesList = []
    for row in res:
        if row is not None:
            Allowance = Allowances.Allowances(
                int(row[0]), int(row[1]), int(row[2]), int(row[3]), str(row[4]), str(row[5])
            )
            AllowancesList.append(Allowance)
            row = db.cur.fetchone()
    return AllowancesList

def getAllowance(val):
    # Return the Allowances row with ID == val (first match), else None.
    res = db.SubList("Allowances", "ID", val)
    for row in res:
        if row is not None:
            Allowance = Allowances.Allowances(
                int(row[0]), int(row[1]), int(row[2]), int(row[3]), str(row[4]), str(row[5])
            )
            return Allowance

def getAllowances(val):
    # Return the allowances belonging to detachment ``val``.
    # NOTE(review): this re-definition SHADOWS the zero-argument
    # getAllowances() above -- any caller of the no-arg version now gets a
    # TypeError.  One of the two should be renamed; left as-is because the
    # callers are outside this view.
    res = db.SubList("Allowances", "DetachID", val)
    AllowanceList = []
    for row in res:
        if row is not None:
            Allowance = Allowances.Allowances(
                int(row[0]), int(row[1]), int(row[2]), int(row[3]), str(row[4]), str(row[5])
            )
            AllowanceList.append(Allowance)
            row = db.cur.fetchone()
    return AllowanceList
import db, import_file

# Dynamically imported model module providing the Receivables row class.
Receivables = import_file.import_file('Receivables')


def getReceivables():
    """Return every Receivables row as Receivables objects."""
    res = db.List("Receivables")
    ReceivablesList = []
    for row in res:
        if row is not None:
            Receivable = Receivables.Receivables(str(row[0]), str(row[1]))
            ReceivablesList.append(Receivable)
            # BUG FIX: was ``cur.fetchone()`` -- ``cur`` is undefined in
            # this module; the shared cursor lives on the db module
            # (cf. the sibling model modules).
            row = db.cur.fetchone()
    return ReceivablesList


def getReceivable(val):
    """Return the Receivables row with ID == val, or None when absent."""
    res = db.SubList("Receivables", "ID", val)
    for row in res:
        if row is not None:
            Receivable = Receivables.Receivables(str(row[0]), str(row[1]))
            # BUG FIX: was ``return Receivables`` -- that is the imported
            # module, not the constructed row object.
            return Receivable
import import_file

# Router/objects model loaded dynamically from a neighbouring file.
objects = import_file.import_file('objects.py')

import Tkinter as tk
import openpyxl as xl

class Application(tk.Frame):
    # Small Tk GUI: two integer entry fields ("Input"/"Output") and a Go
    # button wired to self.go (defined elsewhere -- not visible here).
    def __init__(self, master=None):
        tk.Frame.__init__(self, master)
        self.grid()
        self.createWidgets()
        # Router collection backing the GUI actions.
        self.r = objects.Routers()

    def createWidgets(self):
        # Labels
        self.inputlab = tk.Label(self,text='Input')
        self.outputlab = tk.Label(self,text='Output')
        self.inputlab.grid(row=0,column=0)
        self.outputlab.grid(row=1,column=0)
        # Input fields
        self.inputnum = tk.IntVar()
        self.outputnum = tk.IntVar()
        self.enterInput = tk.Entry(self,textvariable=self.inputnum,width=5)
        self.enterOutput = tk.Entry(self,textvariable=self.outputnum,width=5)
        self.enterInput.grid(row=0,column=1)
        self.enterOutput.grid(row=1,column=1)
        # Go button
        self.gobutton = tk.Button(self,command=self.go,text='Go')
        self.gobutton.grid(row=0,column=2,rowspan=2)
import os
from import_file import import_file
import json
from pweb.globe import default_globe
from pweb.globe import get_globe

# Registry of loaded plugin modules keyed by their settings ID, plus the
# flat list of settings dicts (populated at import time from plugins/).
mplugins = {}
plugins = []
plugin_files = os.listdir('plugins')
for plugin_file in plugin_files:
    # BUG FIX: the original tested ``file[-2:] == 'py'`` which matches any
    # name merely ending in "py" (no dot) and shadowed the builtin name
    # ``file``; require a real .py suffix instead.
    if plugin_file.endswith('.py'):
        mplugin = import_file('plugins/' + plugin_file)
        plugins.append(mplugin.settings)
        mplugins[mplugin.settings['ID']] = mplugin


def plugin_settings():
    """Load the persisted plugin activation settings from JSON."""
    # ROBUSTNESS: close the file deterministically (original leaked the
    # handle until GC).
    with open('plugin_settings.json') as infile:
        return json.load(infile)


def save_plugin_settings(settings):
    """Persist the plugin activation settings as JSON."""
    with open('plugin_settings.json', 'w') as outfile:
        json.dump(settings, outfile)


def init_globe():
    """Initialise the globe, letting every active plugin hook in."""
    default_globe()
    globe_settings = get_globe()
    settings = plugin_settings()
    for k in mplugins:
        if k in settings and settings[k] == "active":
            # ROBUSTNESS: plugins without an init_globe hook used to raise
            # AttributeError here; skip them instead.
            hook = getattr(mplugins[k], 'init_globe', None)
            if hook:
                hook(globe_settings)
import db, import_file

# Dynamically imported model module providing the SSSContributions row class.
SSSContributions = import_file.import_file('SSSContributions')


def getSSSContributions():
    """Return every SSSContributions row as objects."""
    res = db.List("SSSContributions")
    SSSContributionsList = []
    for row in res:
        if row is not None:
            contribution = SSSContributions.SSSContributions(
                str(row[0]), str(row[1]))
            SSSContributionsList.append(contribution)
            # BUG FIX: was ``cur.fetchone()`` -- ``cur`` is undefined in
            # this module; the shared cursor lives on the db module.
            row = db.cur.fetchone()
    return SSSContributionsList


def getSSSContribution(val):
    """Return the SSSContributions row with ID == val, or None."""
    res = db.SubList("SSSContributions", "ID", val)
    for row in res:
        if row is not None:
            contribution = SSSContributions.SSSContributions(
                str(row[0]), str(row[1]))
            # BUG FIX: was ``return SSSContributions`` -- that is the
            # imported module, not the constructed row object.
            return contribution
def import_file(self, path):
    # Thin wrapper delegating to the module-level ``import_file`` function
    # (the global resolves there, not to this method); exists so callers
    # can go through the instance.
    return import_file(path)
import db, import_file

# Dynamically imported model module providing the Rates row class.
Rates = import_file.import_file('Rates')


def getRates():
    """Return every Rates row as Rates objects."""
    res = db.List("Rates")
    RatesList = []
    for row in res:
        if row is not None:
            Rate = Rates.Rates(
                int(row[0]), int(row[1]), str(row[2]), str(row[3]),
                str(row[4]), int(row[5]), int(row[6]), int(row[7]),
                int(row[8]), int(row[9]), int(row[10]), int(row[11]),
                int(row[12]), int(row[13]), int(row[14]), str(row[15]),
            )
            RatesList.append(Rate)
            # BUG FIX: was ``cur.fetchone()`` -- ``cur`` is undefined in
            # this module; the shared cursor lives on the db module
            # (cf. the sibling model modules).
            row = db.cur.fetchone()
    return RatesList
import db, import_file

# Dynamically imported model module providing the PayrollRecord row class.
PayrollRecord = import_file.import_file('PayrollRecord')


def getPayrollRecords():
    """Return every PayrollRecord row as PayrollRecord objects."""
    res = db.List("PayrollRecord")
    PayrollRecordList = []
    for row in res:
        if row is not None:
            # BUG FIX: the original assigned to a local named
            # ``PayrollRecord``, shadowing the imported module and raising
            # UnboundLocalError on the very first use.
            record = PayrollRecord.PayrollRecord(
                int(row[0]), int(row[1]), int(row[2]), int(row[3]),
                int(row[4]), int(row[5]), int(row[6]), int(row[7]),
                int(row[8]), int(row[9]), int(row[10]), int(row[11]),
                int(row[12]), int(row[13]), int(row[14]), int(row[15]))
            PayrollRecordList.append(record)
            # BUG FIX: was ``cur.fetchone()`` -- ``cur`` is undefined in
            # this module; the shared cursor lives on the db module.
            row = db.cur.fetchone()
    return PayrollRecordList


def getPayrollRecord(val):
    """Return the PayrollRecord row with ID == val, or None."""
    res = db.SubList("PayrollRecord", "ID", val)
    for row in res:
        if row is not None:
            # Same shadowing fix as above.
            record = PayrollRecord.PayrollRecord(
                int(row[0]), int(row[1]), int(row[2]), int(row[3]),
                int(row[4]), int(row[5]), int(row[6]), int(row[7]),
                int(row[8]), int(row[9]), int(row[10]), int(row[11]),
                int(row[12]), int(row[13]), int(row[14]), int(row[15]))
            return record
def get_drill_down_detail(self, col_id, **filters):
    """Evaluate the drill-down object definition for column ``col_id``.

    The definition lives at
    ``<report_dir>/<id_>/drill_downs/<col_id>/objects.py`` and must expose
    ``objects(db, model_map, **filters)``.
    """
    report = self.flask_report
    path = os.path.join(report.report_dir, str(self.id_),
                        'drill_downs', str(col_id), 'objects.py')
    definition = import_file(path)
    return definition.objects(report.db, report.model_map, **filters)
import vtk, visibility, import_file
import numpy as np

scene, objects = import_file.import_file("cornell ext en r vlakken")
kdtree = visibility.build_kdtree(objects, len(scene))
polygons = []


def add_split_polygon(polygon, node):
    """Recursively collect the kd-tree splitting planes as quads.

    Every interior node contributes one axis-aligned rectangle (four
    corner points) spanning its bounding box at the split coordinate;
    the quads are appended to ``polygon`` in place.  Leaves (whose
    ``data`` is not a float) contribute nothing.
    """
    print(node.data)
    if not isinstance(node.data, float):
        # Leaf node: data is not a split coordinate.
        print("is in leaf")
        return polygon
    V = node.bounding_box
    axis = V[3] % 3
    d = node.data
    if axis == 0:
        quad = [[d, V[1][0], V[2][0]], [d, V[1][1], V[2][0]],
                [d, V[1][0], V[2][1]], [d, V[1][1], V[2][1]]]
    elif axis == 1:
        quad = [[V[0][0], d, V[2][0]], [V[0][1], d, V[2][0]],
                [V[0][0], d, V[2][1]], [V[0][1], d, V[2][1]]]
    else:
        # BUG FIX: the original axis==2 branch repeated the same two
        # corners twice (all x pinned to V[0][0]); mirror the corner
        # pattern of the other two axes.
        quad = [[V[0][0], V[1][0], d], [V[0][1], V[1][0], d],
                [V[0][0], V[1][1], d], [V[0][1], V[1][1], d]]
    # BUG FIX: the original appended the quad to a throwaway local list
    # (``new_polygon``) that was never read, so ``polygons`` stayed empty;
    # append to the caller-supplied list instead.
    polygon.append(quad)
    add_split_polygon(polygon, node.left_child)
    add_split_polygon(polygon, node.right_child)


add_split_polygon(polygons, kdtree)
import db, import_file, MySQLdb

# Dynamically imported model module providing the Clients row class.
Clients = import_file.import_file('Clients')

#OK
def getAllClients():
    # Return every Clients row as Clients objects.
    res = db.List("Clients")
    ClientList = []
    for row in res:
        if row is not None:
            Client = Clients.Clients(
                int(row[0]), str(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5])
            )
            ClientList.append(Client)
            row = db.cur.fetchone()
    return ClientList

#OK
def getClient(val):
    # Return the Clients row with ID == val (first match), else None.
    # NOTE(review): the ID column is built with str(row[0]) here but
    # int(row[0]) in getAllClients -- confirm which type callers expect.
    res = db.SubList("Clients", "ID", val)
    for row in res:
        if row is not None:
            Client = Clients.Clients(
                str(row[0]), str(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]))
            return Client

#OK
def saveClient(client):
    # Persist the editable Clients fields back to the table by ID.
    sql = "update Clients set Name=%s, BillingAddress=%s, City=%s, Landline=%s where ID=%s"
    params = (client.Name, client.BillingAddress, client.City, client.Landline, client.ID)
    try:
        db.cur.execute(sql, params)
        db.mysql.commit()
    except:
        # NOTE(review): bare except swallows every DB error and only
        # prints a generic message (py2 print statement) -- should at
        # minimum log the underlying error.
        print 'Error saving client'
import db, import_file PersonalPayables = import_file.import_file('PersonalPayables') def getPersonalPayables(): res = db.List("PersonalPayables") PersonalPayablesList = [] for row in res: if row is not None: PersonalPayable = PersonalPayables.PersonalPayables( int(row[0]), int(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]) ) PersonalPayablesList.append(PersonalPayable) row = cur.fetchone() return PersonalPayables def getPayrollRecord(val): res = db.SubList("PersonalPayables", "ID", val) for row in res: if row is not None: PersonalPayable = PersonalPayables.PersonalPayables( int(row[0]), int(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]) ) return PersonalPayables def listPayables(): res = db.ListByPeriod("PersonalPayables") PayableList = [] for row in res: if row is not None: Payable = PersonalPayables.PersonalPayables( int(row[0]), int(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]) ) PayableList.append(Payable) row = db.cur.fetchone() return PayableList def getPayables(val):
抓取議員甲乙的新聞與議案作為subset 只用這個subset來計算四個attribute的值總和來對打 ''' client = MongoClient('mongodb://localhost:27017/') db = client['ntp_councilor'] collection_crs = db['ntp_crs'] collection_cr_plat = db['ntp_platform'] collection_bill = db['ntp_bills'] collection_news = db['ntp_news_url_list_ckip'] collection_plat_bill_cor = db["ntp_platform_bill_cor"] collection_plat_news_cor = db["ntp_platform_news_cor"] collection_cr_for_all_plats = db['ntp_crs_compare'] plat_bill_cor = import_file( "../plats_all_relation_computing/ntp_plat_bill_cor.py") plat_news_cor = import_file( "../plats_all_relation_computing/ntp_plat_news_cor.py") plat_bill_join_cor = import_file( "../plats_all_relation_computing/ntp_plat_bill_join_cor.py") plat_news_pn_cor = import_file( "../plats_all_relation_computing/ntp_plat_news_pn_cor.py") crs = list(collection_crs.find()) plat_list = list(collection_cr_plat.find()) cr_plat_bill_list_cor = list(collection_plat_bill_cor.find()) cr_plat_news_list_cor = list(collection_plat_news_cor.find()) count = 0 for cr in crs: cr_dict = {} cr_id = cr["_id"]
proto = os.path.join(protopath, protofile) Logger.logInfo('Protobuf-compiling: ' + proto) subprocess.check_call([self.protoc, str('--python_out=' + script_path + '/proto/.'), proto, "--proto_path=" + str(protopath)]) # --proto_path return [os.path.join(script_path, "proto"), protofile.replace('.proto', '_pb2.py')] protos = list() proto_dict = dict() builder = ProtoBuild() for path in range(1, len(sys.argv)): for dirpath, dirnames, filenames in os.walk(sys.argv[path]): for filename in [f for f in filenames if f.endswith(".proto")]: current_proto = (builder.run(dirpath, filename)) proto_python_file = os.path.abspath(os.path.join(current_proto[0], current_proto[1])) proto_module = import_file(proto_python_file) file = os.path.join(dirpath, filename) with open(file, 'r') as File: lines = File.readlines() for line_nr, line in enumerate(lines): if line.startswith("// Message identifier: "): id = int(line.replace("// Message identifier: ", "").rstrip().replace(".", "")) message = lines[line_nr + 1].split(" ")[1] proto_dict[id] = [str(proto_python_file), str(message)] if not 0 in proto_dict.keys(): Logger.logError("ID: 0 --> odcore_data_MessageContainer not found in Protofiles!! Can't continue..") sys.exit(-1) python_version = sys.version_info.major
import db, import_file

# Dynamically imported model module providing the ClientContactPersons
# row class.
ClientContactPersons = import_file.import_file('ClientContactPersons')

def getAllClientContactPersons():
    # Return every ClientContactPersons row.
    # NOTE(review): rows are constructed with 6 fields here but with 9 in
    # getClientContactPersons below -- confirm the constructor's real
    # arity; one of the two calls is likely wrong.
    res = db.List("ClientContactPersons")
    ClientContactPersonsList = []
    for row in res:
        if row is not None:
            ClientContactPerson = ClientContactPersons.ClientContactPersons(
                int(row[0]), str(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]))
            ClientContactPersonsList.append(ClientContactPerson)
            row = db.cur.fetchone()
    return ClientContactPersonsList

#OK
def getClientContactPersons(val):
    # Return the contact persons belonging to client ``val``.
    res = db.SubList("ClientContactPersons", "ClientID", val)
    ClientContactPersonsList = []
    for row in res:
        if row is not None:
            ClientContactPerson = ClientContactPersons.ClientContactPersons(
                str(row[0]), str(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]), str(row[6]), str(row[7]), str(row[8]))
            ClientContactPersonsList.append(ClientContactPerson)
            row = db.cur.fetchone()
    return ClientContactPersonsList
import db, import_file Allowances = import_file.import_file('Allowances') def getAllowances(): res = db.List("Allowances") AllowancesList = [] for row in res: if row is not None: Allowance = Allowances.Allowances(int(row[0]), int(row[1]), int(row[2]), int(row[3]), str(row[4]), str(row[5])) AllowancesList.append(Allowance) row = db.cur.fetchone() return AllowancesList def getAllowance(val): res = db.SubList("Allowances", "ID", val) for row in res: if row is not None: Allowance = Allowances.Allowances(int(row[0]), int(row[1]), int(row[2]), int(row[3]), str(row[4]), str(row[5])) return Allowance def getAllowances(val): res = db.SubList("Allowances", "DetachID", val) AllowanceList = [] for row in res:
def get_drill_down_detail(self, col_id, **filters):
    """Evaluate the drill-down object definition for column ``col_id``.

    The definition lives at
    ``<report_dir>/<id_>/drill_downs/<col_id>/objects.py`` and must expose
    ``objects(db, model_map, **filters)``.
    """
    view = self.report_view
    path = os.path.join(view.report_dir, str(self.id_),
                        "drill_downs", str(col_id), "objects.py")
    lib = import_file(path)
    return lib.objects(view.db, view.model_map, **filters)
# Worker-thread entry point: classifies the selected files (csv/txt vs .xls vs
# other Excel), converts each to DataFrame/CSV via the dfExcel helper module
# (single-threaded or through a multiprocessing Pool), then optionally
# combines/zips the CSV output and/or bulk-uploads it to SQL Server, emitting
# progress text through Qt-style signals (self.value / startValue / endValue /
# timerValue / runCont) throughout.
# NOTE(review): code below left byte-identical -- the control flow is long,
# duplicated almost verbatim between the threads==1 and pooled branches, and
# depends on exact statement order (cwd globbing of intermediate *.csv/*.gz
# files), so a restyle was not safe from this view. Smells to confirm with
# the owner: bare `except: pass` around os.remove; `filePath` may be used
# before assignment in the `elif self.combine == False` branch; the hard-coded
# ODBC connection string is repeated six times.
def run(self): gc.collect() flat_files =[] non_flat = [] xls = [] count = 0 sheets = {} try: dfExcel = import_file('K:/A & A/Cardiff/Audit/Clients/Open/S/Spotlight/2. Staff Folders/JWalters/__Python/Exe/dfExcel.py') start = time.time() file_list = [filename for filename in self.files] self.value.emit("Importing Files:") for i in file_list: self.value.emit(" " + str(basename(i))) self.startValue.emit(35) if int(self.threads) == 1: formatTime = time.time() flat_files =[] non_flat = [] xls = [] count = 0 #ADD DELIM SELECTION USING SELF.DELIM for i in file_list: if os.path.splitext(i)[1].lower() == '.csv' or os.path.splitext(i)[1].lower() == '.txt': flat_files.append(i) elif os.path.splitext(i)[1].lower() == '.xls': xls.append(i) else: non_flat.append(i) sheets = {} for i in non_flat: wb= load_workbook(i, read_only=True) for j in wb.sheetnames: sheets[count] = [i,j] count +=1 if self.noForm == True: for i in sheets.items(): dfExcel.make_df_from_excelNoForm(i) for i in flat_files: dfExcel.make_df_from_csvNoForm(i, delim = self.delimIn) for i in xls: dfExcel.make_df_from_excelXLSNoForm(i) elif self.sheets == True: for i in non_flat: dfExcel.make_df_from_excelDepen(i, formatting = self.system) for i in xls: dfExcel.make_df_from_excelXLSDepen(i, formatting = self.system) for i in flat_files: dfExcel.make_df_from_csv(i, formatting = self.system, delim = self.delimIn) else: for i in sheets.items(): dfExcel.make_df_from_excel(i, formatting = self.system) for i in xls: dfExcel.make_df_from_excelXLS(i, formatting = self.system) for i in flat_files: dfExcel.make_df_from_csv(i, formatting = self.system, delim = self.delimIn) self.timerValue.emit(35) self.endValue.emit(1) self.value.emit('\nRead and Format Time: ' + str(round(time.time() - formatTime,2))) try: os.remove(basename('; '.join(self.filename))) except: pass writestart = time.time() if self.database != '' and self.saveFile != '': if '.' 
in basename(self.saveFile): fileName = os.path.splitext(self.saveFile)[0] filePath = left(fileName, len(fileName) - len(basename(fileName))) else: fileName = self.saveFile filePath = left(fileName, len(fileName) - len(basename(fileName))) if self.combine == True: if self.zipped == True: self.value.emit('\nSaving File to csv.gz...') dfExcel.combineCSV(fileName, filePath, zipped = self.zipped, delim = self.delimOut) else: self.value.emit('\nSaving File to csv...') dfExcel.combineCSV(fileName, filePath, zipped = self.zipped, delim = self.delimOut) self.value.emit('\nSaving File to Database...') engine = "DRIVER={ODBC Driver 13 for SQL Server};SERVER="+self.server+";DATABASE="+self.database+";Trusted_Connection=yes" dfExcel.uploadCSV(basename(fileName), engine, 50000) else: self.value.emit('\nSaving File to Database...') engine = "DRIVER={ODBC Driver 13 for SQL Server};SERVER="+self.server+";DATABASE="+self.database+";Trusted_Connection=yes" dfExcel.uploadCSV('pass', engine, 50000) if self.zipped == True: csv_files = [i for i in glob.glob('*.{}'.format('gz'))] for i in csv_files: move(i, filePath + i) else: csv_files = [i for i in glob.glob('*.{}'.format('csv'))] for i in csv_files: move(i, filePath + i) elif self.database != '': if '.' in basename(self.tableName): fileName = os.path.splitext(self.tableName)[0] filePath = left(fileName, len(fileName) - len(basename(fileName))) else: fileName = self.tableName filePath = left(fileName, len(fileName) - len(basename(fileName))) if self.combine == True: self.value.emit('\nSaving File to Database...') engine = "DRIVER={ODBC Driver 13 for SQL Server};SERVER="+self.server+";DATABASE="+self.database+";Trusted_Connection=yes" dfExcel.uploadCSV(basename(fileName), engine, 50000) else: self.value.emit('\nSaving File to Database...') engine = "DRIVER={ODBC Driver 13 for SQL Server};SERVER="+self.server+";DATABASE="+self.database+";Trusted_Connection=yes" dfExcel.uploadCSV('pass', engine, 50000) else: if '.' 
in basename(self.saveFile): fileName = os.path.splitext(self.saveFile)[0] filePath = left(fileName, len(fileName) - len(basename(fileName))) else: fileName = self.saveFile filePath = left(fileName, len(fileName) - len(basename(fileName))) if self.combine == True: if self.zipped == True: self.value.emit('\nSaving File to csv.gz...') dfExcel.combineCSV(fileName, filePath, zipped = self.zipped, delim = self.delimOut) else: self.value.emit('\nSaving File to csv...') dfExcel.combineCSV(fileName, filePath, zipped = self.zipped, delim = self.delimOut) else: if self.zipped == True: csv_files = [i for i in glob.glob('*.{}'.format('gz'))] for i in csv_files: move(i, filePath + i) else: csv_files = [i for i in glob.glob('*.{}'.format('csv'))] for i in csv_files: move(i, filePath + i) else: with Pool(int(self.threads), maxtasksperchild = 12) as pool: formatTime = time.time() flat_files =[] non_flat = [] xls = [] count = 0 for i in file_list: if os.path.splitext(i)[1].lower() == '.csv' or os.path.splitext(i)[1].lower() == '.txt': flat_files.append(i) elif os.path.splitext(i)[1].lower() == '.xls' : xls.append(i) else: non_flat.append(i) sheets = {} for i in non_flat: wb= load_workbook(i, read_only=True) for j in wb.sheetnames: sheets[count] = [i,j] count +=1 if self.noForm == True: flatCount = len(sheets) + 1 for i in flat_files: sheets[flatCount] = [i, 'FLAT'] flatCount +=1 for i in xls: sheets[flatCount] = [i, 'XLS'] flatCount +=1 pool.map(partial(dfExcel.methodSelection, delim = self.delimIn), sheets.items()) elif self.sheets == True: pool.map(partial(dfExcel.make_df_from_excelDepen, formatting = self.system), non_flat) pool.map(partial(dfExcel.make_df_from_excelXLSDepen, formatting = self.system), xls) pool.map(partial(dfExcel.make_df_from_csv, formatting = self.system, delim = self.delimIn), flat_files) else: pool.map(partial(dfExcel.make_df_from_excel, formatting = self.system), sheets.items()) pool.map(partial(dfExcel.make_df_from_excelXLS, formatting = self.system), xls) 
pool.map(partial(dfExcel.make_df_from_csv, formatting = self.system, delim = self.delimIn), flat_files) self.timerValue.emit(35) self.endValue.emit(1) self.value.emit('\nRead and Format Time: ' + str(round(time.time() - formatTime,2))) try: os.remove(basename('; '.join(self.filename))) except: pass writestart = time.time() if self.database != '' and self.saveFile != '': if '.' in basename(self.saveFile): fileName = os.path.splitext(self.saveFile)[0] filePath = left(fileName, len(fileName) - len(basename(fileName))) else: fileName = self.saveFile filePath = left(fileName, len(fileName) - len(basename(fileName))) if self.combine == True: if self.zipped == True: self.value.emit('\nSaving File to csv.gz...') dfExcel.combineCSV(fileName, filePath, zipped = self.zipped, delim = self.delimOut) else: self.value.emit('\nSaving File to csv...') dfExcel.combineCSV(fileName, filePath, zipped = self.zipped, delim = self.delimOut) self.value.emit('\nSaving File to Database...') engine = "DRIVER={ODBC Driver 13 for SQL Server};SERVER="+self.server+";DATABASE="+self.database+";Trusted_Connection=yes" dfExcel.uploadCSV(basename(fileName), engine, 50000) else: self.value.emit('\nSaving File to Database...') engine = "DRIVER={ODBC Driver 13 for SQL Server};SERVER="+self.server+";DATABASE="+self.database+";Trusted_Connection=yes" dfExcel.uploadCSV('pass', engine, 50000) if self.zipped == True: csv_files = [i for i in glob.glob('*.{}'.format('gz'))] for i in csv_files: move(i, filePath + i) else: csv_files = [i for i in glob.glob('*.{}'.format('csv'))] for i in csv_files: move(i, filePath + i) elif self.database != '': if '.' 
in basename(self.tableName): fileName = os.path.splitext(self.tableName)[0] filePath = left(fileName, len(fileName) - len(basename(fileName))) else: fileName = self.tableName filePath = left(fileName, len(fileName) - len(basename(fileName))) if self.combine == True: self.value.emit('\nSaving File to Database...') engine = "DRIVER={ODBC Driver 13 for SQL Server};SERVER="+self.server+";DATABASE="+self.database+";Trusted_Connection=yes" dfExcel.uploadCSV(basename(fileName), engine, 50000) else: self.value.emit('\nSaving File to Database...') engine = "DRIVER={ODBC Driver 13 for SQL Server};SERVER="+self.server+";DATABASE="+self.database+";Trusted_Connection=yes" dfExcel.uploadCSV('pass', engine, 50000) else: if self.combine == True: pass elif self.combine == False and self.saveFile != '': csv_files = [i for i in glob.glob('*.{}'.format('gz'))] for i in csv_files: move(i, filePath + i) if '.' in basename(self.saveFile): fileName = os.path.splitext(self.saveFile)[0] filePath = left(fileName, len(fileName) - len(basename(fileName))) else: fileName = self.saveFile filePath = left(fileName, len(fileName) - len(basename(fileName))) if self.combine == True: if self.zipped == True: self.value.emit('\nSaving File to csv.gz...') dfExcel.combineCSV(fileName, filePath, zipped = self.zipped, delim = self.delimOut) else: self.value.emit('\nSaving File to csv...') dfExcel.combineCSV(fileName, filePath, zipped = self.zipped, delim = self.delimOut) else: if self.zipped == True: csv_files = [i for i in glob.glob('*.{}'.format('gz'))] for i in csv_files: move(i, filePath + i) else: csv_files = [i for i in glob.glob('*.{}'.format('csv'))] for i in csv_files: move(i, filePath + i) writeend= time.time() self.value.emit('\nSave Complete.') self.value.emit('\nWrite time: ' + str(round(writeend - writestart,2))) self.value.emit("\nFormatting Complete.") end = time.time() csv_files = [i for i in glob.glob('*.{}'.format('csv'))] for i in csv_files: os.remove(i) flat_files =[] non_flat = [] xls 
= [] count = 0 sheets = {} self.value.emit("\nTotal Time Taken: " + str(round(end - start, 2)) + " Seconds.") self.runCont.emit() self.timerValue.emit(35) self.endValue.emit(1) gc.collect() except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] print(exc_type, fname, exc_tb.tb_lineno) self.value.emit(str(e)) self.endValue.emit(1) self.runCont.emit()
import db, import_file

# Row-mapper class for the FieldEmployees table, loaded at import time.
FieldEmployees = import_file.import_file('FieldEmployees')


#OK
def getAllFieldEmployees():
    """Return every FieldEmployees row as a mapped object."""
    rows = db.List("FieldEmployees")
    employees = []
    for row in rows:
        if row is not None:
            employee = FieldEmployees.FieldEmployees(
                str(row[0]), str(row[1]), str(row[2]), str(row[3]),
                str(row[4]), str(row[5]), str(row[6]), str(row[7]),
                str(row[8]), str(row[9]), str(row[10]), str(row[11]),
                str(row[12]), str(row[13]), int(row[14]), str(row[15]),
                str(row[16]), str(row[17]), str(row[18]), str(row[19]),
                str(row[20]))
            employees.append(employee)
            # NOTE(review): advancing the shared cursor here looks vestigial -- confirm.
            row = db.cur.fetchone()
    return employees


#OK
def getFieldEmployee(val):
    """Return the first FieldEmployees row whose ID equals *val*."""
    rows = db.SubList("FieldEmployees", "ID", val)
    for row in rows:
        if row is not None:
            return FieldEmployees.FieldEmployees(
                str(row[0]), str(row[1]), str(row[2]), str(row[3]),
                str(row[4]), str(row[5]), str(row[6]), str(row[7]),
                str(row[8]), str(row[9]), str(row[10]), str(row[11]),
                str(row[12]), str(row[13]), int(row[14]), str(row[15]),
                str(row[16]), str(row[17]), str(row[18]), str(row[19]),
                str(row[20]))


def getName(val):
    """Return the raw query result for the employee with the given ID."""
    # NOTE(review): val is interpolated directly into the SQL text --
    # injection risk if it can ever come from untrusted input.
    res = db.get("Select * from FieldEmployees where ID=%s" % val)
    return res
# Module prologue of the Excel/CSV converter GUI: third-party imports, the
# dfExcel helper module loaded from a hard-coded network path, VB-style string
# helpers (left/right/mid), multiprocessing freeze_support(), and the start of
# the Qt App widget class. Code below left byte-identical.
# NOTE(review): `freeze_support` and `QWidget` are not imported in this
# prologue -- presumably imported elsewhere in the file; confirm.
# NOTE(review): the App class body continues beyond this view.
import sqlalchemy as sa import pandas as pd import pyodbc import glob import time import sys import os import itertools from import_file import import_file from openpyxl import load_workbook from pandas.io import sql import gc #from dfExcel import make_df_from_excel, make_df_from_excelDepen, methodSelection, saveToCsv, write_df_to_sql dfExcel = import_file('K:/A & A/Cardiff/Audit/Clients/Open/S/Spotlight/2. Staff Folders/JWalters/__Python/Exe/dfExcel.py') def left(s, amount): return s[:amount] def right(s, amount): return s[-amount:] def mid(s, offset, amount): return s[offset:offset+amount] freeze_support() class App(QWidget):
# Detachments table accessors (code below left byte-identical; block is
# truncated in this view -- `getDetachment` ends mid-body).
# NOTE(review): each loop calls `db.cur.fetchone()` after appending, which
# looks like a leftover from a fetchone()-driven loop; confirm before touching.
import db, import_file, MySQLdb Detachments = import_file.import_file('Detachments') #OK def getAllDetachments(): res = db.List("Detachments") DetachmentsList = [] for row in res: if row is not None: Detachment = Detachments.Detachments( int(row[0]), int(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]), str(row[6]), str(row[7]), str(row[8]) ) DetachmentsList.append(Detachment) row = db.cur.fetchone() return DetachmentsList #OK def getAllDetachmentsbyID(val): res = db.SubList("Detachments", "ClientID", val) DetachmentList = [] for row in res: if row is not None: Detachment = Detachments.Detachments( int(row[0]), int(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]), str(row[6]), str(row[7]), str(row[8]) ) DetachmentList.append(Detachment) row = db.cur.fetchone() return DetachmentList #OK def getDetachment(val): res = db.SubList("Detachments", "ID", val) for row in res: if row is not None:
import db, import_file

# Row-mapper class for the PayrollRecord table, loaded at import time.
PayrollRecord = import_file.import_file('PayrollRecord')


def getPayrollRecords():
    """Return every PayrollRecord row as a mapped object.

    Fixes two defects: the loop used to rebind the module-level name
    ``PayrollRecord`` to an instance (so every row after the first raised
    AttributeError), and it called the bare name ``cur`` (NameError) instead
    of ``db.cur`` as the sibling modules do.
    """
    rows = db.List("PayrollRecord")
    records = []
    for row in rows:
        if row is not None:
            record = PayrollRecord.PayrollRecord(
                int(row[0]), int(row[1]), int(row[2]), int(row[3]),
                int(row[4]), int(row[5]), int(row[6]), int(row[7]),
                int(row[8]), int(row[9]), int(row[10]), int(row[11]),
                int(row[12]), int(row[13]), int(row[14]), int(row[15]),
                int(row[16]), int(row[17]), int(row[18]))
            records.append(record)
            # NOTE(review): advancing the shared cursor looks vestigial -- confirm.
            row = db.cur.fetchone()
    return records


def getPayrollRecord(val):
    """Return the first PayrollRecord row whose ID equals *val*, or None."""
    rows = db.SubList("PayrollRecord", "ID", val)
    for row in rows:
        if row is not None:
            # Return directly instead of rebinding the module-level name.
            return PayrollRecord.PayrollRecord(
                int(row[0]), int(row[1]), int(row[2]), int(row[3]),
                int(row[4]), int(row[5]), int(row[6]), int(row[7]),
                int(row[8]), int(row[9]), int(row[10]), int(row[11]),
                int(row[12]), int(row[13]), int(row[14]), int(row[15]),
                int(row[16]), int(row[17]), int(row[18]))
def get_last_descriptor_views_diff(self):
    """Return the ``views_diff`` mapping from the newest migration file.

    Returns an empty dict when no migration file exists yet.
    """
    latest = self.get_last_filename()
    if not latest:
        return {}
    module = import_file(os.path.join(settings.PG_VIEWS_MIGRATIONS_ROOT, latest))
    return module.views_diff
import db, import_file

# Row-mapper class for the Receivables table, loaded at import time.
Receivables = import_file.import_file('Receivables')


def getReceivables():
    """Return every Receivables row as a mapped object.

    Fix: the loop used the bare name ``cur`` (NameError); the sibling modules
    use ``db.cur``.
    """
    rows = db.List("Receivables")
    receivables = []
    for row in rows:
        if row is not None:
            receivable = Receivables.Receivables(str(row[0]), str(row[1]))
            receivables.append(receivable)
            # NOTE(review): advancing the shared cursor looks vestigial -- confirm.
            row = db.cur.fetchone()
    return receivables


def getReceivable(val):
    """Return the first Receivables row whose ID equals *val*, or None.

    Fix: previously returned the ``Receivables`` mapper class instead of the
    instance that was just constructed.
    """
    rows = db.SubList("Receivables", "ID", val)
    for row in rows:
        if row is not None:
            return Receivables.Receivables(str(row[0]), str(row[1]))
import db, import_file

# Row-mapper class for the IncentiveMOR table, loaded at import time.
IncentiveMOR = import_file.import_file('IncentiveMOR')


def getAllIncentiveMOR():
    """Return every IncentiveMOR row as a mapped object.

    Fixes three defects: the loop rebound the module-level ``IncentiveMOR``
    name to an instance, then appended to that instance instead of the result
    list (so the returned list was always empty and later rows crashed), and
    it called the bare name ``cur`` (NameError) instead of ``db.cur``.
    """
    rows = db.List("IncentiveMOR")
    incentives = []
    for row in rows:
        if row is not None:
            incentive = IncentiveMOR.IncentiveMOR(str(row[0]), str(row[1]))
            incentives.append(incentive)
            # NOTE(review): advancing the shared cursor looks vestigial -- confirm.
            row = db.cur.fetchone()
    return incentives


def getIncentiveMOR(val):
    """Return the first IncentiveMOR row whose ID equals *val*, or None."""
    rows = db.SubList("IncentiveMOR", "ID", val)
    for row in rows:
        if row is not None:
            # Return directly instead of rebinding the module-level name.
            return IncentiveMOR.IncentiveMOR(str(row[0]), str(row[1]))
import db, import_file

# Row-mapper class for the PagibigSalaryLoans table, loaded at import time.
PagibigSalaryLoans = import_file.import_file('PagibigSalaryLoans')


def getAllPagibigSalaryLoans():
    """Return every PagibigSalaryLoans row as a mapped object.

    Fixes two defects: the function returned the undefined copy-pasted name
    ``PagibigCalamityLoansList`` (NameError) instead of the list it built, and
    the loop called the bare name ``cur`` (NameError) instead of ``db.cur``.
    """
    rows = db.List("PagibigSalaryLoans")
    loans = []
    for row in rows:
        if row is not None:
            loan = PagibigSalaryLoans.PagibigSalaryLoans(
                int(row[0]), int(row[1]), int(row[2]),
                int(row[3]), int(row[4]), str(row[5]))
            loans.append(loan)
            # NOTE(review): advancing the shared cursor looks vestigial -- confirm.
            row = db.cur.fetchone()
    return loans


def getPagibigSalaryLoan(val):
    """Return the first PagibigSalaryLoans row whose ID equals *val*, or None."""
    rows = db.SubList("PagibigSalaryLoans", "ID", val)
    for row in rows:
        if row is not None:
            # Return directly instead of rebinding the module-level name.
            return PagibigSalaryLoans.PagibigSalaryLoans(
                int(row[0]), int(row[1]), int(row[2]),
                int(row[3]), int(row[4]), str(row[5]))
import db, import_file DetachmentContactPersons = import_file.import_file('DetachmentContactPersons') def getDetachmentContactPersons(val): res = db.SubList("DetachmentContactPersons", "DetachID", val) DetachmentContactPersonsList = [] for row in res: if row is not None: DetachmentContactPerson = DetachmentContactPersons.DetachmentContactPersons( int(row[0]), int(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]), str(row[6]), str(row[7]), str(row[8]) ) DetachmentContactPersonsList.append(DetachmentContactPerson) row = db.cur.fetchone() return DetachmentContactPersonsList #OK def getDetachmentContactPerson(val): res = db.SubList("DetachmentContactPersons", "ID", val) for row in res: if row is not None: Contact = DetachmentContactPersons.DetachmentContactPersons( str(row[0]), str(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]), str(row[6]), str(row[7]), str(row[8]) ) return Contact #OK def insertContact(contact): sql = "call addDetachmentCP(%s, %s, %s, %s, %s, %s, %s, %s)" params = (contact.DetachID, contact.Suffix, contact.LastName, contact.FirstName, contact.MiddleName, contact.Landline, contact.MobileNo, contact.BirthDate) try: db.cur.execute(sql, params) db.mysql.commit() except: print 'Error inserting contact'
import db, import_file

# Row-mapper class for the SSSContributions table, loaded at import time.
SSSContributions = import_file.import_file('SSSContributions')


def getSSSContributions():
    """Return every SSSContributions row as a mapped object.

    Fix: the loop used the bare name ``cur`` (NameError) instead of ``db.cur``.
    """
    rows = db.List("SSSContributions")
    contributions = []
    for row in rows:
        if row is not None:
            contribution = SSSContributions.SSSContributions(
                str(row[0]), str(row[1]))
            contributions.append(contribution)
            # NOTE(review): advancing the shared cursor looks vestigial -- confirm.
            row = db.cur.fetchone()
    return contributions


def getSSSContribution(val):
    """Return the first SSSContributions row whose ID equals *val*, or None.

    Fix: previously returned the ``SSSContributions`` mapper class instead of
    the instance that was just constructed.
    """
    rows = db.SubList("SSSContributions", "ID", val)
    for row in rows:
        if row is not None:
            return SSSContributions.SSSContributions(str(row[0]), str(row[1]))
def get_drill_down_detail(self, col_id, **filters):
    """Load this report's drill-down ``objects.py`` for *col_id* and run it.

    Returns whatever ``objects(...)`` produces for the flask report's db and
    model map; **filters are forwarded unchanged.
    """
    report = self.flask_report
    script = os.path.join(report.report_dir, str(self.id_),
                          'drill_downs', str(col_id), 'objects.py')
    lib = import_file(script)
    return lib.objects(report.db, report.model_map, **filters)
import db, import_file

# Row-mapper class for the RateTypes table, loaded at import time.
RateTypes = import_file.import_file('RateTypes')


def getRateTypes():
    """Return every RateTypes row as a mapped object.

    Fix: the loop used the bare name ``cur`` (NameError) instead of ``db.cur``
    as the sibling modules do.
    """
    rows = db.List("RateTypes")
    rate_types = []
    for row in rows:
        if row is not None:
            rate_type = RateTypes.RateTypes(str(row[0]), str(row[1]))
            rate_types.append(rate_type)
            # NOTE(review): advancing the shared cursor looks vestigial -- confirm.
            row = db.cur.fetchone()
    return rate_types


def getRateType(val):
    """Return the first RateTypes row whose ID equals *val*, or None."""
    rows = db.SubList("RateTypes", "ID", val)
    for row in rows:
        if row is not None:
            RateType = RateTypes.RateTypes(str(row[0]), str(row[1]))
            return RateType
import db, hashlib, import_file

# Row-mapper class for the OfficeEmployees table, loaded at import time.
OfficeEmployees = import_file.import_file('OfficeEmployees')


def getOfficeEmployees():
    """Return every OfficeEmployees row as a mapped object."""
    rows = db.List("OfficeEmployees")
    employees = []
    for row in rows:
        if row is not None:
            employee = OfficeEmployees.OfficeEmployees(
                int(row[0]), str(row[1]), str(row[2]), str(row[3]),
                str(row[4]), str(row[5]), str(row[6]), str(row[7]),
                str(row[8]), str(row[9]), str(row[10]), str(row[11]),
                str(row[12]), str(row[13]), int(row[14]), str(row[15]),
                str(row[16]), str(row[17]), str(row[18]))
            employees.append(employee)
            # NOTE(review): advancing the shared cursor looks vestigial -- confirm.
            row = db.cur.fetchone()
    return employees


def getOfficeEmployee(val):
    """Return the first OfficeEmployees row whose Username equals *val*."""
    rows = db.SubList("OfficeEmployees", "Username", val)
    for row in rows:
        if row is not None:
            return OfficeEmployees.OfficeEmployees(
                int(row[0]), str(row[1]), str(row[2]), str(row[3]),
                str(row[4]), str(row[5]), str(row[6]), str(row[7]),
                str(row[8]), str(row[9]), str(row[10]), str(row[11]),
                str(row[12]), str(row[13]), int(row[14]), str(row[15]),
                str(row[16]), str(row[17]), str(row[18]))


def login(username, password):
    """Return True when the SHA-1 of *password* matches the stored hash
    for *username*; otherwise return None.

    Fixes: the query string had no ``%s`` placeholder, so the ``%`` formatting
    raised TypeError before any query ran; and the digest was compared against
    the whole row tuple instead of the Password column.
    """
    # SECURITY(review): username is interpolated into the SQL text; switch to
    # parameterized execution if the db layer supports bind parameters.
    sql = "SELECT Username, Password FROM OfficeEmployees WHERE Username = '%s'" % username
    res = db.get(sql)
    for row in res:
        if row is not None:
            # row is (Username, Password); compare against the hash column.
            if hashlib.sha1(password).hexdigest() == row[1]:
                return True
    return None
# PersonalPayables table accessors (code below left byte-identical; block is
# truncated in this view -- `listPayables` ends mid-loop).
# NOTE(review): several apparent defects to confirm with the owner before
# fixing: `cur.fetchone()` uses a bare `cur` (siblings use `db.cur`);
# `getPersonalPayables` returns the mapper class `PersonalPayables` rather
# than the list it builds; the one-row getter is named `getPayrollRecord`
# (copy-paste?) and also returns the class instead of the built instance.
import db, import_file PersonalPayables = import_file.import_file('PersonalPayables') def getPersonalPayables(): res = db.List("PersonalPayables") PersonalPayablesList = [] for row in res: if row is not None: PersonalPayable = PersonalPayables.PersonalPayables( int(row[0]), int(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5])) PersonalPayablesList.append(PersonalPayable) row = cur.fetchone() return PersonalPayables def getPayrollRecord(val): res = db.SubList("PersonalPayables", "ID", val) for row in res: if row is not None: PersonalPayable = PersonalPayables.PersonalPayables( int(row[0]), int(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5])) return PersonalPayables def listPayables(): res = db.ListByPeriod("PersonalPayables") PayableList = [] for row in res:
def literal_filter_condition(self):
    """Load this report's ``filter_def.py`` (if present) and return its filter.

    Returns None implicitly when no filter definition file exists.
    """
    path = os.path.join(self.report_view.report_dir, str(self.id_),
                        "filter_def.py")
    if not os.path.exists(path):
        return None
    module = import_file(path)
    return module.get_filter(self.report_view.db, self.report_view.model_map)