Code example #1
import time


# Recherche and Excel are project-specific helpers; their imports are not shown in this snippet.
class Programme(object):

    def __init__(self):
        self.recherche = Recherche()
        self.fichier = Excel()
        self.idinse = self.recherche.getCommune()
        
    def mainloop(self):
        idv2,idv3 = self.fichier.lire()

        indicev3 = 1
        for i in range(len(idv3)):
            # timing for the token refresh
            start_time = time.time()
            a = str(idv3[i][0]).split('.')[0]
            for j in range(len(self.idinse)):
                companies = self.recherche.getRecherche(self.idinse[j][0],a)
                indicev3 = self.fichier.EcrireV3(indicev3,idv3[i],companies)
                end_time = time.time()
                if end_time - start_time >= 1000:
                    self.recherche.setToken()
                    start_time = time.time()  # restart the timing window after refreshing the token

        self.recherche.setToken()

        indicev2 = 1
        for i in range(len(idv2)):
            # timing for the token refresh
            start_time = time.time()
            a = str(idv2[i][0]).split('.')[0]
            for j in range(len(self.idinse)):
                companies = self.recherche.getRecherche(self.idinse[j][0],a)
                indicev2 = self.fichier.EcrireV2(indicev2,idv2[i],companies)
                end_time = time.time()
                if end_time - start_time >= 1000:
                    self.recherche.setToken()
                    start_time = time.time()  # restart the timing window after refreshing the token
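
The timing logic above interleaves the token refresh with the two write loops. As a minimal, self-contained sketch of the same refresh-every-N-seconds idea (Recherche and Excel are project-specific and not shown, so a placeholder callback stands in for setToken):

import time


class TokenTimer(object):
    """Call `refresh` whenever more than `max_age` seconds have elapsed."""

    def __init__(self, refresh, max_age=1000):
        self.refresh = refresh
        self.max_age = max_age
        self.started = time.time()

    def tick(self):
        if time.time() - self.started >= self.max_age:
            self.refresh()
            self.started = time.time()  # restart the window after refreshing


def fake_refresh():
    print("refreshing token")  # placeholder for recherche.setToken()


timer = TokenTimer(fake_refresh, max_age=1000)
for _ in range(3):
    timer.tick()  # calls fake_refresh() once 1000 s have elapsed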
Code example #2
    def __init__(self, roadSgtShp, facilityShp, direction, excelPath,
                 excelSheet):
        #1. reading road segments and facilities

        # open a shapefile with pyshp
        roadSgt0 = shapefile.Reader(roadSgtShp)

        # access geometry of the Shapefile
        roadSgt1 = roadSgt0.shapeRecords()

        # convert pyshp object to shapely
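        # note: shapely.geometry.asShape() was removed in Shapely 2.0; shape() is the modern replacement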
        self.roadSgtList = []

        for feature in roadSgt1:
            roadSgt2 = asShape(feature.shape.__geo_interface__)
            self.roadSgtList.append(roadSgt2)

        facility0 = shapefile.Reader(facilityShp)
        facility1 = facility0.shapeRecords()
        self.facilityList = []

        for feature in facility1:
            facility2 = asShape(feature.shape.__geo_interface__)
            self.facilityList.append(facility2)

        #2. setting order
        nearFrontList, facStartNo, rdSegList, facilList = self.setInitialSgt(
            direction)

        #3. finding the nearest front facility
        nxtRd = 0
        for indf, fac in enumerate(facilList):
            for indr, rsgt in enumerate(rdSegList[nxtRd:]):
                intersects = fac.within(rsgt.buffer(1.0))
                nearFrontList[indr + nxtRd][1] = [
                    abs(facStartNo - indf), fac.coords[0][0], fac.coords[0][1]
                ]

                if intersects:
                    #                     print "facility="+str(abs(facStartNo-indf))+", road="+str(indr+nxtRd)
                    nxtRd += indr + 1
                    break

        # list to dict
        nearFrontDict = {}
        for rd in nearFrontList:
            nearFrontDict[rd[0]] = rd[1]
        print nearFrontDict

        #4. writing excel
        exCol = []
        for rd in nearFrontDict.items():
            exCol.append(rd[1])

        ex.excelWriteOnExistingFileCol(excelPath, excelSheet, 2, exCol)
Code example #3
    def _on_extract(self, event):
        """ Action governing what happens when we press the 'floppy disc' icon

        Current set of analyses are saved to an excel file.

        @type self: Toolbar
        @type event: Event
        @rtype: None
        """
        Excel.generate_analysis(self.frame_object.experiment)
        event.Skip()
        self.frame_object.Destroy()
Code example #5
def idList(filename, sheetname, startRowNum=1):
    idExcel = ex.excelRead(filename, sheetname)
    idList = []
    for stationID in idExcel[startRowNum - 1:]:
        stationID = str(int(stationID[0].value))
        idList.append(stationID)
    return idList
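
ex.excelRead is a project helper that is not shown here. For reference only, a rough equivalent of idList written directly against openpyxl (assuming the station IDs sit in the first column and startRowNum is 1-based) might look like this:

from openpyxl import load_workbook


def id_list_openpyxl(filename, sheetname, start_row_num=1):
    ws = load_workbook(filename, read_only=True)[sheetname]
    ids = []
    for row in ws.iter_rows(min_row=start_row_num, values_only=True):
        if row[0] is None:
            continue  # skip empty rows
        ids.append(str(int(row[0])))  # numeric IDs -> plain strings
    return ids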
Code example #6
    def onExport(self, event):
        fileDlg = wx.FileDialog(self.mainWindow, 'Choose a save file location',
                                '', '', 'Excel files (*.xlsx)|*.xlsx',
                                wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)

        if fileDlg.ShowModal() == wx.ID_CANCEL: return

        reportFile = fileDlg.GetPath()
        wb = Excel.Workbook()
        report = wb.create_worksheet(
            'Integration log time report',
            columnWidths=[25, 20, 25, 20, 25, 30, 30, 25, 25, 25, 20])
        # sets header cells
        for col in 'ABCDEFGHIJK':
            report.add_header(col + '1')
        report.append([
            'Filename', 'IPEM Time', 'Teamcenter Time', 'User Time',
            'Download Time', 'Total Operation Time', 'Total Op w/o User Time',
            'Total Save Ops', 'Total Open Ops', 'Total Manage Ops', 'Total Ops'
        ])
        startcount = rowcounter = 1

        for row in xrange(
                self.mainWindow.displaySummaryListCtrl.GetItemCount()):
            rowcounter += 1
            iteminfo = []
            for col in xrange(
                    self.mainWindow.displaySummaryListCtrl.GetColumnCount()):
                iteminfo.append(
                    self.mainWindow.displaySummaryListCtrl.GetItem(
                        row, col).GetText())
            report.append(iteminfo)

        report.add_table(
            'Table' + str(1), 'A%s:K%s' % (str(startcount), str(rowcounter)), [
                'Filename', 'IPEM Time', 'Teamcenter Time', 'User Time',
                'Download Time', 'Total Operation Time',
                'Total Op w/o User Time', 'Total Save Ops', 'Total Open Ops',
                'Total Manage Ops', 'Total Ops'
            ])

        try:
            wb.save(reportFile)
            wx.MessageBox('File successfully saved.', 'Success!',
                          wx.ICON_INFORMATION | wx.OK, self.mainWindow)
        except IOError:
            wx.MessageBox(
                'The action cannot be completed because the file is in use.  Close the file and try again.',
                'File In Use', wx.ICON_ERROR | wx.OK, self.mainWindow)
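
Excel.Workbook here is a project wrapper; create_worksheet, add_header and add_table are not standard library calls. As a hedged sketch, the same headers-plus-rows report written with plain openpyxl might look roughly like this (column widths and table styling omitted, a placeholder row instead of the list-control data):

from openpyxl import Workbook

headers = ['Filename', 'IPEM Time', 'Teamcenter Time', 'User Time',
           'Download Time', 'Total Operation Time', 'Total Op w/o User Time',
           'Total Save Ops', 'Total Open Ops', 'Total Manage Ops', 'Total Ops']

wb = Workbook()
ws = wb.active
ws.title = 'Integration log time report'
ws.append(headers)
ws.append(['example.prt', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])  # placeholder row
wb.save('report.xlsx')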
Code example #7
File: Window.py Project: Shivaru/waybill
    def out_to_excel(self):
        datefrom = self.dateFrom.dateTime().toString('dd-MM-yyyy')
        dateto = self.dateTo.dateTime().toString('dd-MM-yyyy')
        print(datefrom, dateto)
        items = self.listWidget.selectedItems()
        caridname = items[0].text()
        car = None  # stays None if no entry in cars['items'] matches the selected name
        for i in cars['items']:
            name = i['nm']
            if name == caridname:
                carid = i['id']
                print(name, " FOUND ", caridname, carid)
                car = str(carid)
                print(car)
        global data
        data = GetDataWialon.get_data_cars(car, datefrom, dateto)
        # print(data)
        Excel.add_date_tofile(data)
Code example #8
def main():
    url = "https://www.freemaptools.com/find-population.htm"
    Log.clearLog()
    try:
        locations = Excel.makeLocationArray()
        radius_array = ['1', '2']
        # radius_array = Helpers.getRadiusArray()
        driver = startDriver(True)
        driver.get(url)
        getAddressPopulations(driver, locations, radius_array, url)
        driver.quit()
    except Exception as e:
        Error.printError(e)
        Error.saveError()
Code example #9
def getAddressPopulations(driver, locations, radius_array, url):
    for x in range(0, len(radius_array)):  # x+1 = x in excel sheet
        driver.get(url)
        for y in range(0, len(locations)):  # y+1 = y in excel sheet
            address = locations[y].address
            Error.setError(address, radius_array[x], x + 1, y + 1)
            Helpers.putRadius(driver, radius_array[x])
            success = Helpers.putAddress(driver, address)
            if success:
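                # fixed two-second pause to let the population value load; an explicit wait would be more robust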
                time.sleep(2)
                population = Helpers.getPopulation(driver)
                message = "Address: {0:38} | radius: {1:4} | population: {2:10}".format(
                    address, radius_array[x], population)
                print(message)
                Log.writeToLog(message)
                Excel.updatePopulationForAddress(radius_array[x], y + 1,
                                                 population)
            else:
                message = "Address: {0:38} | radius: {1:4} | population: {2:10}".format(
                    address, radius_array[x], "NULL")
                print(message)
                Log.writeToLog(message)
                Excel.updatePopulationForAddress(radius_array[x], y + 1,
                                                 "ERROR")
Code example #10
def idList4CHP(filename, sheetname, startRowNum=2):
    ## input- peMS_Incidents_ID_Mainline.xlsx containing [ID fwy direction abs_pm start end distance]
    idExcel = ex.excelRead(filename, sheetname)
    idList = []
    for stationID in idExcel[startRowNum - 1:]:
        stationIDList = []
        stationIDList.append(str(int(stationID[0].value)))
        stationIDList.append(str(int(stationID[1].value)))
        stationIDList.append(str(stationID[2].value))
        stationIDList.append(str(float(stationID[3].value)))
        stationIDList.append(str(float(stationID[4].value)))
        stationIDList.append(str(float(stationID[5].value)))
        stationIDList.append(str(float(stationID[6].value)))
        idList.append(stationIDList)
    return idList  # double list (one inner list of strings per station)
Code example #11
def agregarUsuarios(request):
    template = loader.get_template('home/admin.html')

    file = request.FILES.get("archivo")

    elementos_excel = Excel.manejar_excel(file)

    print(elementos_excel)

    cur = connection.cursor()
    for i in (elementos_excel):
        usuario = i[0]
        contrasena = i[3]
        correo = i[1]
        media = 1
        tipo_usuario = str(i[2])
        cur.callproc('obtener_id_tipo_usuario', [tipo_usuario])
        tipo_usuario_nu = cur.fetchall()
        tipo_usuario_numero = tipo_usuario_nu[0][0]

        # check whether the user already exists
        cur.nextset()
        cur.callproc('obtener_usuario_existente', [correo])
        us = cur.fetchall()

        if us != ():
            cur.nextset()
            cur.callproc('editar_usuario_existente',
                         [correo, usuario, tipo_usuario_numero])
            cur.nextset()

        else:
            cur.nextset()
            cur.callproc(
                'insertar_usuario',
                [usuario, contrasena, correo, media, tipo_usuario_numero])
            cur.nextset()

    cur.close()

    for i in (elementos_excel):
        correo = i[1]
        mensaje = "Se le notifica que su cuenta de usuario para nuestro sistema fue creada con éxito. Correo: " + i[
            1] + ". Contraseña: " + i[3]
        send_mail('Notificación de creación de la cuenta solicitada', mensaje,
                  '*****@*****.**', [correo])

    return HttpResponseRedirect(reverse('home:admin'))
Code example #12
File: Main.py Project: GyuBa/InterestedItem
    def button_clicked(self):
        self.statusLabel.setText("Run")
        print("Run")
        excel = Excel.Excel(self.fileNameEdit.text())
        startDate = self.startDateEdit.text()
        endDate = self.endDateEdit.text()
        webData = WebData.WebData(startDate, endDate)

        print("Web Data Run")

        print(webData.run())
        myDict = webData.getDict()
        excel.write(1, 1, webData.run())
        excel.write(1, 2, webData.getStockTitleList())
        excel.write(1, 3, webData.getStockIDList())
        excel.write(1, 4, myDict.values())
        excel.write(1, 5, myDict.keys())

        excel.save()
        print("Save")
        self.statusLabel.setText("Ready")
Code example #13
def exportSummaryResult(excelPath, smmaryResultDict):
    print("excelPath = " + excelPath.replace("/", "\\"))
    #excel = Excel(show=self.blnShowExcel, ifFailForceRestart=self.blnForceRestartExcel)
    excel = Excel(show=True, ifFailForceRestart=False)
    excel.get_sheet(1)

    lineNum = 0

    for (mainClass_k, mainClass_v) in smmaryResultDict.items():
        lineNum += 1
        excel.set_cell(lineNum, 1, mainClass_k)
        colNum = 1
        for (subClass_k, subClass_v) in mainClass_v.items():
            colNum += 1
            excel.set_cell(lineNum, colNum, subClass_k)
            #for (sln_k, sln_v) in subClass_v:

        # note: indexing .keys() like this only works on Python 2; use next(iter(mainClass_v)) on Python 3
        subClass_key1 = mainClass_v.keys()[0]
        subClass_v1 = mainClass_v[subClass_key1]
        sln_keys = subClass_v1.keys()

        for sln_key in sln_keys:
            lineNum += 1
            excel.set_cell(lineNum, 1, sln_key)
            colNum = 1
            for (subClass_k, subClass_v) in mainClass_v.items():
                colNum += 1
                print(subClass_v[sln_key])
                excel.set_cell(lineNum, colNum, subClass_v[sln_key])

        lineNum += 1

    #excel.set_cell(1,1,"test")
    excel.save(excelPath)
    excel.close()
    pass
Code example #14
# Runs the WorkFlow configured in an Excel file, looked up by name; the Excel file name can be written
# in Chinese characters [e.g. 打包.xlsx (build), 热更.xlsx (hot update), 服务端代码上传.xlsx (server code upload)].
# 	Then configure a shared excelFolderPath parameter in Jenkins,
# 	plus a choice parameter for which Excel file to run [e.g. 打包 / 热更 / 服务端代码上传].
# Pick the parameters and assemble a command in the following format:
# 	python ExcuteWorkFlowByExcel.py --excelFolderPath <folder containing the Excel files> --excelName 打包/热更/服务端代码上传 --jenkinsParameters aKey:aValue,bKey:bValue
# e.g. run:
# 	python /Users/jiasy/Documents/sourceFrame/pyWorkFlow/pythonCode/CommonTools/Jenkins/ExcuteWorkFlowByExcel.py --excelFolderPath /Users/jiasy/Documents/sourceFrame/pyWorkFlow/excel/build --excelName CocosCreatorBuild_Jenkins --jenkinsParameters debug:false,platform:web-mobile
#     In Jenkins (WORKSPACE is the built-in environment variable marking the checkout path; DEBUG and PLATFORM are custom parameters):
#   python $WORKSPACE/pythonCode/CommonTools/Jenkins/ExcuteWorkFlowByExcel.py --excelFolderPath $WORKSPACE/excel/build --excelName CocosCreatorBuild_Jenkins --jenkinsParameters debug:$DEBUG,platform:$PLATFORM
# ------------------------------------ Test case ------------------------------------
if __name__ == '__main__':
    _ops = SysInfo.getOps(opsDict, OptionParser())
    _currentFolder = SysInfo.fixFolderPath(os.path.dirname(os.path.realpath(__file__)))
    _excelPath = os.path.join(_ops.excelFolderPath, _ops.excelName + ".xlsx")
    _cmd = Excel.getExcuteCmd(_excelPath)

    _jenkinsParametersDict = {}
    if _ops.jenkinsParameters :
        # Persisted copy of the current Jenkins global parameters. Separate script runs do not share memory, so the parameters are written to a temp directory.
        _jenkinsParametersDict = CommonUtils.strListToDict(_ops.jenkinsParameters)
        # Parameterize each value (passed through SysInfo.setCmdStr).
        for _key in _jenkinsParametersDict:
            print  _key + " : " + _jenkinsParametersDict[_key]
            _jenkinsParametersDict[_key] = SysInfo.setCmdStr(_jenkinsParametersDict[_key])

    # Recreate the temp directory
    _tempFolder = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir, "temp"))
    FileReadWrite.reCreateFolder(_tempFolder)

    # The converted key/value pairs are saved to the temp folder as shared parameters for the current Jenkins run.
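
CommonUtils.strListToDict is a project helper that is not shown in this snippet. Judging from the --jenkinsParameters format described above (aKey:aValue,bKey:bValue), a minimal sketch of such a parser could look like this:

def str_list_to_dict(raw):
    """Parse 'aKey:aValue,bKey:bValue' into {'aKey': 'aValue', 'bKey': 'bValue'}."""
    result = {}
    if not raw:
        return result
    for pair in raw.split(','):
        key, _, value = pair.partition(':')
        result[key.strip()] = value.strip()
    return result


print(str_list_to_dict('debug:false,platform:web-mobile'))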
Code example #15
File: final_v.py Project: kev12123/Dwac-Users
def load_connection(driver='SQL Server Native Client 10.0', server='**********', database='**********'):
    # Function to load the session and MetaData
    quoted = urllib.quote_plus('DRIVER={0};Server={1};Database={2};Trusted_Connection=yes;'.format(driver, server, database))
    connection = create_engine('mssql+pyodbc:///?odbc_connect={}'.format(quoted))
    return connection

def equals(a,b):
    if str(a).strip().lower() == str(b).strip().lower():
        return True
    return False

conn = load_connection()
query = conn.execute("SELECT * FROM ceridian")
ceridian_data = [data for data in query]
ceridian_wb = Excel()
# note: the list comprehension above already consumed this ResultProxy, so it yields no rows here; pass ceridian_data instead if the helper expects the rows
ceridian_wb.transform_to_excel_spreadSheet(query,'C:\Users\kgiraldo\Desktop\practice_table.xlsx')


# output_sql_table_to_excel(query,'C:\Users\kgiraldo\Desktop\ceridian_table.xlsx')
query = conn.execute('SELECT * FROM employees')
dwack_data = [data for data in query]
# output_sql_table_to_excel('query,'C:\Users\kgiraldo\Desktop\dwack_users.xlsx')
book = load_workbook('C:\Users\kgiraldo\Desktop\dwack_users.xlsx')
ws=book.get_sheet_by_name('Sheet')

#EMPLOYEES WITH USER IDS IN THE CERIDIAN  TABLE
for i in ceridian_data:
    user_ids = i[13]
    for row in ws.iter_rows('C2:C99'):
        for cell in row:
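
This snippet is Python 2 (urllib.quote_plus), and the Windows paths are not raw strings, which would break on Python 3 where '\U' starts a unicode escape. For reference, the same connection setup under Python 3 would look roughly like this (the server and database values below are placeholders, not the redacted originals):

from urllib.parse import quote_plus

from sqlalchemy import create_engine


def load_connection(driver='SQL Server Native Client 10.0',
                    server='localhost', database='mydb'):
    quoted = quote_plus(
        'DRIVER={0};Server={1};Database={2};Trusted_Connection=yes;'.format(
            driver, server, database))
    return create_engine('mssql+pyodbc:///?odbc_connect={}'.format(quoted))


# raw strings (or forward slashes) avoid the backslash-escape problem in paths:
# book = load_workbook(r'C:\Users\kgiraldo\Desktop\dwack_users.xlsx')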
Code example #16
import queue

from Excel import *
from Menu import *
import datetime
from multiprocessing import Queue

Menu_Instructions()

# Loading Excel Files
oldfile, newfile = getXLfiles()

# Open Excel Files
OldExcel = Excel(oldfile)
NewExcel = Excel(newfile)

# Set Up Changes Tracked File
currentdate = datetime.datetime.now().strftime("%m.%d.%Y")
ChangeExcel = Excel(currentdate + " Changes Tracked")

ChangeExcel.setupfile()

print("Loading", oldfile, "....")
OldExcel.loadfile()
OldExcel.getIDs()
print(oldfile, "has been loaded!\n")
print("Loading", newfile, "....")
NewExcel.loadfile()
NewExcel.getIDs()
Code example #17
# coding=utf-8
#----------------------------------------
# 2016-09-09: added handling for the case where there is no requirements document
#----------------------------------------
import Excel
import codecs
import re
import os

excel = Excel.Excel()
reqexcel = Excel.Excel()
# Name of the test-case Excel file
featurename = u"D:/资料/测试用例/test2/IDEALENS VR Web1.1-安全测试.xlsx"
# Name of the requirements-template Excel file
requirename = u"D:/资料/测试用例/test2/IDEALENS VR Web1.1-安全测试需求-30.xls"
#requirenamelist=featurename.split("/")
#requirenamelist[len(requirenamelist)-1]=requirenamelist[len(requirenamelist)-1].replace(u"测试用例",u"需求模板")
#requirename="/".join(requirenamelist)
#if os.path.exists(requirename):
#    pass
#else:
#    requirename=requirename[:-1]

reqList = {}
reqstring = u"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
reqstring += u"<requirements>\n"
if os.path.exists(requirename):
    reqexcel.open_excel(requirename)
    requireNumber = reqexcel.worksheet.nrows
    statusList = {"4.0": "F", "": "R"}
    for i in range(1, requireNumber):
Code example #18
File: Objects.py Project: rubenflamshepherd/vaCATE
		@type y_series: [float]
			y_series of data. elut_cpms_log data is curve-stripped forms.
		@type k: float
			Rate constant of the phase (slope *2.303).
		@type t05: float
			Half-life of exchange of the phase (0.693/k).
		@type r0: float
			Rate of radioisotope release from compartment at time = 0 (antilog 
				of intercept).
		@type efflux: float
			Efflux from compartment (r0/SA).
		@rtype: None
		"""
        self.xs = xs  # paired tuple (x, y)
        self.xy1, self.xy2 = xy1, xy2  # Each is a paired tuple
        self.r2, self.slope, self.intercept = r2, slope, intercept
        self.x_series, self.y_series = x_series, y_series
        self.k, self.t05, self.r0, self.efflux = k, t05, r0, efflux


if __name__ == "__main__":
    import Excel
    import os
    directory = os.path.dirname(os.path.abspath(__file__))
    file_path = os.path.join(directory, "Tests/1/Test_SingleRun1.xlsx")
    temp_data = Excel.grab_data(file_path)

    temp_analysis = temp_data.analyses[0]
    temp_analysis.kind = 'obj'
    temp_analysis.obj_num_pts = 4
    temp_analysis.analyze()
Code example #19
File: ShoulderEvents.py Project: Mstockford77/Arc
join_field2 = "ID"
join_type = "KEEP_COMMON"
saveLayer = reviewer_db + "/Shoulder_Line_Review"

# Make a layer and join the tables with the errors and the route events.
# For line events:
now = datetime.datetime.now()
writeMsg("\nStarting Line Table Join at: " + str(now)[:-7])
arcpy.MakeFeatureLayer_management(in_features, layerName)
arcpy.AddJoin_management(layerName, in_field, join_table, join_field)
arcpy.AddJoin_management(layerName, in_field2, join_table2, join_field2, join_type)

# Save the layer to the gdb.
arcpy.CopyFeatures_management(layerName, saveLayer)

# Write Excel file
xls.makeExcel('Event_Checks.gdb')

# Check against valid list
valid.check_valid(reviewer_db)

# Script finish
now = datetime.datetime.now()
writeMsg("*********************************************")
writeMsg("\nScript finished running at: " + str(now)[:-7])

# Release data reviewer extension.
arcpy.CheckInExtension("datareviewer")

# Delete connection
arcpy.ClearWorkspaceCache_management()
Code example #20
File: test.py Project: link5201314/PearsonTest
def exportSummaryResult(excelPath, smmaryResultDict):
    print ("excelPath = " + excelPath.replace("/","\\"))
    #excel = Excel(show=self.blnShowExcel, ifFailForceRestart=self.blnForceRestartExcel)
    excel = Excel(show=True, ifFailForceRestart=False)
    excel.get_sheet(1)

    lineNum = 0

    for (mainClass_k, mainClass_v) in smmaryResultDict.items():
        lineNum+=1
        excel.set_cell(lineNum,1, mainClass_k)
        colNum = 1
        for (subClass_k, subClass_v) in mainClass_v.items():
            colNum+=1
            excel.set_cell(lineNum, colNum, subClass_k)
            #for (sln_k, sln_v) in subClass_v:

        # note: indexing .keys() like this only works on Python 2; use next(iter(mainClass_v)) on Python 3
        subClass_key1 = mainClass_v.keys()[0]
        subClass_v1 = mainClass_v[subClass_key1]
        sln_keys = subClass_v1.keys()

        for sln_key in sln_keys:
            lineNum+=1
            excel.set_cell(lineNum, 1, sln_key)
            colNum = 1
            for (subClass_k, subClass_v) in mainClass_v.items():
                colNum+=1
                print(subClass_v[sln_key])
                excel.set_cell(lineNum, colNum, subClass_v[sln_key])

        lineNum+=1



    #excel.set_cell(1,1,"test")
    excel.save(excelPath)
    excel.close()
    pass
Code example #21
# Each column's values are written out as a single file at outputJsonFolderPath/[sheet name]/[column name].json
# ------------------------------------ Test case ------------------------------------
if __name__ == '__main__':
    _ops = SysInfo.getOps(opsDict, OptionParser())
    _currentFolder = SysInfo.fixFolderPath(
        os.path.dirname(os.path.realpath(__file__)))
    # Recreate the output folder
    FileReadWrite.reCreateFolder(_ops.outputJsonFolderPath)

    # Parse each Excel file
    _excelPaths = _ops.excelPaths.split(",")
    for _i in range(len(_excelPaths)):
        _excelName = SysInfo.justName(_excelPaths[_i])
        _excelFolderPath = os.path.join(_ops.outputJsonFolderPath, _excelName)
        FileReadWrite.makeDirPlus(_excelFolderPath)
        _excelDict = Excel.dictFromExcelFile(_excelPaths[_i])
        for _sheetName in _excelDict:
            # Create a folder for each sheet
            _sheetFolderPath = os.path.join(_excelFolderPath, _sheetName)
            FileReadWrite.makeDirPlus(_sheetFolderPath)
            _sheetDict = _excelDict[_sheetName]
            for _colName in _sheetDict:
                # Create one file per column
                _colDict = _sheetDict[_colName]
                FileReadWrite.writeFileWithStr(
                    os.path.join(_sheetFolderPath, _colName + ".json"),
                    str(
                        json.dumps(_colDict,
                                   indent=4,
                                   sort_keys=False,
                                   ensure_ascii=False)))
Code example #22
        idList.append(stationID)
    return idList

if __name__ == '__main__':
    pems2=pms2.PeMS2()
    driver = pems2.initSession()
    print "start!"
    
    # note: this call rebinds the name idList from the helper function to the returned list
    idList = idList("peMS_ID_HOV.xlsx", "LA", startRowNum=2)
    print idList
    for stationID in idList:
        acci = pms2.DayOfWeek(driver, stationID)
        rowList = rowtoList(acci)
        
        print rowList[0][0]
        ex.excelWriteOnExistingFile("peMS_Accident_HOV.xlsx", "LA", 'A', rowList)
      
            
#     pems = pms.PeMS()
#     r, session = pems.initSession()
#     print "start!"
    
#     idList = idList("peMS_ID_Mainline.xlsx", "LA", startRowNum=2)
#     print idList
#     for stationID in idList:
#         cl = pms.ChangeLog(session, stationID)
#         rowList = rowtoList(cl)
#           
#         print rowList[0][0]        
#         ex.excelWriteOnExistingFile("peMS_Station_Mainline.xlsx", "LA", 'A', rowList)
Code example #23
    #         print rowList[0][0]
    #         ex.excelWriteOnExistingFile("test_changelog.xlsx", "Sheet1", 'A', rowList)

    #     ##AADT##
    #     idList = idList("peMS_ID_Mainline.xlsx", "LA", startRowNum=2)
    #     print idList
    #     for stationID in idList:
    #         aadt = pms.AADT(session, stationID, '20160101', '20161231')
    #         rowList = rowtoList(aadt)
    #
    #         print rowList[0][4]
    #         ex.excelWriteOnExistingFile("peMS_AADT_Mainline.xlsx", "LA", 'A', rowList)

    #     ##Incidents##
    #     idList = idList4CHP("peMS_Incidents_ID_Mainline.xlsx", "OC", startRowNum=2)
    #     for row in idList:
    #         chp = pms.CHPIncidents(session, row[0], row[1], row[3], row[4], row[5], row[6], "accident")
    #         rowList = rowtoList(chp)
    #
    #         print rowList[0][0]
    #         ex.excelWriteOnExistingFile("peMS_Incidents_Accidents_Mainline.xlsx", "OC", 'A', rowList)

    ## RawData
    idList = idList("test.xlsx", "Sheet1", startRowNum=2)
    print idList
    for stationID in idList:
        rd = pms.RawData(session, stationID, "201705100700", "201705110900",
                         "flow", "sec")
        rdList = rd[0]
        ex.excelWriteOnExistingFileCol("test2.xlsx", "Sheet1", 1, rdList)
Code example #24
def test_analysis(file_name, analysis_data):
    directory = os.path.dirname(os.path.abspath(__file__))
    question_path = os.path.join(directory, file_name)
    print question_path
    question_exp = Excel.grab_data(question_path)
    answer_exp = grab_answers(directory, file_name, \
                              question_exp.analyses[0].run.elut_ends)
    for index, question in enumerate(question_exp.analyses):
        if 'Subj' in file_name:
            question.kind = 'subj'
            question.xs_p3 = analysis_data[2]
            question.xs_p2 = analysis_data[1]
            question.xs_p1 = analysis_data[0]
            question.analyze()
            answer = answer_exp.analyses[index]
        else:
            question.kind = 'obj'
            question.obj_num_pts = analysis_data
            question.analyze()
            answer = answer_exp.analyses[index]
            for counter in range(0, len(question.r2s)):
                assert_equals(
                    "{0:.9f}".format(question.r2s[counter]),
                    "{0:.9f}".format(answer.r2s[counter]))

        assert_equals(question.run.SA, answer.SA)
        assert_equals(question.run.name, answer.name)
        assert_equals(question.run.rt_cnts, answer.rt_cnts)
        assert_equals(question.run.sht_cnts, answer.sht_cnts)
        assert_equals(question.run.rt_wght, answer.rt_wght)
        assert_equals(question.run.gfact, answer.gfact)
        assert_equals(question.run.load_time, answer.load_time)
        assert_equals(question.run.elut_ends, answer.elut_ends)
        assert_equals(question.run.elut_cpms, answer.elut_cpms)
        assert_equals(question.run.elut_starts, answer.elut_starts)
        for index, item in enumerate(question.run.elut_cpms_gfact):
            assert_equals(
                "{0:.10f}".format(question.run.elut_cpms_gfact[index]),
                "{0:.10f}".format(answer.elut_cpms_gfact[index]))
        assert_equals(question.run.elut_cpms_gRFW, answer.elut_cpms_gRFW)
        assert_equals(question.run.elut_cpms_log, answer.elut_cpms_log)

        assert_equals(question.phase3.xs[0], answer.phase3.xs[0])
        assert_equals(question.phase3.xs[1], answer.phase3.xs[1])
        assert_equals(
            "{0:.7f}".format(question.phase3.slope),
            "{0:.7f}".format(answer.phase3.slope))
        assert_equals(
            "{0:.7f}".format(question.phase3.intercept),
            "{0:.7f}".format(answer.phase3.intercept))
        assert_equals(
            "{0:.7f}".format(question.phase3.k),
            "{0:.7f}".format(answer.phase3.k))
        assert_equals(
            "{0:.7f}".format(question.phase3.r0),
            "{0:.7f}".format(answer.phase3.r0))
        assert_equals(
            "{0:.7f}".format(question.phase3.efflux),
            "{0:.7f}".format(answer.phase3.efflux))
        assert_equals(
            "{0:.7f}".format(question.phase3.t05),
            "{0:.7f}".format(answer.phase3.t05))
        assert_equals(
            "{0:.7f}".format(question.phase3.r2),
            "{0:.7f}".format(answer.phase3.r2))

        assert_equals(
            "{0:.7f}".format(question.netflux),
            "{0:.7f}".format(answer.netflux))
        assert_equals(
            "{0:.7f}".format(question.influx),
            "{0:.7f}".format(answer.influx))
        assert_equals(
            "{0:.7f}".format(question.ratio),
            "{0:.7f}".format(answer.ratio))
        assert_equals(
            "{0:.7f}".format(question.poolsize),
            "{0:.7f}".format(answer.poolsize))
        assert_equals(
            "{0:.7f}".format(question.tracer_retained),
            "{0:.7f}".format(answer.tracer_retained))

        assert_equals(question.phase2.xs[0], answer.phase2.xs[0])
        assert_equals(question.phase2.xs[1], answer.phase2.xs[1])
        if question.phase2.xs != ('', ''):
            assert_equals(
                "{0:.7f}".format(question.phase2.slope),
                "{0:.7f}".format(answer.phase2.slope))
            assert_equals(
                "{0:.7f}".format(question.phase2.intercept),
                "{0:.7f}".format(answer.phase2.intercept))
            assert_equals(
                "{0:.7f}".format(question.phase2.k),
                "{0:.7f}".format(answer.phase2.k))
            assert_equals(
                "{0:.2f}".format(question.phase2.r0),
                "{0:.2f}".format(answer.phase2.r0))
            assert_equals(
                "{0:.4f}".format(question.phase2.efflux),
                "{0:.4f}".format(answer.phase2.efflux))
            assert_equals(
                "{0:.7f}".format(question.phase2.t05),
                "{0:.7f}".format(answer.phase2.t05))
            assert_equals(
                "{0:.7f}".format(question.phase2.r2),
                "{0:.7f}".format(answer.phase2.r2))
        else:
            assert_equals(question.phase2.slope, answer.phase2.slope)
            assert_equals(question.phase2.intercept, answer.phase2.intercept)
            assert_equals(question.phase2.k, answer.phase2.k)
            assert_equals(question.phase2.r0, answer.phase2.r0)
            assert_equals(question.phase2.efflux, answer.phase2.efflux)
            assert_equals(question.phase2.t05, answer.phase2.t05)
            assert_equals(question.phase2.r2, answer.phase2.r2)

        assert_equals(question.phase1.xs[0], answer.phase1.xs[0])
        assert_equals(question.phase1.xs[1], answer.phase1.xs[1])
        if question.phase1.xs != ('', ''):
            assert_equals(
                "{0:.7f}".format(question.phase1.slope),
                "{0:.7f}".format(answer.phase1.slope))
            assert_equals(
                "{0:.7f}".format(question.phase1.intercept),
                "{0:.7f}".format(answer.phase1.intercept))
            assert_equals(
                "{0:.7f}".format(question.phase1.k),
                "{0:.7f}".format(answer.phase1.k))
            assert_equals(
                "{0:.4f}".format(question.phase1.r0),
                "{0:.4f}".format(answer.phase1.r0))
            assert_equals(
                "{0:.6f}".format(question.phase1.efflux),
                "{0:.6f}".format(answer.phase1.efflux))
            assert_equals(
                "{0:.7f}".format(question.phase1.t05),
                "{0:.7f}".format(answer.phase1.t05))
            assert_equals(
                "{0:.7f}".format(question.phase1.r2),
                "{0:.7f}".format(answer.phase1.r2))
        else:
            assert_equals(question.phase1.slope, answer.phase1.slope)
            assert_equals(question.phase1.intercept, answer.phase1.intercept)
            assert_equals(question.phase1.k, answer.phase1.k)
            assert_equals(question.phase1.r0, answer.phase1.r0)
            assert_equals(question.phase1.efflux, answer.phase1.efflux)
            assert_equals(question.phase1.t05, answer.phase1.t05)
            assert_equals(question.phase1.r2, answer.phase1.r2)
Code example #25
        else:
            assert_equals(question.phase1.slope, answer.phase1.slope)
            assert_equals(question.phase1.intercept, answer.phase1.intercept)
            assert_equals(question.phase1.k, answer.phase1.k)
            assert_equals(question.phase1.r0, answer.phase1.r0)
            assert_equals(question.phase1.efflux, answer.phase1.efflux)
            assert_equals(question.phase1.t05, answer.phase1.t05)
            assert_equals(question.phase1.r2, answer.phase1.r2)


if __name__ == '__main__':
    import Excel

    directory = os.path.dirname(os.path.abspath(__file__))
    # temp_data = Excel.grab_data(directory, "/Tests/Edge Cases/Test_SubjMissMidPtPh123.xlsx")
    temp_data = Excel.grab_data(directory, "/Tests/4/Test_SingleRun7.xlsx")
    temp_question = temp_data.analyses[0]
    # temp_question.kind = 'subj'
    # temp_question.xs_p1 = (1,3)
    # temp_question.xs_p2 = (4,10)
    # temp_question.xs_p3 = (11.5,40)
    temp_question.kind = 'obj'
    temp_question.obj_num_pts = 8
    temp_question.analyze()

    # temp_exp = grab_answers(directory, "/Tests/Edge Cases/Test_SubjMissMidPtPh123.xlsx", temp_question.run.elut_ends)
    temp_exp = grab_answers(directory, "/Tests/4/Test_SingleRun7.xlsx", temp_question.run.elut_ends)
    temp_answer = temp_exp.analyses[0]

    print "ANSWERS"
    # print temp_answer.phase1.x
Code example #26
File: Main.py Project: nmskate/xinlangweibo
    # Initialize logging
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        filename=file,
                        filemode='w')


if __name__ == '__main__':
    # Program start time
    start_time = datetime.now()

    init_logging()
    init_http_client()

    # Parse the raw data from the Excel file
    excel_file = Excel.read_excel_file(INPUT_FILE)

    # # Fetch data for each sheet
    # final_data = OrderedDict()
    # for sheet in excel_file.sheets:
    #     final_data[sheet.name] = MicroBlog.fetch_weibo(sheet)
    #
    # # Write the results to a file
    # workbook = Workbook()
    # for sheet_name, item_data in final_data.items():
    #     Excel.output_weibo_data(item_data, sheet_name, workbook)
    # workbook.save(OUTPUT_FILE)

    print('共耗时:', (datetime.now() - start_time).seconds, '秒')
Code example #27
    def run(self):
        const.start_time_style = time.strftime('%Y-%m-%d %H:%M:%S',
                                               time.localtime())
        const.start_time = datetime.datetime.now()
        # if self.env.lower() == "online" and info.iterm == "yiqi":
        #     # Write the token value into the variable dict first
        #     const.var_dict["${token}"] = login.login(y_con.username_online_yiqi, y_con.password_online_yiqi, "GET")
        # elif self.env.lower() == "online" and info.iterm == "qingdao":
        #     const.var_dict["${token}"] = login.login(y_con.username_online_qingdao, y_con.password_online_qingdao, "GET")
        # else:
        #     const.var_dict["${token}"] = login.login(y_con.username, y_con.password, "GET")

        if self.env.lower() == "online" and info.iterm == "yiqi":
            const.var_dict["${token}"] = login.login(
                y_con.username_online_yiqi, y_con.password_online_yiqi, "GET")
        else:
            const.var_dict["${token}"] = login.login(y_con.username,
                                                     y_con.password, "GET")

        excel = Excel(self.dir_case, self.dir_case_result)
        # html_report = htmlGenerator.report(self.dir_result)
        cases = excel.get_cases()
        for case in cases:
            # In the UAT environment, skip cases whose UAT column is not 'Yes'
            if self.env.lower() == "uat":
                if case.uat_env != 'Yes':
                    continue

                case.run_case()
                if info.write_back == 0:
                    excel.set_out_cell(
                        excel.ROW_G, 9,
                        '{}={}'.format(case.va_in_para,
                                       case.get_variable_in_params()))
                    all_result = [
                        case.entire_url, case.response, case.result_value,
                        case.fail_time, case.elapsed, case.actual_params
                    ]
                    index = 0
                    for i in range(13, 19):
                        excel.set_out_cell(case.line, i, all_result[index])
                        excel.save_excel()
                        index += 1
                    excel.save_excel()

                elif info.write_back == 1:
                    if case.result_value == 'Fail':
                        excel.set_out_cell(
                            excel.ROW_G, 9,
                            '{}={}'.format(case.va_in_para,
                                           case.get_variable_in_params()))
                        all_result = [
                            case.entire_url, case.response, case.result_value,
                            case.fail_time, case.elapsed, case.actual_params
                        ]
                        index = 0
                        for i in range(13, 19):
                            excel.set_out_cell(case.line, i, all_result[index])
                            excel.save_excel()
                            index += 1
                    excel.save_excel()
Code example #28
File: checkbackup.py Project: private-love/scripts
        d = (datetime.now() - timedelta(days=1)).strftime('%d')

    h = (datetime.now() - timedelta(days=1)).strftime('%H')
    M = (datetime.now() - timedelta(days=1)).strftime('%M')
    S = (datetime.now() - timedelta(days=1)).strftime('%S')

    isSendMail = func.get_config('global_value', 'isSendMail')
    # Main body: call the backup-check function
    result, err_result, total_size_str = check_backup(Y, y, m, d)

    if len(result) > 1:
        att_filename = "xls/backuplist_" + str(Y) + "-" + str(m) + "-" + str(
            d) + "_" + str(h) + str(M) + str(S) + ".xlsx"
        hdngs = ['服务器IP', '备份昵称', '备份日期', '备份大小', '备份文件名', '备份目录']
        exc_ins = Excel.Excel(att_filename,
                              sheet_name='ciSheet',
                              encoding='utf-8')
        exc_ins.backup_xlsx_write(hdngs, result)

    if is_auto and isSendMail == 'yes' and len(result) > 1:
        content = func.toHtml(result, total_size_str)
        sub = "服务器备份每日统计(" + str(Y) + '-' + str(m) + '-' + str(d) + ")"
        #func.send_mail(func.mailto_list,sub,content)
        func.sendMail(sub, content, func.mailto_list, att_filename)
    else:
        print json.dumps('suc!')

    if len(err_result) > 0 and isSendMail == 'yes':
        err_sub = "备件检查存在错误(" + str(Y) + '-' + str(m) + '-' + str(d) + ")"
        err_content = func.toErrHtml(err_result)
        func.send_mail(func.mailto_list, err_sub, err_content)
Code example #29
saveLayer = reviewer_db + "/Events_Line_Review"

# Make a layer and join the tables with the errors and the route events.
# For line events:
now = datetime.datetime.now()
writeMsg("\nStarting Line Table Join at: " + str(now)[:-7])
arcpy.MakeFeatureLayer_management(in_features, layerName)
arcpy.AddJoin_management(layerName, in_field, join_table, join_field)
arcpy.AddJoin_management(layerName, in_field2, join_table2, join_field2,
                         join_type)

# Save the layer to the gdb.
arcpy.CopyFeatures_management(layerName, saveLayer)

# Write Excel file
xls.makeExcel(reviewer_db)

# Check against valid list
valid.check_valid(reviewer_db)

# Script finish
now = datetime.datetime.now()
writeMsg("*********************************************")
writeMsg("\nScript finished running at: " + str(now)[:-7])

# Release data reviewer extension.
arcpy.CheckInExtension("datareviewer")

# Delete connection
arcpy.ClearWorkspaceCache_management()
Code example #30
File: Tests.py Project: rubenflamshepherd/vaCATE
def test_analysis(file_name, analysis_data):
    directory = os.path.dirname(os.path.abspath(__file__))
    question_path = os.path.join(directory, file_name)
    print question_path
    question_exp = Excel.grab_data(question_path)
    answer_exp = grab_answers(directory, file_name, \
                              question_exp.analyses[0].run.elut_ends)
    for index, question in enumerate(question_exp.analyses):
        if 'Subj' in file_name:
            question.kind = 'subj'
            question.xs_p3 = analysis_data[2]
            question.xs_p2 = analysis_data[1]
            question.xs_p1 = analysis_data[0]
            question.analyze()
            answer = answer_exp.analyses[index]
        else:
            question.kind = 'obj'
            question.obj_num_pts = analysis_data
            question.analyze()
            answer = answer_exp.analyses[index]
            for counter in range(0, len(question.r2s)):
                assert_equals("{0:.9f}".format(question.r2s[counter]),
                              "{0:.9f}".format(answer.r2s[counter]))

        assert_equals(question.run.SA, answer.SA)
        assert_equals(question.run.name, answer.name)
        assert_equals(question.run.rt_cnts, answer.rt_cnts)
        assert_equals(question.run.sht_cnts, answer.sht_cnts)
        assert_equals(question.run.rt_wght, answer.rt_wght)
        assert_equals(question.run.gfact, answer.gfact)
        assert_equals(question.run.load_time, answer.load_time)
        assert_equals(question.run.elut_ends, answer.elut_ends)
        assert_equals(question.run.elut_cpms, answer.elut_cpms)
        assert_equals(question.run.elut_starts, answer.elut_starts)
        for index, item in enumerate(question.run.elut_cpms_gfact):
            assert_equals(
                "{0:.10f}".format(question.run.elut_cpms_gfact[index]),
                "{0:.10f}".format(answer.elut_cpms_gfact[index]))
        assert_equals(question.run.elut_cpms_gRFW, answer.elut_cpms_gRFW)
        assert_equals(question.run.elut_cpms_log, answer.elut_cpms_log)

        assert_equals(question.phase3.xs[0], answer.phase3.xs[0])
        assert_equals(question.phase3.xs[1], answer.phase3.xs[1])
        assert_equals("{0:.7f}".format(question.phase3.slope),
                      "{0:.7f}".format(answer.phase3.slope))
        assert_equals("{0:.7f}".format(question.phase3.intercept),
                      "{0:.7f}".format(answer.phase3.intercept))
        assert_equals("{0:.7f}".format(question.phase3.k),
                      "{0:.7f}".format(answer.phase3.k))
        assert_equals("{0:.7f}".format(question.phase3.r0),
                      "{0:.7f}".format(answer.phase3.r0))
        assert_equals("{0:.7f}".format(question.phase3.efflux),
                      "{0:.7f}".format(answer.phase3.efflux))
        assert_equals("{0:.7f}".format(question.phase3.t05),
                      "{0:.7f}".format(answer.phase3.t05))
        assert_equals("{0:.7f}".format(question.phase3.r2),
                      "{0:.7f}".format(answer.phase3.r2))

        assert_equals("{0:.7f}".format(question.netflux),
                      "{0:.7f}".format(answer.netflux))
        assert_equals("{0:.7f}".format(question.influx),
                      "{0:.7f}".format(answer.influx))
        assert_equals("{0:.7f}".format(question.ratio),
                      "{0:.7f}".format(answer.ratio))
        assert_equals("{0:.7f}".format(question.poolsize),
                      "{0:.7f}".format(answer.poolsize))
        assert_equals("{0:.7f}".format(question.tracer_retained),
                      "{0:.7f}".format(answer.tracer_retained))

        assert_equals(question.phase2.xs[0], answer.phase2.xs[0])
        assert_equals(question.phase2.xs[1], answer.phase2.xs[1])
        if question.phase2.xs != ('', ''):
            assert_equals("{0:.7f}".format(question.phase2.slope),
                          "{0:.7f}".format(answer.phase2.slope))
            assert_equals("{0:.7f}".format(question.phase2.intercept),
                          "{0:.7f}".format(answer.phase2.intercept))
            assert_equals("{0:.7f}".format(question.phase2.k),
                          "{0:.7f}".format(answer.phase2.k))
            assert_equals("{0:.2f}".format(question.phase2.r0),
                          "{0:.2f}".format(answer.phase2.r0))
            assert_equals("{0:.4f}".format(question.phase2.efflux),
                          "{0:.4f}".format(answer.phase2.efflux))
            assert_equals("{0:.7f}".format(question.phase2.t05),
                          "{0:.7f}".format(answer.phase2.t05))
            assert_equals("{0:.7f}".format(question.phase2.r2),
                          "{0:.7f}".format(answer.phase2.r2))
        else:
            assert_equals(question.phase2.slope, answer.phase2.slope)
            assert_equals(question.phase2.intercept, answer.phase2.intercept)
            assert_equals(question.phase2.k, answer.phase2.k)
            assert_equals(question.phase2.r0, answer.phase2.r0)
            assert_equals(question.phase2.efflux, answer.phase2.efflux)
            assert_equals(question.phase2.t05, answer.phase2.t05)
            assert_equals(question.phase2.r2, answer.phase2.r2)

        assert_equals(question.phase1.xs[0], answer.phase1.xs[0])
        assert_equals(question.phase1.xs[1], answer.phase1.xs[1])
        if question.phase1.xs != ('', ''):
            assert_equals("{0:.7f}".format(question.phase1.slope),
                          "{0:.7f}".format(answer.phase1.slope))
            assert_equals("{0:.7f}".format(question.phase1.intercept),
                          "{0:.7f}".format(answer.phase1.intercept))
            assert_equals("{0:.7f}".format(question.phase1.k),
                          "{0:.7f}".format(answer.phase1.k))
            assert_equals("{0:.4f}".format(question.phase1.r0),
                          "{0:.4f}".format(answer.phase1.r0))
            assert_equals("{0:.6f}".format(question.phase1.efflux),
                          "{0:.6f}".format(answer.phase1.efflux))
            assert_equals("{0:.7f}".format(question.phase1.t05),
                          "{0:.7f}".format(answer.phase1.t05))
            assert_equals("{0:.7f}".format(question.phase1.r2),
                          "{0:.7f}".format(answer.phase1.r2))
        else:
            assert_equals(question.phase1.slope, answer.phase1.slope)
            assert_equals(question.phase1.intercept, answer.phase1.intercept)
            assert_equals(question.phase1.k, answer.phase1.k)
            assert_equals(question.phase1.r0, answer.phase1.r0)
            assert_equals(question.phase1.efflux, answer.phase1.efflux)
            assert_equals(question.phase1.t05, answer.phase1.t05)
            assert_equals(question.phase1.r2, answer.phase1.r2)
Code example #31
    def __init__(self):
        self.recherche = Recherche()
        self.fichier = Excel()
        self.idinse = self.recherche.getCommune()
Code example #32
def exit():
    root.destroy()


text1 = tk.Text(root, height=1, width=32)
text1.place(x=220, y=80)

text2 = tk.Text(root, height=1, width=32)
text2.place(x=220, y=120)

# rt=text1.get('1.0',tk.END)
# print(rt)
# rt2=text2.get('1.0',tk.END)
# print(rt2)
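# note: .place() returns None, so button3 and button4 below end up holding None rather than the Button widgets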
button3 = tk.Button(root,
                    text="Start",
                    command=lambda: Excel.on_open(filename1, filename),
                    height=1,
                    width=10).place(x=250, y=180)
button4 = tk.Button(root, text="Exit", command=exit, height=1,
                    width=10).place(x=350, y=180)

# text1.insert(INSERT,filename)
# text1=Entry(root,width=40)

# rt=text1.get("1.0",'end-1c')
#
# print(rt)
root.mainloop()
Code example #33
			x_series of data. Generally elut_ends_parsed.
		@type y_series: [float]
			y_series of data. elut_cpms_log data is curve-stripped forms.
		@type k: float
			Rate constant of the phase (slope *2.303).
		@type t05: float
			Half-life of exchange of the phase (0.693/k).
		@type r0: float
			Rate of radioisotope release from compartment at time = 0 (antilog 
				of intercept).
		@type efflux: float
			Efflux from compartment (r0/SA).
		@rtype: None
		"""
		self.xs = xs  # paired tuple (x, y)
		self.xy1, self.xy2 = xy1, xy2  # Each is a paired tuple
		self.r2, self.slope, self.intercept = r2, slope, intercept
		self.x_series, self.y_series = x_series, y_series
		self.k, self.t05, self.r0, self.efflux = k, t05, r0, efflux

if __name__ == "__main__":
	import Excel
	import os
	directory = os.path.dirname(os.path.abspath(__file__))
	file_path = os.path.join(directory, "Tests/1/Test_SingleRun1.xlsx")
	temp_data = Excel.grab_data(file_path)
	
	temp_analysis = temp_data.analyses[0]
	temp_analysis.kind = 'obj'
	temp_analysis.obj_num_pts = 4
	temp_analysis.analyze()
Code example #34
    FileType = r"GDB"
#-----------------------------------------------------------------------------------

filePath = str(os.path.dirname(
    workspaceEncoding.encode("cp1256"))).decode("cp1256")
ExcelName = filePath + "\\"
ExcelName = ExcelName + Database.decode("cp1256").rsplit(
    ".", 1)[0] + "_" + strftime("%Y%m%d_%H%M%S") + ".xlsx"
#ExcelName = ExcelName +str(CurrentTime.year).encode("cp1256")+"-"+str(CurrentTime.month).encode("cp1256")+"-"+str(CurrentTime.day).encode("cp1256")+".xlsx"
arcpy.AddMessage(ExcelName)
print(ExcelName)
if ExcelName == '#' or not ExcelName:
    ExcelName = r"ExcelName.xlsx"
#print(ExcelName)
#-----------------------------------------------------------------------------------
xls = Excel(ExcelName, FileType, Database)
#-----------------------------------------------------------------------------------
ErorrsMSG = ""
ErrorCount = 0
FCRow = 2
FCCol = 1
BusinessTableRow = 2
BusinessTableCol = 1
BusTableFildesRow = 2
BusTableFildesCol = 1
FCSheet = 1
BusinessTableSheet = 2
FieldsCount = 0
#-----------------------------------------------------------------------------------
datasets = arcpy.ListDatasets("*", "Feature") + ['']
datasets.sort()
Code example #35
		"""Closes windows when 'x' at top is clicked.
		
		@type self: MainFrame
		@type event: Event
		@rtype: None
		"""
		self.Destroy()


if __name__ == '__main__':
	import os
	import Excel

	directory = os.path.dirname(os.path.abspath(__file__))
	file_path = os.path.join(directory, "Tests/3/Test_MultiRun1.xlsx")
	temp_experiment = Excel.grab_data(file_path)
	for analysis in temp_experiment.analyses:
		analysis.kind = 'subj'
		analysis.xs_p1 = (1, 4)
		analysis.xs_p2 = (5, 10)
		analysis.xs_p3 = (11.5, 40)
		analysis.analyze()
	"""
	temp_experiment.analyses[0].kind = 'obj'
	temp_experiment.analyses[0].obj_num_pts = 8
	temp_experiment.analyses[0].analyze()
	"""
	app = wx.PySimpleApp()
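	# note: wx.PySimpleApp (above) is deprecated; with wxPython 4 (Phoenix) use wx.App() instead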
	app.frame = MainFrame(temp_experiment)

	app.frame.Show()
Code example #36
    def getPhenoResult(self, mainClassPath):
        print("getPhenoResult(" + mainClassPath + ")")
        listdir = os.listdir(mainClassPath)

        mainClass = mainClassPath.split("/")[-1]
        print("mainClass = " + mainClass)

        #subClassFolderSet = set()
        com_phenoSet = set()
        for lists in listdir:
            # if self.re_SubClass.match(lists):
            #     subClassFolderSet.add(lists)

            if self.re_Com_Pheno.match(lists):
                com_phenoSet.add(lists)

        subClassFolderList = self.mainClass_subClassMap.get(mainClass, None)

        if subClassFolderList is None:
            subClassFolderList = self.defaultSubClassList

        subClassCount = len(subClassFolderList)
        print("subClassCount = " + str(subClassCount))
        if len(com_phenoSet) != 1:
            raise Exception("com_pheno檔案位於 " + mainClassPath + " 路徑下不只一個,請移除或重新命名非必要項目(搜尋規則:com_pheno.*\.[(xlsx)(xls)(csv)])!")

        print("Get Excel File: " + os.path.join(mainClassPath, list(com_phenoSet)[0]))
        excel = Excel(os.path.join(mainClassPath, list(com_phenoSet)[0]).replace("\\","/"), self.blnShowExcel, self.blnForceRestartExcel)

        #excel.get_sheet(1)
        self.getPhoneSheet(excel)

        dataRecoderCount = excel.sheetRowCounts - 1
        print("dataRecoderCount = " + str(dataRecoderCount))

        randomPickLists = randomPickNGroup(dataRecoderCount, subClassCount)
        print ("randomPickList = ", randomPickLists)
        for randomList in randomPickLists:
            print (len(randomList))

        colNamesList = excel.get_rowData(1)
        colNamesString = list2str(colNamesList)
        #range1 = excel.get_range(1, 1, 1 , excel.sheetColCounts)
        print("list2str=", colNamesString)

        case = 0
        #for randomList in randomPickLists:
        for subClassFolder in subClassFolderList:

            phenoPath =  os.path.join(mainClassPath, "pheno_" + str(subClassFolder) + ".csv").replace("\\","/")
            phenoExceptPath = os.path.join(mainClassPath, "pheno_except_" + str(subClassFolder) + ".csv").replace("\\","/")

            csv = CSVFile(phenoPath, "utf-8")
            csv.writeLine(colNamesString)

            print("len(randomPickLists[case] = ", len(randomPickLists[case]))
            for num in randomPickLists[case]:

                rowData = excel.get_rowData(num+1)
                rowData = list(rowData)

                #if not self.isExcelFirstRowFloat:
                for colNum in self.com_pheno_NotFloatCols:
                    mappingColIdx = None
                    if is_integer(colNum):
                        #print("is_integer")
                        mappingColIdx = colNum - 1
                    else:
                        #print("is not integer")
                        m = -1
                        for colName in colNamesList:
                            #print(colName, colNum)
                            m+=1
                            if str(colNum).strip().lower() == colName.strip().lower():
                                mappingColIdx = m

                    #print("mappingColIdx = " + str(mappingColIdx))

                    try:
                        rowData[mappingColIdx] = int(rowData[mappingColIdx])
                    except Exception:
                        print("Change Excel Value Warning(int(rowData[" + str(mappingColIdx) + "])): " + str(rowData[mappingColIdx]), "You can check self.com_pheno_NotFloatCols, ignore this warning  if expected !!")
                        rowData[mappingColIdx] = str(rowData[mappingColIdx])


                csv.writeLine(list2str(rowData))

            csv = CSVFile(phenoExceptPath, "utf-8")
            csv.writeLine(colNamesString)

            differenceList = []
            i = 0
            for it in randomPickLists:
                if i != case:
                    differenceList.extend(it)
                i+=1

            print("differenceList", differenceList)
            print("differenceList len = " + str(len(differenceList)))


            for num in differenceList:
                rowData = excel.get_rowData(num+1)
                rowData = list(rowData)

                #if not self.isExcelFirstRowFloat:
                for colNum in self.com_pheno_NotFloatCols:
                    mappingColIdx = None
                    if is_integer(colNum):
                        #print("is_integer")
                        mappingColIdx = colNum - 1
                    else:
                        #print("is not integer")
                        m = -1
                        for colName in colNamesList:
                            #print(colName, colNum)
                            m+=1
                            if str(colNum).strip().lower() == colName.strip().lower():
                                mappingColIdx = m

                    #print("mappingColIdx = " + str(mappingColIdx))

                    try:
                        rowData[mappingColIdx] = int(rowData[mappingColIdx])
                    except Exception:
                        print("Change Excel Value Warning(int(rowData[" + str(mappingColIdx) + "])): " + str(rowData[mappingColIdx]), "You can check self.com_pheno_NotFloatCols, ignore this warning  if expected !!")
                        rowData[mappingColIdx] = str(rowData[mappingColIdx])


                csv.writeLine(list2str(rowData))

            case+=1

        excel.close()
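
randomPickNGroup, CSVFile and list2str are project helpers that are not shown here. Judging from how the result is used above (randomPickLists[case] holds the row numbers assigned to each sub-class), a hedged sketch of the grouping helper could look like this:

import random


def random_pick_n_group(record_count, group_count):
    """Shuffle row indices 1..record_count and split them into group_count groups."""
    indices = list(range(1, record_count + 1))
    random.shuffle(indices)
    return [indices[i::group_count] for i in range(group_count)]


print(random_pick_n_group(10, 3))  # e.g. [[4, 9, 1, 7], [2, 10, 5], [8, 3, 6]]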
Code example #37
File: Tests.py Project: rubenflamshepherd/vaCATE
        else:
            assert_equals(question.phase1.slope, answer.phase1.slope)
            assert_equals(question.phase1.intercept, answer.phase1.intercept)
            assert_equals(question.phase1.k, answer.phase1.k)
            assert_equals(question.phase1.r0, answer.phase1.r0)
            assert_equals(question.phase1.efflux, answer.phase1.efflux)
            assert_equals(question.phase1.t05, answer.phase1.t05)
            assert_equals(question.phase1.r2, answer.phase1.r2)


if __name__ == '__main__':
    import Excel

    directory = os.path.dirname(os.path.abspath(__file__))
    # temp_data = Excel.grab_data(directory, "/Tests/Edge Cases/Test_SubjMissMidPtPh123.xlsx")
    temp_data = Excel.grab_data(directory, "/Tests/4/Test_SingleRun7.xlsx")
    temp_question = temp_data.analyses[0]
    # temp_question.kind = 'subj'
    # temp_question.xs_p1 = (1,3)
    # temp_question.xs_p2 = (4,10)
    # temp_question.xs_p3 = (11.5,40)
    temp_question.kind = 'obj'
    temp_question.obj_num_pts = 8
    temp_question.analyze()

    # temp_exp = grab_answers(directory, "/Tests/Edge Cases/Test_SubjMissMidPtPh123.xlsx", temp_question.run.elut_ends)
    temp_exp = grab_answers(directory, "/Tests/4/Test_SingleRun7.xlsx",
                            temp_question.run.elut_ends)
    temp_answer = temp_exp.analyses[0]

    print "ANSWERS"
コード例 #38
0
    def getAnalysisResult(self, mainClassPath):

        print("getAnalysisResult(" + mainClassPath + ")")
        pearsonResultPath = os.path.join(mainClassPath, "PearsonResult.csv").replace("\\","/")
        listdir = os.listdir(mainClassPath)

        mainClass = mainClassPath.split("/")[-1]
        #mainClassNum = mainClass[0]
        print("mainClass = " + mainClass)
        #print("mainClassNum = " + mainClassNum)
        mainClassDictValue = self.mainClassDict.get(mainClass, None)
        # if mainClassDictValue is None:
        #     mainClassDictValue = self.mainClassDict.get(mainClass, None)

        if mainClassDictValue is None:
            mainClassDictValue = self.defaultSelectProperty

        print("mainClassDictValue = ", mainClassDictValue)
        if mainClassDictValue is None:
            raise Exception("getSlnFiles(" + mainClass + "): 未正確設定欲做為皮爾森分析的屬性,請正確設定 self.mainClassDict 或 self.defaultSelectProperty !!")

        subClassFolderSet = set()
        com_phenoSet = set()
        for lists in listdir:
            if self.re_SubClass.match(lists):
                subClassFolderSet.add(lists)

            if self.re_Com_Pheno.match(lists):
                com_phenoSet.add(lists)

        subClassCount = len(subClassFolderSet)
        print("subClassCount = " + str(subClassCount))
        if len(com_phenoSet) != 1:
            raise Exception("com_pheno檔案位於 " + mainClassPath + " 路徑下不只一個,請移除或重新命名非必要項目(搜尋規則:com_pheno.*\.[(xlsx)(xls)(csv)])!")

        print("Get Excel File: " + os.path.join(mainClassPath, list(com_phenoSet)[0]).replace("\\","/"))
        excel = Excel(os.path.join(mainClassPath, list(com_phenoSet)[0]).replace("\\","/"), self.blnShowExcel, self.blnForceRestartExcel)

        #excel.get_sheet(1)
        self.getPhoneSheet(excel)

        dataRecoderCount = excel.sheetRowCounts - 1
        print("dataRecoderCount = " + str(dataRecoderCount))

        #randomPickLists = randomPickNGroup(dataRecoderCount, subClassCount)

        randomPickDict = {}

        case = 0
        for subClassFolder in subClassFolderSet:
            randomPickList = []
            case+=1
            phone_v_Path = os.path.join(mainClassPath, "pheno_" + str(subClassFolder) + ".csv").replace("\\","/")
            print("Read phone_v_File(" + phone_v_Path + ")")
            csv = CSVFile(phone_v_Path, decoding="utf-8")
            #print(csv.readToString())
            list2D = csv.readTo2DList(",")
            #print list2D
            #print "-------------------------------------"

            mappingColIdx = None
            if is_integer(self.pheno_MappingColId):
                mappingColIdx = self.pheno_MappingColId - 1
            else:
                m = -1
                for colName in list2D[0]:
                    m+=1
                    if colName.strip().lower() == self.pheno_MappingColId.strip().lower():
                        mappingColIdx = m

            #print("mappingColIdx = " + str(mappingColIdx))

            list2D.pop(0)
            #print list2D[0]
            for line in list2D:
                #print(line)
                #print(line[0])
                if line[mappingColIdx] != "":
                    data = line[mappingColIdx].lower()
                    randomPickList.append(data)

            randomPickDict[subClassFolder] = randomPickList
            pass

        print ("randomPickDict = ", randomPickDict)
        for (k, randomList) in randomPickDict.items():
            print (len(randomList))

        colNamesString = list2str(excel.get_rowData(1))
        #range1 = excel.get_range(1, 1, 1 , excel.sheetColCounts)
        print("list2str=", colNamesString)

        #case = 0

        #subClassesResult = [] #[v1, v2, v3, ...]
        subClassesResult = {} #[v1, v2, v3, ...]
        for subClass in subClassFolderSet:
            result = self.getSlnFiles(excel, mainClassDictValue, mainClassPath, subClass, randomPickDict[subClass])
            #subClassesResult.append(result)
            subClassesResult[subClass] = result
            #case+=1

        print(subClassesResult)
        excel.close()
        print("++++++++++++++++++++++++++++++++++++++++++++++++")

        csv = CSVFile(pearsonResultPath, "utf-8")
        classCaseNum = 0
        #for slnFilesResult  in subClassesResult:
        for (subClassName, slnFilesResult)  in subClassesResult.items():
            #classRecoderSize = randomPickLists[classCaseNum]
            classRecoderSize = len(randomPickDict[subClassName])


            for lineNum in range(1, classRecoderSize+2):
                strLine = ""
                for sectionNum in range(1, len(mainClassDictValue)+1):
                    #print(slnFilesResult[sectionNum-1][lineNum-1])
                    strLine = strLine + list2str(slnFilesResult[sectionNum-1][lineNum-1]) + ",,"

                csv.writeLine(strLine)

            csv.writeLine("")
            classCaseNum+=1

        #subClassNum=0
        subClassResultDict = OrderedDict()
        for (subClassName, slnFilesResult)  in subClassesResult.items():
            #subClassNum+=1

            slnClassNum=0
            slnResultDict = OrderedDict()
            #subClass_v1
            v_type = slnFilesResult[0][0][2][9:]
            for slnFile in slnFilesResult:
                slnClassNum+=1
                pearsonr_list1 = []
                pearsonr_list2 = []
                #type_9
                select_Type = slnFile[0][5][5:]

                slnFile.pop(0)
                for lineData in slnFile:
                    print("lineData = ", lineData)
                    pearsonr_list1.append(lineData[4])
                    pearsonr_list2.append(lineData[5])

                print("pearsonr_list1(v_type=" + str(v_type) +  ", slnClassNum=" + str(slnClassNum) + ", select_Type=" + str(select_Type) + ") : " , pearsonr_list1)
                print("pearsonr_list2(v_type=" + str(v_type) +  ", slnClassNum=" + str(slnClassNum) + ", select_Type=" + str(select_Type) + ") : " , pearsonr_list2)

                pearson_result = pearsonr(pearsonr_list1, pearsonr_list2)
                pearsonr_list1[:] = []
                pearsonr_list2[:] = []
                print("pearsonr_result(v_type=" + str(v_type) +  ", slnClassNum=" + str(slnClassNum) + ", select_Type=" + str(select_Type) + ") : " + str(pearson_result))
                if str(select_Type) in slnResultDict:
                    raise Exception("select_Type 重複,請檢查 self.mainClassDict 或 self.defaultSelectProperty 是否有重複設定!!")
                slnResultDict[str(select_Type)] = pearson_result

            subClassResultDict[str(v_type)] = slnResultDict

        return subClassResultDict
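
The pearsonr call near the end of getAnalysisResult is presumably scipy.stats.pearsonr (its import is not visible in the excerpt). A minimal sketch of that call under this assumption, showing the (coefficient, p-value) pair that ends up in slnResultDict; the sample lists are made up.

from scipy.stats import pearsonr

pearsonr_list1 = [1.0, 2.0, 3.0, 4.0]
pearsonr_list2 = [2.1, 3.9, 6.2, 8.1]

r, p_value = pearsonr(pearsonr_list1, pearsonr_list2)  # (Pearson correlation coefficient, two-tailed p-value)
print("r = %.4f, p = %.4f" % (r, p_value))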
コード例 #39
0
ファイル: PIA FINAL.py プロジェクト: A-K4RM4/forbidden
#Alejandra Nohemí Tamez Montes      1925345

import os, requests, re
from bs4 import BeautifulSoup as bs
print("Aqui inicia el programa")
import Menu
Menu.funcionMenu()
print("A continuación usando las Expresiones Regulares obtenemos la información significativa")
import RegExp
RegExp.funcionRegExp()
print("A continuación utilizando la Api obtenemos la información del clima de la gira")
import Clima
Clima.funcionClima()
print("A continuación creamos el Excel y almacenamos la información obtenida")
import Excel
Excel.funcionExcel()
print("Aquí termina el programa")
コード例 #40
0
    from  app_channel where LEFT(create_time,10) ='%s'
     GROUP BY  app_channel_name,LEFT(create_time,10)
    ''' % (d2, d2)
    sql2 = '''SELECT COUNT(*), '注册数',LEFT(create_time,10),app_channel_name
    from  account where LEFT(create_time,10) >'%s'  and app_channel_name='hy10'
    GROUP BY  app_channel_name,LEFT(create_time,10)
    UNION ALL
    SELECT COUNT(*), '激活数',LEFT(create_time,10),app_channel_name
    from  app_channel where LEFT(create_time,10)  >'%s'  and app_channel_name='hy10'
     GROUP BY  app_channel_name,LEFT(create_time,10)
    ''' % (d3, d3)
    s = [sql, sql1, sql2]
    sql4 = '''SELECT phone_num  from account    
    '''

    weekbook = Excel.__getWorkbook__()
    for i in xrange(len(s)):

        sheet = weekbook.add_sheet('gj' + str(i), cell_overwrite_ok=True)
        results = __select__(s[i], conn())
        Excel.__writeExcel__(results, sheet)

    weekbook.save('scshuju&gjshuju.xls')
    with open('gjzhuce.csv', 'wb') as csvfile:
        writer = csv.writer(csvfile, dialect='excel')
        results = __select__(sql4, conn())
        writer.writerows(results)
    send_mail('scshuju&gjshuju.xls', 'gjzhuce.csv.zip')
    #send_mail('gjzhuce.csv.zip')
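
The workbook handling above goes through project helpers (Excel.__getWorkbook__ and Excel.__writeExcel__) that are not shown; add_sheet(..., cell_overwrite_ok=True) and saving to a .xls file suggest xlwt underneath. A rough sketch of what such helpers might look like under that assumption; it is illustrative only, not the project's actual Excel module.

import xlwt

def __getWorkbook__():
    # new in-memory .xls workbook
    return xlwt.Workbook(encoding='utf-8')

def __writeExcel__(results, sheet):
    # results: iterable of row tuples as returned by __select__()
    for row_idx, row in enumerate(results):
        for col_idx, value in enumerate(row):
            sheet.write(row_idx, col_idx, value)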