Code example #1
    system_code = table_row[1].value.upper().strip()  # system code, e.g. S0001
    if sys_code != '' and system_code != sys_code:
        continue  # only generate for the requested system code; an empty sys_code generates everything (this check could be hoisted earlier)
    # tableName_cn = table_row[4].value
    table_key = tableName_short + '-' + system_code  # dictionary key: table name + system code
    if table_key not in distTable:
        distTable[table_key] = []
    distTable[table_key].append(table.row(i))

print(distTable)
print(len(distTable))

row = 0  # row counter
tableSum = 0  # table counter
workbook = xlwt.Workbook(encoding='utf-8')
worksheet = workbook.add_sheet("sheet1")
# style = xlwt.easyxf('pattern: pattern solid, fore_colour ice_blue')  # cell style

for (k, v) in distTable.items():
    access_flag = v[0][2].value  # whether the table is onboarded
    if access_flag.upper() != 'Y':
        continue

    system_code = v[0][1].value.upper().strip()  # system code, e.g. S0001
    if sys_code != '' and system_code != sys_code:
        continue  # only generate for the requested system code; an empty sys_code generates everything

    tableName_short = v[0][3].value.upper().strip()  # table name (the prefix of k)
    tableName_cn = v[0][4].value.upper().strip()  # table name in Chinese
    library_name = v[0][5].value.upper().strip()  # database name
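
The snippet is cut off here. As a rough, self-contained sketch of the same grouping-then-writing pattern (all names, paths, and the write-out loop are illustrative assumptions, not code from the original project):

import xlrd
import xlwt

def export_grouped(src_path, dst_path, sys_code=''):
    # Group xlrd rows by "table name + system code", then dump each group
    # into a single xlwt sheet, one source row per output row.
    table = xlrd.open_workbook(src_path).sheet_by_index(0)
    dist_table = {}
    for i in range(1, table.nrows):  # skip the header row
        cells = table.row(i)
        system_code = str(cells[1].value).upper().strip()
        if sys_code and system_code != sys_code:
            continue  # an empty sys_code keeps every row
        key = str(cells[3].value).strip() + '-' + system_code
        dist_table.setdefault(key, []).append(cells)

    workbook = xlwt.Workbook(encoding='utf-8')
    worksheet = workbook.add_sheet('sheet1')
    out_row = 0
    for key, rows in dist_table.items():
        for row_cells in rows:
            for col, cell in enumerate(row_cells):
                worksheet.write(out_row, col, cell.value)
            out_row += 1
    workbook.save(dst_path)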
Code example #2
File: mdfreader.py  Project: henesissrl/mdfreader
    def exportToExcel(self, filename=None):
        """Exports mdf data into excel 95 to 2003 file

        Parameters
        ----------------
        filename : str, optional
            file name. If no name is defined, the original mdf file name and path are used

        Dependencies
        --------------------
        xlwt for python 2.6+
        xlwt3 for python 3.2+

        Notes
        --------
        xlwt is not fast, even for small files; consider binary formats like HDF5 or Matlab instead.
        If there are more than 256 channels, data will be saved over several worksheets.
        Also, Excel 2003 is becoming rare these days.
        """
        try:
            if PythonVersion < 3:
                import xlwt
            else:
                import xlwt3 as xlwt
        except ImportError:
            raise ImportError('xlwt module missing')
        if filename is None:
            filename = splitext(self.fileName)[0]
            filename = filename + '.xls'
        styleText = xlwt.easyxf('font: name Times New Roman, color-index black, bold off')
        coding = 'utf-8'
        wb = xlwt.Workbook(encoding=coding)
        channelList = list(self.keys())
        if PythonVersion < 3:
            Units = [self.getChannelUnit(channel).decode(coding, 'replace') for channel in list(self.keys())]
        else:
            Units = [self.getChannelUnit(channel) for channel in list(self.keys())]
        # Excel 2003 limits
        maxCols = 255
        maxLines = 65535
        workbooknumber = int(ceil(len(channelList) * 1.0 / (maxCols * 1.0)))
        tooLongChannels = []
        # split columns across several worksheets if there are more than 256 of them
        for workbook in range(workbooknumber):
            ws = wb.add_sheet('Sheet' + str(workbook))  # , cell_overwrite_ok = True )
            if workbook == workbooknumber - 1:  # last sheet
                columnrange = list(range(workbook * maxCols, len(channelList)))
            elif workbook < workbooknumber - 1 and workbooknumber > 1:  # first sheets
                columnrange = list(range(workbook * maxCols, (workbook + 1) * maxCols))
            for col in columnrange:
                # write header
                ws.write(0, col - workbook * maxCols, channelList[col], styleText)
                ws.write(1, col - workbook * maxCols, Units[col], styleText)
                vect = self.getChannelData(channelList[col])  # data vector
                if not len(vect) > maxLines:
                    if vect.dtype.kind not in ['S', 'U']:  # not a string or unicode dtype
                        for row in range(len(vect)):
                            ws.row(row + 2).set_cell_number(col - workbook * maxCols, vect[row])
                    else:  # string data cannot be written in bulk yet
                        if PythonVersion < 3:
                            try:
                                vect = vect.encode(coding)
                            except Exception:
                                pass
                        for row in range(len(vect)):
                            ws.row(row + 2).set_cell_text(col - workbook * maxCols, vect[row])
                else:  # channel too long, written only up to the Excel line limit
                    if vect.dtype.kind not in ['S', 'U']:  # not a string dtype
                        for row in range(maxLines):
                            ws.row(row + 2).set_cell_number(col - workbook * maxCols, vect[row])
                    else:  # string data cannot be written in bulk yet
                        if PythonVersion < 3:
                            vect = vect.encode(coding)
                        for row in range(maxLines):
                            ws.row(row + 2).set_cell_text(col - workbook * maxCols, vect[row])
                    tooLongChannels.append(channelList[col])  # to warn the user later that the channel was truncated
        wb.save(filename)  # writes workbook on HDD
        if len(tooLongChannels) > 0:  # if not empty, some channels have been not processed
            print('The following channels were too long to be written completely; consider resampling them:')
            print(tooLongChannels)
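
A hedged usage sketch for the method above; the reader class name and the file names are assumptions about the mdfreader API, not something shown in this example:

import mdfreader

# Hypothetical usage: recent mdfreader releases expose an Mdf reader class
# (this older file defined it as lowercase mdf).
yop = mdfreader.Mdf('measurement.mdf')        # illustrative input file
yop.exportToExcel()                           # defaults to measurement.xls next to the source
yop.exportToExcel(filename='export.xls')      # or an explicit target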
Code example #3
import random
import time

import xlwt  # needed for xlwt.Workbook() below
instances = [
    'comp01', 'comp02', 'comp03', 'comp04', 'comp05', 'comp06', 'comp07',
    'comp08', 'comp09', 'comp10', 'comp11', 'comp12', 'comp13', 'comp14',
    'comp15', 'comp16', 'comp17', 'comp18', 'comp19', 'comp20', 'comp21'
]

w1 = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]

# ------------ w1 parameter tuning ------------- #

configs.instance_name = "comp01.ectt"

wb = xlwt.Workbook()

ws = wb.add_sheet('w1' + configs.instance_name)

ws.write(0, 0, "W1")
ws.write(0, 1, "Cost")
ws.write(0, 2, "Time")

row = 0
for param_value in w1:
    parameters.w1 = param_value

    for instance in instances:
        configs.instance_name = instance + ".ectt"
        print('\nINSTANCE ' + configs.instance_name + '\n')
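
The tuning loop is cut off here. A minimal, self-contained sketch of the overall sweep, with the solver passed in as a callable because the project's actual entry point is not shown (names and the output file are assumptions):

import time

import xlwt

def sweep_w1(w1_values, instances, run_solver):
    # Run run_solver(instance, w1) for every combination and record
    # (w1, cost, elapsed seconds), one row per run.
    wb = xlwt.Workbook()
    ws = wb.add_sheet('w1')
    for col, title in enumerate(("W1", "Cost", "Time")):
        ws.write(0, col, title)
    row = 1
    for w1 in w1_values:
        for instance in instances:
            start = time.time()
            cost = run_solver(instance, w1)
            ws.write(row, 0, w1)
            ws.write(row, 1, cost)
            ws.write(row, 2, time.time() - start)
            row += 1
    wb.save('w1_tuning.xls')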
Code example #4
import json

import matplotlib as mpl
import pandas as pd
import xlsxwriter
import xlwt

mpl.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels with SimHei

if __name__ == '__main__':
    # open the file that lists the names
    name_file = open(
        r"C:\Users\Administrator\Desktop\Python聚类\数据爬取_以文本形式保存\names.txt", "r")
    name_text = name_file.read()
    name_list = name_text.split("\n")  # store the names in name_list
    name_file.close()

    item = 0

    book = xlwt.Workbook()  # create a new workbook
    sheet1 = book.add_sheet("sheet1")  # create a new worksheet
    sheet1.write(0, 0, r"时间\名称")  # header cell (time \ name)
    for temp in name_list:
        if (temp == ""):
            break
        item = item + 1
        # read each name's text file from the folder in turn
        sheet1.write(0, item, temp)
        print(temp)
        with open(
                r'C:\Users\Administrator\Desktop\Python聚类\数据爬取_以文本形式保存\{name}.txt'
                .format(name=temp), 'r') as file:
            file_text = file.read()
            json_text = json.loads(file_text)
            # x = []  # time-axis values
Code example #5
def monthly_excel(year: int, month: int):
    output = BytesIO()

    items = Summary.objects.filter(
        date=datetime.date(year, month, 1)).order_by(
            'employee__first_name',
            'employee__last_name',
        )

    wbk = xlwt.Workbook()

    sheet = wbk.add_sheet("Kopsavilkums %s-%s" % (year, month))
    sheet.write(0, 0, "Kopsavilkums par %s - %s" % (year, month))
    row = 1
    header_row = ('#', 'ID', 'Vārds', 'Uzvārds', 'Pers.kods', 'Līguma NR',
                  'Likme', 'Stundas', 'Alga', 'Procedūru skaits',
                  'Kontaktu skaits')

    for col, value in enumerate(header_row):
        sheet.write(row, col, value)
    row = 2
    for index, item in enumerate(items, start=1):
        salary = round(
            float(item.employee.contract_rate) * item.hours_worked, 2)
        row_values = (
            index,
            item.employee_id,
            item.employee.first_name,
            item.employee.last_name,
            item.employee.ssn,
            item.employee.contract_no,
            item.employee.contract_rate,
            item.hours_worked,
            salary,
            item.total_procedures,
            item.total_contacts,
        )

        for col, value in enumerate(row_values):
            sheet.write(row, col, value)
        row += 1

    for item in items:
        sheet = wbk.add_sheet(slugify(str(item)))
        sheet.write(
            0, 0, "Darbinieka %s visi darbi par %s - %s" %
            (item.employee, year, month))
        row = 1
        header_row = ('#', 'ID', 'No', 'Līdz', 'Stundu skaits', 'Slimnīca',
                      'Nodaļa', 'Procedūru skaits', 'Kontaktu skaits',
                      'Komentāri', 'Pievienots')
        for col, value in enumerate(header_row):
            sheet.write(row, col, value)
        works = Work.objects.filter(employee=item.employee,
                                    start__year=item.date.year,
                                    start__month=item.date.month)
        row = 2
        for index, work in enumerate(works, start=1):
            row_values = (
                index, work.id,
                work.start.astimezone(riga_tz).strftime("%Y-%m-%d %H:%M"),
                work.end.astimezone(riga_tz).strftime("%Y-%m-%d %H:%M"),
                work.hours_worked, work.department.hospital.name,
                work.department.name, work.number_of_procedures,
                work.number_of_contacts, work.comments,
                work.created.astimezone(riga_tz).strftime("%Y-%m-%d %H:%M"))

            for col, value in enumerate(row_values):
                sheet.write(row, col, value)
            row += 1

    wbk.save(output)
    return output
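
One way such an in-memory workbook is typically served from Django; a sketch under the assumption that monthly_excel is wired to a view (the view name, URL wiring, and download filename are illustrative):

from django.http import HttpResponse

def monthly_excel_view(request, year: int, month: int):
    # Hypothetical view: stream the in-memory .xls back to the browser.
    output = monthly_excel(year, month)
    response = HttpResponse(output.getvalue(),
                            content_type='application/vnd.ms-excel')
    response['Content-Disposition'] = (
        'attachment; filename="kopsavilkums-%s-%s.xls"' % (year, month))
    return response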
Code example #6
File: exportar.py  Project: mbenyoub/server
    def exportar(self, cr, uid, ids, directory=".", context=None):
        #ExcelExportView.index()
        _cp_path = '/web/export/zb_excel_export'
        result = {}
        company = self.pool.get('res.company').browse(cr, uid, 1, context=context).name


#_________________________________________Looking up the variables used for the report_______________________________________________#
        for reporte in self.browse(cr, uid, ids, context=context):
            id = reporte.id              

#_________________________________________Variables used to build the query_________________________________________________#
        debit = 'debit'+reporte.mes
        credit = 'credit'+reporte.mes
        balance = 'balance'+reporte.mes 

#_________________________________________Query for the trial balance (Balanza de comprobacion) report___________________________________________#
        if reporte.cero:
            if reporte.reporte == 'com':
                if reporte.mensual:
                    query = ("select fiscalyear, account_code, account_name, account_level, initial_balance, %s, %s, %s from account_annual_balance where fiscalyear = cast(%s as text) and account_level <= %s"%(debit, credit, balance, reporte.periodo.name,(int(reporte.nivel))))               
                    cr.execute("select fiscalyear, account_code, account_name, account_level, initial_balance, %s, %s, %s from account_annual_balance where fiscalyear = cast(%s as text) and account_level <= %s"%(debit, credit, balance, reporte.periodo.name,(int(reporte.nivel))))
                    resultado = cr.fetchall()
                    registros = cr.rowcount
                    cabeceras = cr.description
                elif not reporte.mensual:
                    query = ("select fiscalyear, account_code, account_name, account_level, initial_balance, debit1, credit1, balance1, debit2, credit2, balance2, debit3, credit3, balance3, debit4, credit4, balance4, debit5, credit5, balance5, debit6, credit6, balance6, debit7, credit7, balance7, debit8, credit8, balance8, debit9, credit9, balance9, debit10, credit10, balance10, debit11, credit11, balance11, debit12, credit12, balance12 from account_annual_balance where fiscalyear = cast(%s as text) and account_level <= %s"%(reporte.periodo.name,(int(reporte.nivel))))                                   
                    cr.execute("select fiscalyear, account_code, account_name, account_level, initial_balance, debit1, credit1, balance1, debit2, credit2, balance2, debit3, credit3, balance3, debit4, credit4, balance4, debit5, credit5, balance5, debit6, credit6, balance6, debit7, credit7, balance7, debit8, credit8, balance8, debit9, credit9, balance9, debit10, credit10, balance10, debit11, credit11, balance11, debit12, credit12, balance12 from account_annual_balance where fiscalyear = cast(%s as text) and account_level <= %s"%(reporte.periodo.name,(int(reporte.nivel))))
                    resultado = cr.fetchall()
                    registros = cr.rowcount
                    cabeceras = cr.description
        elif not reporte.cero:
            if reporte.reporte == 'com':
                if reporte.mensual:
                    query = ("select fiscalyear, account_code, account_name, account_level, initial_balance, %s, %s, %s from account_annual_balance where fiscalyear = cast(%s as text) and account_level <= %s and (%s) != 0"%(debit, credit, balance, reporte.periodo.name,(int(reporte.nivel)), balance))                    
                    cr.execute ("select fiscalyear, account_code, account_name, account_level, initial_balance, %s, %s, %s from account_annual_balance where fiscalyear = cast(%s as text) and account_level <= %s and (%s) != 0"%(debit, credit, balance, reporte.periodo.name,(int(reporte.nivel)), balance))
                    resultado = cr.fetchall()
                    registros = cr.rowcount
                    cabeceras = cr.description
                elif not reporte.mensual:
                    query = ("select fiscalyear, account_code, account_name, account_level, initial_balance, debit1, credit1, balance1, debit2, credit2, balance2, debit3, credit3, balance3, debit4, credit4, balance4, debit5, credit5, balance5, debit6, credit6, balance6, debit7, credit7, balance7, debit8, credit8, balance8, debit9, credit9, balance9, debit10, credit10, balance10, debit11, credit11, balance11, debit12, credit12, balance12 from account_annual_balance where fiscalyear = cast(%s as text) and account_level <= %s and (balance1 + balance2 + balance3 + balance4 + balance5 + balance6 + balance7 + balance8 + balance9 + balance10 + balance11 + balance12) != 0"%(reporte.periodo.name,(int(reporte.nivel))))
                    cr.execute("select fiscalyear, account_code, account_name, account_level, initial_balance, debit1, credit1, balance1, debit2, credit2, balance2, debit3, credit3, balance3, debit4, credit4, balance4, debit5, credit5, balance5, debit6, credit6, balance6, debit7, credit7, balance7, debit8, credit8, balance8, debit9, credit9, balance9, debit10, credit10, balance10, debit11, credit11, balance11, debit12, credit12, balance12 from account_annual_balance where fiscalyear = cast(%s as text) and account_level <= %s and (balance1 + balance2 + balance3 + balance4 + balance5 + balance6 + balance7 + balance8 + balance9 + balance10 + balance11 + balance12) != 0"%(reporte.periodo.name,(int(reporte.nivel))))
                    resultado = cr.fetchall()
                    registros = cr.rowcount
                    cabeceras = cr.description

#_________________________________________Query for the balance sheet (Balance General) report_________________________________________________#

            elif reporte.reporte == 'bal':
                query = ("""select a.cuenta, a.nombre, a.saldo from (select 'A' as f, 'Activo' as cuenta, null as nombre, null as saldo from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id
                union
                select 'B', code, account_account.name, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1111001000 and cast(code as bigint) <= 1153003000 and account_move.state != 'draft' group by code, account_account.name
                union
                select 'B1', 'Total Circulante', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1111001000 and cast(code as bigint) <= 1153003000 and account_move.state != 'draft'
                union
                select 'C', code, account_account.name, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1211001000 and cast(code as bigint) <= 1212015000 and account_move.state != 'draft' group by code, account_account.name
                union
                select 'C1', 'Total Fijo', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1211001000 and cast(code as bigint) <= 1212015000 and account_move.state != 'draft'
                union
                select 'D', code, account_account.name, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1221001000 and cast(code as bigint) <= 1291004000 and account_move.state != 'draft' group by code, account_account.name
                union
                select 'D1', 'Total Diferido', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1221001000 and cast(code as bigint) <= 1291004000 and account_move.state != 'draft'
                union 
                select 'E', 'Suma Activo', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1111001000 and cast(code as bigint) <= 1291004000 and account_move.state != 'draft'
                union
                select 'F', 'Pasivo', null, null from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id
                union
                select 'G' as f, code as cod, account_account.name as nom, sum(debit-credit) as bal from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 2111001000 and cast(code as bigint) <= 2202123000 and account_move.state != 'draft' group by code, account_account.name
                union
                select 'G1', 'Total Corto Plazo', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 2111001000 and cast(code as bigint) <= 2202123000 and account_move.state != 'draft'
                union
                select 'H' as f, code as cod, account_account.name as nom, sum(debit-credit) as bal from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 2511001000 and cast(code as bigint) <= 2591001000 and account_move.state != 'draft' group by code, account_account.name
                union
                select 'H1', 'Total Largo Plazo', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 2511001000 and cast(code as bigint) <= 2591001000 and account_move.state != 'draft'
                union
                select 'I', 'Suma Pasivo', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 2111001000 and cast(code as bigint) <= 2591001000 and account_move.state != 'draft'
                union
                select 'J', 'Capital', null, null from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id
                union
                select 'K' as f, code as cod, account_account.name as nom, sum(debit-credit) as bal from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 3111001000 and cast(code as bigint) <= 3911003000 and account_move.state != 'draft' group by code, account_account.name
                union
                select 'K1', 'Suma Capital', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 3111001000 and cast(code as bigint) <= 3911003000 and account_move.state != 'draft'
                union
                select 'L', 'Suma Pasivo+Capital', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 2111001000 and cast(code as bigint) <= 3911003000 and account_move.state != 'draft'
                ) as a order by f""".format(reporte.periodo.name))
                cr.execute("""select a.cuenta, a.nombre, a.saldo from (select 'A' as f, 'Activo' as cuenta, null as nombre, null as saldo from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id
                union
                select 'B', code, account_account.name, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1111001000 and cast(code as bigint) <= 1153003000 and account_move.state != 'draft' group by code, account_account.name
                union
                select 'B1', 'Total Circulante', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1111001000 and cast(code as bigint) <= 1153003000 and account_move.state != 'draft'
                union
                select 'C', code, account_account.name, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1211001000 and cast(code as bigint) <= 1212015000 and account_move.state != 'draft' group by code, account_account.name
                union
                select 'C1', 'Total Fijo', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1211001000 and cast(code as bigint) <= 1212015000 and account_move.state != 'draft'
                union
                select 'D', code, account_account.name, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1221001000 and cast(code as bigint) <= 1291004000 and account_move.state != 'draft' group by code, account_account.name
                union
                select 'D1', 'Total Diferido', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1221001000 and cast(code as bigint) <= 1291004000 and account_move.state != 'draft'
                union 
                select 'E', 'Suma Activo', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 1111001000 and cast(code as bigint) <= 1291004000 and account_move.state != 'draft'
                union
                select 'F', 'Pasivo', null, null from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id
                union
                select 'G' as f, code as cod, account_account.name as nom, sum(debit-credit) as bal from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 2111001000 and cast(code as bigint) <= 2202123000 and account_move.state != 'draft' group by code, account_account.name
                union
                select 'G1', 'Total Corto Plazo', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 2111001000 and cast(code as bigint) <= 2202123000 and account_move.state != 'draft'
                union
                select 'H' as f, code as cod, account_account.name as nom, sum(debit-credit) as bal from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 2511001000 and cast(code as bigint) <= 2591001000 and account_move.state != 'draft' group by code, account_account.name
                union
                select 'H1', 'Total Largo Plazo', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 2511001000 and cast(code as bigint) <= 2591001000 and account_move.state != 'draft'
                union
                select 'I', 'Suma Pasivo', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 2111001000 and cast(code as bigint) <= 2591001000 and account_move.state != 'draft'
                union
                select 'J', 'Capital', null, null from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id
                union
                select 'K' as f, code as cod, account_account.name as nom, sum(debit-credit) as bal from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 3111001000 and cast(code as bigint) <= 3911003000 and account_move.state != 'draft' group by code, account_account.name
                union
                select 'K1', 'Suma Capital', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 3111001000 and cast(code as bigint) <= 3911003000 and account_move.state != 'draft'
                union
                select 'L', 'Suma Pasivo+Capital', null, sum(debit-credit) from account_move_line inner join account_account on account_move_line.account_id = account_account.id inner join account_move on account_move_line.move_id = account_move.id where extract(year from account_move.date)=cast({0} as int) and cast(code as bigint) >= 2111001000 and cast(code as bigint) <= 3911003000 and account_move.state != 'draft'
                ) as a order by f""".format(reporte.periodo.name))
                resultado = cr.fetchall()
                registros = cr.rowcount
                cabeceras = cr.description

#_________________________________________Query for the income statement (Estado de resultados) report______________________________________________#
            elif reporte.reporte == 'est':     
                query = ("""select a.cuenta, a.nombre, a.periodo, a.acumulado from (select 'A' as f, 'Ingresos' as cuenta, null as nombre, null as periodo, null as acumulado from account_account
                union
                select 'B', code, name, (select (sum(debit-credit)*-1) from account_move_line where account_id=account_account.id and extract(month from date)={1} and extract(year from date)={0}),(select (sum(debit-credit)*-1) from account_move_line where account_id=account_account.id and extract(month from date)<={1} and extract(year from date)={0}) from account_account where cast(code as bigint) >= 5111001000 and cast(code as bigint) <= 5114001000 or cast(code as bigint) >= 9211001000 and cast(code as bigint) <= 9212008000 group by id, code, name
                union
                select 'B1', 'Total Ingresos', null, (select (sum(debit-credit)*-1) from account_move_line inner join account_account on account_move_line.account_id = account_account.id where extract(month from date)={1} and extract(year from date)={0} and cast(code as bigint) >= 5111001000 and cast(code as bigint) <= 5114001000 or cast(code as bigint) >= 9211001000 and cast(code as bigint) <= 9212008000), (select (sum(debit-credit)*-1) from account_move_line inner join account_account on account_move_line.account_id = account_account.id where extract(month from date)<={1} and extract(year from date)={0} and cast(code as bigint) >= 5111001000 and cast(code as bigint) <= 5114001000 or cast(code as bigint) >= 9211001000 and cast(code as bigint) <= 9212008000)
                union
                select 'C' as f, 'Egresos' as cuenta, null as nombre, null as periodo, null as acumulado from account_account
                union
                select 'D', code, name, (select (sum(debit-credit)*-1) from account_move_line where account_id=account_account.id and extract(month from date)={1} and extract(year from date)={0}),(select (sum(debit-credit)*-1) from account_move_line where account_id=account_account.id and extract(month from date)<={1} and extract(year from date)={0}) from account_account where cast(code as bigint) >= 6111001000 and cast(code as bigint) <= 9113099000 group by id, code, name
                union
                select 'D1', 'Total Egresos', null, (select (sum(debit-credit)*-1) from account_move_line inner join account_account on account_move_line.account_id = account_account.id where extract(month from date)={1} and extract(year from date)={0} and cast(code as bigint) >= 6111001000 and cast(code as bigint) <= 9113099000),(select (sum(debit-credit)*-1) from account_move_line inner join account_account on account_move_line.account_id = account_account.id where extract(month from date)<={1} and extract(year from date)={0} and cast(code as bigint) >= 6111001000 and cast(code as bigint) <= 9113099000)
                union
                select 'E', 'Utilidad o Perdida', null, (select (sum(debit-credit)*-1) from account_move_line inner join account_account on account_move_line.account_id = account_account.id where extract(month from date)={1} and extract(year from date)={0} and cast(code as bigint) >= 5111001000 and cast(code as bigint) <= 9212008000), (select (sum(debit-credit)*-1) from account_move_line inner join account_account on account_move_line.account_id = account_account.id where extract(month from date)<={1} and extract(year from date)={0} and cast(code as bigint) >= 5111001000 and cast(code as bigint) <= 9212008000)
                ) as a where a.acumulado is not null order by f""".format(reporte.periodo.name, reporte.mes))

                cr.execute(query)
                resultado = cr.fetchall()
                registros = cr.rowcount
                cabeceras = cr.description
        
#__________________________________________________Report label and header variables____________________________________________________________#
        if reporte.reporte == 'est':
            report = 'Estado de resultados'
        elif reporte.reporte == 'bal':
            report = 'Balance General'
        elif reporte.reporte == 'com':
            report = 'Balanza de comprobacion'

        com = {}
        com[0] = {0:report, 1:'Periodo '+reporte.periodo.name, 2:'Mes '+reporte.mes, 3:company}

        outputquery = "COPY ({0}) TO STDOUT WITH CSV HEADER".format(query)
        with open('tmp/excel.txt', 'w') as f:
            cr.copy_expert(outputquery, f)  # the csv.writer created here in the original was never used
        with open('tmp/excel.txt', 'r') as l:
            data = l.read()
        fil = data.split('\n')
#______________________________________________________PDF file creation (disabled)___________________________________________________________#
        # A commented-out FPDF draft lived here: it paged through `resultado`,
        # wrote the headers from `cabeceras`, saved tmp/pdf.pdf, and stored the
        # base64-encoded result on the record. None of it was ever enabled.

#______________________________________________________XLS file creation______________________________________________________________#
        libro = xlwt.Workbook()
        libro1 = libro.add_sheet("Consulta")

        for num in range(registros):
            row = libro1.row(num)
            fila = resultado[num]
            if num == 0:
                fila = com[num]
                for index, col in enumerate(fila):
                    column = libro1.col(col)
                    column.width = 256 * 30
                    comp = com[0]
                    row.write(index, comp[col])
            else:
                for index, col in enumerate(fila):
                    if num == 1:
                        cab = cabeceras[index]
                        row.write(index, cab[0])
                    elif num>1:    
                        fila = resultado[num-2]
                        celda = fila[index]
                        row.write(index, celda)
            if num+2 == registros:
                row = libro1.row(num+2)
                for index, col in enumerate(fila):
                    fila = resultado[num]
                    celda = fila[index]
                    row.write(index, celda)        
            if num+1 == registros:
                row = libro1.row(num+2)
                for index, col in enumerate(fila):
                    fila = resultado[num]
                    celda = fila[index]
                    row.write(index, celda)

        fp = StringIO.StringIO()
        libro.save(fp)
        fp.seek(0)        
        data = fp.read()
        fp.close()
        out = base64.encodestring(data)
        libro.save('tmp/Excel.xls')  # the original saved XLS data under the misleading name 'tmp/Excel.pdf'
  
        #return libro
        this = self.browse(cr, uid, ids, context=context)[0]
        # mods = sorted(map(lambda m: m.name, this.modules)) or ['all']
        # lang = this.lang if this.lang != NEW_LANG_KEY else False
        filename = 'new'
        # if lang:
        #     filename = get_iso_codes(lang)
        # elif len(mods) == 1:
        # filename = mods[0]
        extension = "xls"#this.format
        # if not lang and extension == 'po':
        #     extension = 'pot'
        name = "%s.%s" % (filename, extension)
        this.write({'binario': out, 'name': name})
Code example #7
def evaluate(X_train_list, Y_train_list, X_all_list, Y_all_list, evaluate_train, model_path):
    tf.reset_default_graph()

    x = tf.placeholder(tf.float32, [None, 224, 224, 1])
    y_ = tf.placeholder(tf.float32, [None, classes])
    training = tf.placeholder(tf.bool)

    fc7, logits = VggNet(x, 1.0, False)
    right_number = comput_right(logits, y_)

    saver = tf.train.Saver()
    train_features = []
    test_features = []
    with tf.Session() as sess:
        print('Start to restore model')
        saver.restore(sess,
                      model_path)  # './model_saving/resnet_softmax/2018-08-11_00.35/osteoporosis_classifier'  self.model_save_path

        if evaluate_train:
            print('Start to evaluate train data')
            train_patch_right_num_all = 0
            # original_right_all = 0
            if train_size % train_batch_size == 0:
                for i in range(int(train_size / train_batch_size)):
                    batch_xs, batch_ys = next_batch(X_train_list, Y_train_list, train_batch_size, i, train_size)
                    # collect the fc7 feature vector for this batch
                    logits_feature = sess.run(fc7,
                                              feed_dict={x: batch_xs, y_: batch_ys,
                                                         training: False})
                    train_features.append(logits_feature)
                    right_num = sess.run(right_number,
                                         feed_dict={x: batch_xs, y_: batch_ys, training: False})
                    print(i, '/', int(train_size / train_batch_size), right_num)

                    train_patch_right_num_all += right_num

                print('patch_right_num_all:', train_patch_right_num_all)
                train_patch_acc = train_patch_right_num_all / train_size
                print('patch-level train accuracy:', train_patch_acc)
                # save the collected training features to an .xls file
                train_features = np.array(train_features)
                train_features = train_features.reshape([-1, 128])
                book = xlwt.Workbook()
                sheet1 = book.add_sheet(u'sheet1', cell_overwrite_ok=True)
                for i in range(train_features.shape[0]):
                    for j in range(train_features.shape[1]):
                        sheet1.write(i, j, float(train_features[i][j]))
                book.save('./vggnet_featureNew.xls')
        print('Start to evaluate test data')
        test_patch_right_num_all = 0
        original_right_all = 0
        tp = 0
        tn = 0
        if test_size % test_batch_size == 0:

            for i in range(int(test_size / test_batch_size)):
                batch_xs, batch_ys = next_batch(X_all_list, Y_all_list, test_batch_size, i, test_size)
                logits_feature = sess.run(fc7,
                                          feed_dict={x: batch_xs, y_: batch_ys,
                                                     training: False})

                test_features.append(logits_feature)

                patch_right_num = sess.run(right_number,
                                           feed_dict={x: batch_xs, y_: batch_ys, training: False})
                print(i, '/', int(test_size / test_batch_size), '   ', patch_right_num)
                # if patch_right_num > all_batch_size / 2:
                #     original_right_all += 1
                #     if i < int((test_size / test_batch_size) / 2):
                #         tn += 1
                #     else:
                #         tp += 1
                # test_patch_right_num_all += patch_right_num
            test_features = np.array(test_features)
            test_features = test_features.reshape([-1, 128])
            book = xlwt.Workbook()
            sheet1 = book.add_sheet(u'sheet1', cell_overwrite_ok=True)
            for i in range(test_features.shape[0]):
                for j in range(test_features.shape[1]):
                    sheet1.write(i, j, float(test_features[i][j]))
            book.save('./vggnet_featureTest.xls')
        # A commented-out variant here recomputed per-image accuracy from the
        # patch votes and derived PPV, TPR, and F1; it was already disabled in
        # the original source.
    sess.close()
Code example #8
def export_base_project(project_id):
    """
    Export a single project to an Excel file.
    """
    # Look up the project name
    sql = "SELECT name from `tk_chapter` WHERE id = %s"
    project_name = db.select_one(sql, (project_id))["name"]

    # Look up the project's chapters
    sql = "SELECT id, name from `tk_chapter` WHERE pid = %s"
    chapters = db.select_all(sql, (project_id))

    # Look up each chapter's knowledge points
    for i, item in enumerate(chapters):
        sql = "SELECT id, name from `tk_chapter` WHERE pid = %s"
        knows = db.select_all(sql, (item["id"]))

        # Look up the questions under each knowledge point
        for j, know in enumerate(knows):
            sql = "SELECT `id`, `title`,`content`,`select`,`answer` from `tk_questions` WHERE chapter_id = %s"
            questions = db.select_all(sql, (know["id"]))
            knows[j]["questions"] = questions

        chapters[i]["child"] = knows

    excel = xlwt.Workbook()
    sheet = excel.add_sheet("Sheet1")
    headers = [
        "序号", "考题标题", "考题科目", "考题科目", "考题类型", "考题答案", "选项1", "选项2", "选项3",
        "选项4", "选项5", "图片", "解析", "关键词"
    ]
    # Write the header row
    row_header = sheet.row(0)
    for i in range(len(headers)):
        row_header.write(i, headers[i])

    number = 1  # running sequence number
    for chapter in chapters:
        for know in chapter["child"]:
            for question in know["questions"]:
                row = sheet.row(number)

                # Extract the question type
                question_type = re.findall(r"\((.+)\)", question["title"])[0]
                # Extract the answer and the explanation
                try:
                    [answer, analyze
                     ] = re.findall(r"^正确答案:([A-F]+)[\s\S]*?\(单击隐藏\)(.*)$",
                                    question["answer"],
                                    re.DOTALL | re.UNICODE)[0]
                except IndexError:
                    answer = ""
                    analyze = re.findall(r"\(单击隐藏\)([\s\S]*)$",
                                         question["answer"],
                                         re.DOTALL | re.UNICODE)[0].strip()

                selects = question["select"].split("\n")
                selects = list(
                    map(lambda select: re.sub("^[A-F] ", "", select), selects))
                if len(selects) < 5:
                    selects += ["" for x in range(0, 5 - len(selects))]

                item = [
                    number,
                    question["content"],  # question title
                    chapter["name"],  # chapter
                    know["name"],  # knowledge point
                    question_type,  # question type
                    answer,  # answer
                    *selects,  # the five options
                    "",  # image
                    analyze,  # explanation
                    "",  # keywords
                ]
                for cell in range(len(item)):
                    row.write(cell, item[cell])

                number += 1

    # xlwt writes the legacy binary format, so save with an .xls extension
    filename = project_name.strip().replace("/", " ") + ".xls"
    excel.save(filename)
Code example #9
def isNum(value):
    # the "def" line and "try:" were cut off in the original snippet
    try:
        value + 1
    except TypeError:
        return False
    else:
        return True


# load the student IDs and passwords into the lists
for row in range(1, nrows):
    cell_value0 = sheet1.cell(row, 1).value
    cell_value1 = sheet1.cell(row, 0).value
    cell_value1 = int(cell_value1)
    uname.append(cell_value1)
    if isNum(cell_value0):
        cell_value0 = int(cell_value0)
        pwd.append(str(cell_value0))
    else:
        pwd.append(cell_value0)
#print(uname)
idu = 0
bk = xlwt.Workbook()  # create the workbook
sheet = bk.add_sheet('sheet totall', cell_overwrite_ok=True)  # create the worksheet
bk.save('D:\\test.xls')
# fetch each student's free-period information
for un in uname:

    test = loginin(un, pwd[idu])
    test.login()
    test.nameInTable(idu)

    idu = idu + 1
Code example #10
    def __init__(self, path='/tmp/report.xls'):
        self.wbk = xlwt.Workbook()
        self.path = path
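
Only the constructor is shown. A minimal sketch of how such a report writer might be completed and used; the class and method names below are assumptions, not the project's actual API:

import xlwt

class Report(object):
    def __init__(self, path='/tmp/report.xls'):
        self.wbk = xlwt.Workbook()
        self.path = path

    def add_table(self, name, header, rows):
        # One sheet per table: header on row 0, data rows below it.
        ws = self.wbk.add_sheet(name)
        for col, title in enumerate(header):
            ws.write(0, col, title)
        for r, values in enumerate(rows, start=1):
            for col, value in enumerate(values):
                ws.write(r, col, value)

    def save(self):
        self.wbk.save(self.path)

# Usage:
# report = Report()
# report.add_table('totals', ['name', 'count'], [('a', 1), ('b', 2)])
# report.save()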
Code example #11
File: vercount.py  Project: frankewq/defect-sta
	else:
		state = 2
#	print '%s, %s, %s, %s' % (id, module, level, state)
	# avoid inserting duplicate records
	cursor.execute('SELECT * FROM buglist WHERE id=?', (id,))
	res = cursor.fetchone()
	if res is None:
		cursor.execute('INSERT INTO buglist (id, module, level,category, state, ctime) VALUES (?, ?, ?, ?, ?, ?)', (id, module, level, category, state, ctime))
	else:
		print "Something wrong with db!"

buglist_db.commit()


# Write the Excel file
wb = xlwt.Workbook(encoding = 'ascii')
ws = wb.add_sheet('Count graphic')


# header row
ws.write(0, 0, "MODULE(ALL)")
ws.write(0, 1, "COUNT")
ws.write(0, 2, "MODULE(SER)")
ws.write(0, 3, "COUNT")
ws.write(0, 4, "STATE")
ws.write(0, 5, "COUNT")
ws.write(0, 6, "LEVEL")
ws.write(0, 7, "COUNT")
ws.write(0, 8, "CTIME")
ws.write(0, 9, "COUNT")
ws.write(0, 10, "CATEGORY")
Code example #12
    def write_dataCollect(self):
        style0 = xlwt.easyxf('font: name Times New Roman',
                             num_format_str='#,##0.00')

        wb = xlwt.Workbook()
        ws = wb.add_sheet('A Test Sheet', cell_overwrite_ok=True)

        Row0 = [
            u'设备编号', u'被试编号', u'出生日期', u'被试姓名', u'被试性别', u'利手', u'登录时间',
            u'测试条件', u'练习或正式实验', u'行数序号', u'单行用时', u'单行错误次数', u'总用时', u'总错误次数'
        ]

        # write the header row
        for i in range(len(Row0)):
            ws.write(0, i, Row0[i], style0)

        # subject information
        testID = unicode(self.task.testID)
        equimentID = unicode(self.task.equimentID)
        bornDate = unicode(self.task.bornDate)
        testName = unicode(self.task.testName)
        Sex = unicode(self.task.Sex)
        Hand = unicode(self.task.Hand)
        signupTime = unicode(self.task.signUpTime)

        ws.write(1, 0, equimentID, style0)
        ws.write(1, 1, testID, style0)
        ws.write(1, 2, bornDate, style0)
        ws.write(1, 3, testName, style0)
        ws.write(1, 4, Sex, style0)
        ws.write(1, 5, Hand, style0)
        ws.write(1, 6, signupTime, style0)

        # task types
        typeList = [
            "10ms*100ms", "15ms*100ms", "20ms*100ms", "10ms*200ms",
            "15ms*200ms", "20ms*200ms", "10ms*300ms", "15ms*300ms",
            "20ms*300ms"
        ]
        typeListTemp = [
            "10_1", "15_1", "20_1", "10_2", "15_2", "20_2", "10_3", "15_3",
            "20_3"
        ]

        # number of output rows for each task type
        typeLengthList = []

        typeLengthList.append(len(self.task.practiceTimeDic["10_1"]) + 5)
        typeLengthList.append(len(self.task.practiceTimeDic["15_1"]) + 5)
        typeLengthList.append(len(self.task.practiceTimeDic["20_1"]) + 5)

        typeLengthList.append(len(self.task.practiceTimeDic["10_2"]) + 5)
        typeLengthList.append(len(self.task.practiceTimeDic["15_2"]) + 5)
        typeLengthList.append(len(self.task.practiceTimeDic["20_2"]) + 5)

        typeLengthList.append(len(self.task.practiceTimeDic["10_3"]) + 5)
        typeLengthList.append(len(self.task.practiceTimeDic["15_3"]) + 5)
        typeLengthList.append(len(self.task.practiceTimeDic["20_3"]) + 5)

        # write each task-type label
        j = 1
        for i in range(0, 9):
            ws.write(j, 7, typeList[i], style0)
            j = j + typeLengthList[i]

        # write practice/formal flag, row index, per-row time and per-row error count
        length = 1
        for i in range(0, 9):
            lop = typeLengthList[i] - 5
            lof = 1

            for k in range(typeLengthList[i]):
                if (k < lop):
                    tem = (k + 1)

                    ptime = (self.task.practiceTimeDic[typeListTemp[i]][k])
                    perror = (self.task.practiceErrorDic[typeListTemp[i]][k])
                    ws.write(length, 8, u'练习', style0)
                    ws.write(length, 9, tem, style0)
                    ws.write(length, 10, ptime, style0)
                    ws.write(length, 11, perror, style0)
                else:
                    tem = lof
                    ftime = (self.task.formalTimeDic[typeListTemp[i]][lof - 1])
                    ferror = (self.task.formalErrorDic[typeListTemp[i]][lof -
                                                                        1])

                    ws.write(length, 8, u'正式实验', style0)
                    ws.write(length, 9, tem, style0)
                    ws.write(length, 10, ftime, style0)
                    ws.write(length, 11, ferror, style0)
                    lof = lof + 1

                length = length + 1

        # compute total time and total errors for the practice and formal runs
        length1 = 0
        for k in range(0, 9):
            # practice time
            sum1 = 0
            length1 = length1 + len(self.task.practiceTimeDic[typeListTemp[k]])
            for i in range(len(self.task.practiceTimeDic[typeListTemp[k]])):
                sum1 = sum1 + self.task.practiceTimeDic[typeListTemp[k]][i]

            # practice errors
            sum2 = 0
            for i in range(len(self.task.practiceErrorDic[typeListTemp[k]])):
                sum2 = sum2 + self.task.practiceErrorDic[typeListTemp[k]][i]

            ws.write(length1, 12, sum1, style0)
            ws.write(length1, 13, sum2, style0)

            # formal-run time
            sum1 = 0

            for i in range(5):
                sum1 = sum1 + self.task.formalTimeDic[typeListTemp[k]][i]

            length1 = length1 + 5
            # formal-run errors
            sum2 = 0
            for i in range(5):
                sum2 = sum2 + self.task.formalErrorDic[typeListTemp[k]][i]

            ws.write(length1, 12, sum1, style0)
            ws.write(length1, 13, sum2, style0)

        testID = unicode(self.task.testID)
        testName = unicode(self.task.testName)

        # save the practice/formal data sheet under D:\
        save_path = "d:\\" + testID + "_" + testName + ".xls"
        wb.save(save_path)  # save the file
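
The accumulation loops above can be collapsed with the built-in sum(); a sketch under the same dictionary layout (five formal rows per condition, as in the snippet):

practice_time   = sum(self.task.practiceTimeDic[typeListTemp[k]])
practice_errors = sum(self.task.practiceErrorDic[typeListTemp[k]])
formal_time     = sum(self.task.formalTimeDic[typeListTemp[k]][:5])
formal_errors   = sum(self.task.formalErrorDic[typeListTemp[k]][:5])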
コード例 #13
0
ファイル: 16.py プロジェクト: Fayostyle/spiderstudy
import xlwt

with open('numbers.txt', 'r', encoding='utf-8') as f:
    data = f.read()
    _numbers = eval(data)
    numbers = list()
    for i in range(3):
        numbers.extend(_numbers[i])
    row = len(numbers) // len(_numbers)

file = xlwt.Workbook()
table = file.add_sheet('numbers')
for i in range(len(numbers)):
    table.write(i // row, i % row, numbers[i])
file.save('numbers.xls')
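
One caveat: eval() on file contents will execute arbitrary Python. For literal data such as a list of lists, ast.literal_eval is the safe drop-in:

import ast

with open('numbers.txt', 'r', encoding='utf-8') as f:
    _numbers = ast.literal_eval(f.read())  # parses only literals, never executes code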
コード例 #14
0
ファイル: template.py プロジェクト: braimp/pySpider-2
from selenium import webdriver
from bs4 import BeautifulSoup
import time


def phangetObj(url):
    driver = webdriver.PhantomJS(executable_path="phantomjs.exe")
    # request the page through the headless browser
    driver.get(url)
    # wait 3 seconds for all the data to finish loading
    time.sleep(3)
    # locate elements by id;
    # .text returns an element's text content
    # dl=driver.find_elements_by_css_selector("#searchLeftOptions")
    pageSource=driver.page_source
    bsObj=BeautifulSoup(pageSource,"html.parser")
    crawl(bsObj,f,city,indu,r)   
    driver.close()

import xlwt
# write to excel
data = xlwt.Workbook()
table = data.add_sheet(indu.replace("/", "_"), cell_overwrite_ok=True)
r = 0
row = []  # template placeholder: filled with scraped fields in the real spider
for i in range(0, len(row)):
    table.write(r, i, row[i])
data.save("test.xls")


# parse a local html file
from bs4 import BeautifulSoup
bsObj = BeautifulSoup(open('2016.html','r',encoding='utf-8').read(),'html.parser')


def link_analysis():
    """
    和隨機網絡相比,判斷目標網絡的每一條連結的拓樸連結屬性:BOND/sink/local bridge of layer #n/global bridge
    """
    global path, times, quick, separation

    # split the path variable into directory head, base name tail and extension ext
    root, ext = os.path.splitext(path)
    head, tail = os.path.split(root)

    # if the target network file exists, load it and compute its average
    # shortest path as the baseline for later calculations
    if os.path.exists(path) and os.path.isfile(path):
        debugmsg('read and analyse the target network...')
        # open the Pajek network file and convert it to an undirected graph
        G = nx.Graph(nx.read_pajek(path))
        # number the first component graph 0
        compNo = 0
        for g in nx.connected_component_subgraphs(G):

            # skip components that are a single node or a single edge
            #if len(g.edges()) == 0: continue
            if g.order() == 1 or g.size() == 1: continue

            # compute the average shortest path
            g.graph[GRAPH_KEY_SHORTEST_PATH] = nx.average_shortest_path_length(
                g)
            # the component's name equals its number
            g.name = compNo
            compNo += 1

            # decide how many layers outward each node looks when judging strong/weak links
            layers = max(
                1,
                int(min((g.graph[GRAPH_KEY_SHORTEST_PATH] / 2.0), separation))
                if quick else int(g.graph[GRAPH_KEY_SHORTEST_PATH] / 2.0))

            # compute the normalized ratio of common friends at every layer
            # for each pair of adjacent nodes
            compute_link_property(g, layers)

            t_start = time.time()
            t_ttl = 0
            rgs = []
            # generate `times` random networks for comparison
            for c in xrange(times):
                rg_shelve_path = root + '_' + str(compNo) + '_' + str(
                    c) + '_shelve.obj'
                rg_path = root + '_' + str(compNo) + '_' + str(c) + ext
                rg_name = 'random_network_' + str(g.name) + '_' + str(c)
                # if the c-th random network was generated before, read it from
                # the shelve file; otherwise build a new one
                if os.path.exists(rg_shelve_path) & os.path.isfile(
                        rg_shelve_path):
                    debugmsg('read and analyse the random network #' + str(c) +
                             ' from shelve file ' + rg_shelve_path + '...')
                    sf = shelve.open(rg_shelve_path)
                    rg = sf[rg_name]
                else:
                    if os.path.exists(rg_path) and os.path.isfile(rg_path):
                        debugmsg('read and analyse the random network #' +
                                 str(c) + ' from pajek file ' + rg_path +
                                 '...')
                        rg = compute_link_property(
                            nx.connected_component_subgraphs(
                                nx.Graph(nx.read_pajek(rg_path)))[0], layers)
                    else:
                        debugmsg(
                            'create, analyse and write the random network #' +
                            str(c) + ' to pajek file ' + rg_path + '...')
                        rg = g.copy()
                        rg.graph['name'] = rg_name
                        if g.number_of_edges() > 2:
                            nx.connected_double_edge_swap(
                                rg, g.number_of_edges())
                        compute_link_property(rg, layers)
                        nx.write_pajek(rg, rg_path)
                    rg.remove_nodes_from(rg.nodes())
                    sf = shelve.open(rg_shelve_path)
                    sf[rg_name] = rg
                sf.close()
                rgs.append(rg)
                t_inc = time.time() - t_start
                t_ttl += t_inc
                debugmsg('+--- * Time spent (increment, total): (%f, %f)' %
                         (t_inc, t_ttl))
                t_start = time.time()
            times = len(rgs)

            debugmsg('generate a threshold for BOND/bridge link analysis...')
            for i in xrange(layers):
                l = str(i + 1)
                g.graph[GRAPH_KEY_AVG_LIST + l] = []
                g.graph[GRAPH_KEY_STD_LIST + l] = []
                for j in xrange(times):
                    g.graph[GRAPH_KEY_AVG_LIST + l].append(
                        rgs[j].graph[GRAPH_KEY_AVG_COMMON_NODES + l])
                    g.graph[GRAPH_KEY_STD_LIST + l].append(
                        rgs[j].graph[GRAPH_KEY_STD_COMMON_NODES + l])
                g.graph[GRAPH_KEY_THRESHOLD_R1 + l] = scipy.mean(
                    g.graph[GRAPH_KEY_AVG_LIST + l]) + 2 * scipy.mean(
                        g.graph[GRAPH_KEY_STD_LIST + l])
                if g.graph[GRAPH_KEY_THRESHOLD_R1 + l] > 1:
                    g.graph[GRAPH_KEY_THRESHOLD_R1 + l] = 1.0

            debugmsg(
                'assess the link property of every edge of the target network...'
            )
            # phase 1: identify the sink links
            g.graph[SINK] = 0
            g.graph[BOND] = 0
            g.graph[LOCAL_BRIDGE] = 0
            g.graph[GLOBAL_BRIDGE] = 0
            for s, t in g.edges_iter(data=False):
                if (g.degree(s) == 1) | (g.degree(t) == 1):
                    g[s][t][EDGE_KEY_LAYER + '0'] = SINK
                    g[s][t][EDGE_KEY_NEXT_STEP] = STOP
                    g[s][t][EDGE_KEY_WIDTH] = SINK_BASIC_WIDTH
                    g[s][t][EDGE_KEY_COLOR] = SINK_COLOR
                    g.graph[SINK] += 1
                else:
                    g[s][t][EDGE_KEY_NEXT_STEP] = PASS

            # phase 2: identify the BOND/local bridge links on every layer
            for i in xrange(layers):
                l = -(i + 1)
                n = str(i + 1)
                g.graph[GRAPH_KEY_PASS_TO_NEXT_LAYER + n] = []
                for s, t in g.edges_iter(data=False):
                    if g[s][t][EDGE_KEY_NEXT_STEP] == STOP:
                        g[s][t][EDGE_KEY_LAYER + n] = g[s][t][EDGE_KEY_LAYER +
                                                              str(i)]
                    elif g[s][t][l] >= g.graph[GRAPH_KEY_THRESHOLD_R1 + n]:
                        g[s][t][EDGE_KEY_LAYER + n] = BOND
                        g[s][t][EDGE_KEY_NEXT_STEP] = STOP
                        g[s][t][EDGE_KEY_WIDTH] = (layers - i +
                                                   1) * BOND_BASIC_WIDTH
                        g[s][t][EDGE_KEY_COLOR] = BOND_COLOR
                        g.graph[BOND] += 1
                    else:
                        g[s][t][EDGE_KEY_LAYER +
                                n] = LOCAL_BRIDGE + ' of layer ' + n
                        g[s][t][EDGE_KEY_WIDTH] = (layers - i +
                                                   1) * BRIDGE_BASIC_WIDTH
                        g[s][t][EDGE_KEY_COLOR] = LOCAL_BRIDGE_COLOR
                        g.graph[GRAPH_KEY_PASS_TO_NEXT_LAYER + n].append(
                            g[s][t][l])

                if len(g.graph[GRAPH_KEY_PASS_TO_NEXT_LAYER + n]) == 0:
                    g.graph[GRAPH_KEY_THRESHOLD_R2 + n] = 0
                else:
                    g.graph[GRAPH_KEY_THRESHOLD_R2 + n] = scipy.mean(
                        g.graph[GRAPH_KEY_PASS_TO_NEXT_LAYER + n]) - scipy.std(
                            g.graph[GRAPH_KEY_PASS_TO_NEXT_LAYER + n])
                    if g.graph[GRAPH_KEY_THRESHOLD_R2 + n] < 0:
                        g.graph[GRAPH_KEY_THRESHOLD_R2 + n] = 0.0
                    for s, t in g.edges_iter(data=False):
                        if g[s][t][EDGE_KEY_NEXT_STEP] == PASS:
                            if g[s][t][l] > g.graph[GRAPH_KEY_THRESHOLD_R2 +
                                                    n]:
                                g[s][t][EDGE_KEY_NEXT_STEP] = STOP
                                g.graph[LOCAL_BRIDGE] += 1

            # phase 3: identify the global links
            for s, t in g.edges_iter(data=False):
                if g[s][t][EDGE_KEY_NEXT_STEP] == PASS:
                    g[s][t][EDGE_KEY_LAYER + n] = GLOBAL_BRIDGE
                    g[s][t][EDGE_KEY_WIDTH] = BRIDGE_BASIC_WIDTH
                    g[s][t][EDGE_KEY_COLOR] = GLOBAL_BRIDGE_COLOR
                    g.graph[GLOBAL_BRIDGE] += 1

            # extra phase 4: identify the node entropy
            ns = []
            nc = []
            g.graph[GRAPH_KEY_EDGE_CLASS] = {
                BOND: g.graph[BOND],
                LOCAL_BRIDGE: g.graph[LOCAL_BRIDGE],
                GLOBAL_BRIDGE: g.graph[GLOBAL_BRIDGE]
            }
            g.graph[GRAPH_KEY_ENTROPY] = entropy(
                g.graph[GRAPH_KEY_EDGE_CLASS].values())
            for s in g.nodes_iter(data=False):
                g.node[s][NODE_KEY_EDGE_CLASS] = g.graph[
                    GRAPH_KEY_EDGE_CLASS].copy()
                for t in nx.neighbors(g, s):
                    for key in g.node[s][NODE_KEY_EDGE_CLASS].keys():
                        if g[s][t][EDGE_KEY_LAYER +
                                   str(layers)].startswith(key):
                            g.node[s][NODE_KEY_EDGE_CLASS][key] -= 1
                g.node[s][NODE_KEY_NEW_ENTROPY] = entropy(
                    g.node[s][NODE_KEY_EDGE_CLASS].values())
                g.node[s][NODE_KEY_INFORMATION_GAIN] = max(
                    0, g.graph[GRAPH_KEY_ENTROPY] -
                    g.node[s][NODE_KEY_NEW_ENTROPY])
                ns.append(g.node[s][NODE_KEY_INFORMATION_GAIN])
                nc.append([
                    REGULAR_NODE_COLOR, IMPORTANT_NODE_COLOR, SUPER_NODE_COLOR
                ][max(0,
                      int(math.ceil(g.node[s][NODE_KEY_INFORMATION_GAIN])))])
            ns_avg = scipy.mean(ns)
            if ns_avg != 0:
                ns = [
                    NODE_SIZE_BASE + NODE_SIZE * (value / ns_avg)
                    for value in ns
                ]

            # extra phase 5: save the network fingerprint into a file
            nfp_shelve_path = 'network_fingerprints.obj'
            if os.path.exists(nfp_shelve_path) & os.path.isfile(
                    nfp_shelve_path):
                sf = shelve.open(nfp_shelve_path)
                finger_prints = sf['finger_prints']
            else:
                sf = shelve.open(nfp_shelve_path)
                finger_prints = {}
            d = float(g.graph[BOND] + g.graph[LOCAL_BRIDGE] +
                      g.graph[GLOBAL_BRIDGE] + g.graph[SINK])
            network_name = root + '_' + str(compNo)
            finger_prints[network_name] = {
                0: round(g.graph[BOND] / d, 4),
                1: round(g.graph[LOCAL_BRIDGE] / d, 4),
                2: round(g.graph[GLOBAL_BRIDGE] / d, 4),
                3: round(g.graph[SINK] / d, 4)
            }
            corr_table = {}
            for net_name1, net_series1 in finger_prints.items():
                corr_table[net_name1] = {}
                for net_name2, net_series2 in finger_prints.items():
                    corr_table[net_name1][net_name2] = numpy.corrcoef(
                        net_series1.values(), net_series2.values())[0, 1]
            sf['corr_table'] = corr_table
            sf['finger_prints'] = finger_prints
            sf.close()

            debugmsg('write the analysis results to a pajek file...')
            ng = nx.Graph()
            ng.add_nodes_from(g.nodes())
            ng.add_edges_from(g.edges(data=True))
            ng.graph['name'] = root + '_' + str(compNo) + '_result' + ext
            nx.write_pajek(ng, root + '_' + str(compNo) + '_result' + ext)

            debugmsg('write the analysis results to an excel file...')
            # Phase 1: write the analysis results of the target network to sheet1
            book = xlwt.Workbook()
            s1 = book.add_sheet('target network')
            s2 = book.add_sheet(str(times) + ' random networks')
            s3 = book.add_sheet('node information')
            si = xlwt.Style.easyxf(
                'font: name Arial, colour dark_red, bold True; alignment: horizontal left;'
            )
            st = xlwt.Style.easyxf(
                'font: name Arial, colour dark_red, bold True; alignment: horizontal center;'
            )
            sb = xlwt.Style.easyxf('font: name Arial, colour dark_blue;')

            # phase 1.1: create the heading data of the analysis report
            row = 5
            col = 7
            s1.write(0, 0, 'number of nodes = ' + str(g.number_of_nodes()), si)
            s1.write(1, 0, 'number of edges = ' + str(g.number_of_edges()), si)
            s1.write(
                2, 0, 'average degree = ' +
                str(g.number_of_edges() * 2.0 / g.number_of_nodes()), si)
            s1.write(3, 0, 'diameter = ' + str(nx.diameter(g)), si)
            s1.write(
                4, 0, 'average shortest path = ' +
                str(round(g.graph[GRAPH_KEY_SHORTEST_PATH], 4)), si)
            s1.write(
                5, 0, 'average clustering coefficient = ' +
                str(round(nx.average_clustering(g), 4)), si)
            s1.write(
                6, 0, 'degree assortativity coefficient = ' +
                str(round(nx.degree_assortativity_coefficient(g), 4)), si)
            s1.write(
                7, 0, 'BOND = ' + str(g.graph[BOND]) + " (" +
                str(100 * round(float(g.graph[BOND]) / g.size(), 4)) + '%)',
                si)
            s1.write(
                8, 0, 'sink = ' + str(g.graph[SINK]) + " (" +
                str(100 * round(float(g.graph[SINK]) / g.size(), 4)) + '%)',
                si)
            s1.write(
                9, 0, 'local bridge = ' + str(g.graph[LOCAL_BRIDGE]) + " (" +
                str(100 * round(float(g.graph[LOCAL_BRIDGE]) / g.size(), 4)) +
                '%)', si)
            s1.write(
                10, 0,
                'global bridge = ' + str(g.graph[GLOBAL_BRIDGE]) + " (" +
                str(100 * round(float(g.graph[GLOBAL_BRIDGE]) / g.size(), 4)) +
                '%)', si)
            s1.write(11, 0, 'entropy = ' + str(g.graph[GRAPH_KEY_ENTROPY]), si)
            s1.write(row - 1, col - 6, 'st.sp', st)
            s1.write(row - 1, col - 5, 'avg.sp', st)
            s1.write(row - 1, col - 4, 's.cc', st)
            s1.write(row - 1, col - 3, 't.cc', st)
            s1.write(row - 1, col - 2, 'source', st)
            s1.write(row - 1, col - 1, 'target', st)
            for i in xrange(layers):
                s1.write(
                    row - 3, col + (i * 2), 'R1 = ' +
                    str(round(g.graph[GRAPH_KEY_THRESHOLD_R1 + str(i + 1)],
                              4)), si)
                s1.write(
                    row - 2, col + (i * 2), 'R2 = ' +
                    str(round(g.graph[GRAPH_KEY_THRESHOLD_R2 + str(i + 1)],
                              4)), si)
                s1.write(row - 1, col + (i * 2), 'intersection weight', st)
                s1.write(row - 1, col + (i * 2) + 1, 'layer ' + str(i + 1), st)

            # phase 1.2: create the body data of the analysis report
            for s, t in g.edges_iter(data=False):
                s1.write(row, col - 4, round(nx.clustering(g, s), 2), sb)
                s1.write(row, col - 3, round(nx.clustering(g, t), 2), sb)
                s1.write(row, col - 2, s, sb)
                s1.write(row, col - 1, t, sb)

                for i in xrange(layers):
                    s1.write(row, col + (i * 2), round(g[s][t][-(i + 1)], 4),
                             sb)
                    if (i == 0):
                        s1.write(row, col + (i * 2) + 1,
                                 g[s][t][EDGE_KEY_LAYER + str(i + 1)], sb)
                    elif (g[s][t][EDGE_KEY_LAYER + str(i + 1)] !=
                          g[s][t][EDGE_KEY_LAYER + str(i)]):
                        s1.write(row, col + (i * 2) + 1,
                                 g[s][t][EDGE_KEY_LAYER + str(i + 1)], sb)
                    else:
                        s1.write(row, col + (i * 2) + 1, '...', sb)
                row += 1

            # phase 2: write the analysis results of the random networks to the sheet2
            row = 5
            col = 3
            for i in xrange(layers):
                l = str(i + 1)
                s2.write(row - 2, col + (i * 4), 'layer ' + l, st)
                s2.write(row - 1, col + (i * 4), 'AVG', st)
                s2.write(row - 1, col + (i * 4) + 1, 'STD', st)
                for j in xrange(times):
                    s2.write(row + j, col + (i * 4),
                             rgs[j].graph[GRAPH_KEY_AVG_COMMON_NODES + l], sb)
                    s2.write(row + j, col + (i * 4) + 1,
                             rgs[j].graph[GRAPH_KEY_STD_COMMON_NODES + l], sb)

            # extra phase 3: write the analysis results of node entropy
            row = 1
            col = 1
            now = 1
            s3.write(row, col + 0, 'node', st)
            s3.write(row - 1, col + 1, 'o.entropy = ', st)
            s3.write(row, col + 1, 'n.entropy', st)
            s3.write(row - 1, col + 2, g.graph[GRAPH_KEY_ENTROPY], sb)
            s3.write(row, col + 2, 'gain', st)
            s3.write(row - 1, col + 3, g.graph[GRAPH_KEY_EDGE_CLASS][BOND], sb)
            s3.write(row, col + 3, 'BOND', st)
            s3.write(row - 1, col + 4,
                     g.graph[GRAPH_KEY_EDGE_CLASS][LOCAL_BRIDGE], sb)
            s3.write(row, col + 4, 'local bridge', st)
            s3.write(row - 1, col + 5,
                     g.graph[GRAPH_KEY_EDGE_CLASS][GLOBAL_BRIDGE], sb)
            s3.write(row, col + 5, 'global bridge', st)
            s3.write(row, col + 6, 'avg', st)
            s3.write(row + 1, col + 6, ns_avg, sb)
            for s in g.nodes_iter(data=False):
                s3.write(row + now, col + 0, s, sb)
                s3.write(row + now, col + 1, g.node[s][NODE_KEY_NEW_ENTROPY],
                         sb)
                s3.write(row + now, col + 2,
                         g.node[s][NODE_KEY_INFORMATION_GAIN], sb)
                s3.write(row + now, col + 3,
                         g.node[s][NODE_KEY_EDGE_CLASS][BOND], sb)
                s3.write(row + now, col + 4,
                         g.node[s][NODE_KEY_EDGE_CLASS][LOCAL_BRIDGE], sb)
                s3.write(row + now, col + 5,
                         g.node[s][NODE_KEY_EDGE_CLASS][GLOBAL_BRIDGE], sb)
                now += 1

            book.save(root + '_' + str(compNo) + '_result.xls')

            debugmsg('draw the analysis results of the target network...')
            # phase 1: draw the target network
            if path in SPECIAL_NETWORKS:
                pos = {
                    seq_no: (float(g.node[seq_no]['posx']),
                             float(g.node[seq_no]['posy']))
                    for seq_no in g.nodes_iter()
                }
            else:
                pos = nx.spring_layout(g, pos=nx.circular_layout(g))
            if show_the_major_result == True:
                fig_no = 10
                net_fig1 = plot.figure(
                    fig_no,
                    figsize=(4, 6) if path in SPECIAL_NETWORKS else (6, 6),
                    dpi=200,
                    facecolor='white')
                if path in SPECIAL_NETWORKS:
                    bb_width = [
                        0.1 if g[s][t][EDGE_KEY_LAYER + n] == BOND else 0.5
                        for (s, t) in g.edges_iter(data=False)
                    ]
                    ns = [0.5 for n in g.nodes_iter(data=False)]
                else:
                    bb_width = [
                        g[s][t][EDGE_KEY_WIDTH]
                        for (s, t) in g.edges_iter(data=False)
                    ]
                    plot.title('target network = ' + tail)
                bb_color = [
                    g[s][t][EDGE_KEY_COLOR]
                    for (s, t) in g.edges_iter(data=False)
                ]
                plot.axis('off')
                plot.xticks(())
                plot.yticks(())
                net_fig1.set_tight_layout(True)
                if path in SPECIAL_NETWORKS:
                    nx.draw_networkx(g,
                                     with_labels=False,
                                     pos=pos,
                                     node_size=ns,
                                     linewidths=0.5,
                                     edge_color=bb_color,
                                     width=bb_width)
                else:
                    nx.draw_networkx(g,
                                     pos=pos,
                                     linewidths=0,
                                     width=bb_width,
                                     node_size=ns,
                                     node_color=nc,
                                     font_size=8,
                                     edge_color=bb_color)
                plot.savefig(root + '_' + str(compNo) + '_result.png', dpi=600)

            # phase 1.1: draw the detail analysis result of the target network
            if show_the_detailed_result == True:
                for i in xrange(layers):
                    l = i + 1
                    sub_edge_label = dict()
                    for s, t in g.edges_iter(data=False):
                        sub_edge_label[(s, t)] = round(g[s][t][-l], 3)
                    net_sub_fig = plot.figure(fig_no + l,
                                              figsize=(12, 8),
                                              facecolor='white')
                    plot.title(
                        'target network = ' + tail + ' (layer ' + str(l) +
                        ', R1 = ' + str(
                            round(g.graph[GRAPH_KEY_THRESHOLD_R1 +
                                          str(i + 1)], 4)) + ', R2 = ' +
                        str(
                            round(g.graph[GRAPH_KEY_THRESHOLD_R2 +
                                          str(i + 1)], 4)) + ')')
                    plot.xticks(())
                    plot.yticks(())
                    net_sub_fig.set_tight_layout(True)
                    nx.draw_networkx(g,
                                     pos=pos,
                                     linewidths=0,
                                     width=bb_width,
                                     node_size=ns,
                                     node_color=nc,
                                     font_size=8,
                                     edge_color=bb_color)
                    nx.draw_networkx_edge_labels(g,
                                                 pos=pos,
                                                 edge_labels=sub_edge_label,
                                                 font_size=6)
                    plot.savefig(root + '_' + str(compNo) + '_result_layer_' +
                                 str(l) + '.png')

            # phase 2: show betweenness centrality for edges
            if show_the_betweenness_result == True:
                eb = nx.edge_betweenness_centrality(g)
                for s, t in eb:
                    eb[(s, t)] = round(eb[(s, t)], 3)
                bn_width = [
                    0.5 +
                    ((eb[(s, t)] - min(eb.values())) / scipy.std(eb.values()))
                    for (s, t) in g.edges_iter(data=False)
                ]
                net_fig2 = plot.figure(20, figsize=(12, 8), facecolor='white')
                plot.title('Target network = ' + tail +
                           ' (betweenness centrality for edges)')
                plot.xticks(())
                plot.yticks(())
                net_fig2.set_tight_layout(True)
                nx.draw_networkx(g,
                                 pos=pos,
                                 linewidths=0,
                                 width=bn_width,
                                 node_size=ns,
                                 node_color=nc,
                                 font_size=8)
                nx.draw_networkx_edge_labels(g,
                                             pos=pos,
                                             edge_labels=eb,
                                             font_size=6)
                plot.savefig(root + '_' + str(compNo) +
                             '_result (edge betweenness).png')

            # phase 3: show pagerank-based weighting for edges
            if show_the_pagerank_result == True:
                pg = nx.Graph()
                pg.add_nodes_from(g.edges())
                for pair in pg.nodes():
                    for vertex in pair:
                        for node in g.neighbors(vertex):
                            if (vertex, node) in g.edges():
                                if not pair == (vertex, node):
                                    pg.add_edge(pair, (vertex, node))
                            else:
                                if not pair == (node, vertex):
                                    pg.add_edge(pair, (node, vertex))
                pr = nx.pagerank(pg, max_iter=2000)
                for s, t in pr:
                    pr[(s, t)] = round(pr[(s, t)], 4)
                pg_width = [
                    (pr[(s, t)] - min(pr.values())) / scipy.std(pr.values())
                    for (s, t) in g.edges_iter(data=False)
                ]
                net_fig3 = plot.figure(30, figsize=(12, 8), facecolor='white')
                plot.title('Target network = ' + tail +
                           ' (pagerank-based weighting for edges)')
                plot.xticks(())
                plot.yticks(())
                net_fig3.set_tight_layout(True)
                nx.draw_networkx(g,
                                 pos=pos,
                                 linewidths=0,
                                 width=pg_width,
                                 node_size=ns,
                                 node_color=nc,
                                 font_size=8)
                nx.draw_networkx_edge_labels(g,
                                             pos=pos,
                                             edge_labels=pr,
                                             font_size=6)
                plot.savefig(root + '_' + str(compNo) +
                             '_result (pagerank-based).png')

            # phase 4: show the result of network clustering
            if show_the_major_clustering_result == True:
                fig_no = 40
                sg = network_clustering(g, layers)
                ncc_map = {}
                color_count = 1
                for v in g.nodes_iter(data=False):
                    if not g.node[v][NODE_KEY_GROUP_NUMBER] in ncc_map:
                        ncc_map[g.node[v][NODE_KEY_GROUP_NUMBER]] = color_count
                        color_count += 1
                ncc = [
                    ncc_map[g.node[v][NODE_KEY_GROUP_NUMBER]]
                    for v in g.nodes_iter(data=False)
                ]
                net_fig4 = plot.figure(fig_no,
                                       figsize=(12, 8),
                                       facecolor='white')
                plot.title('Target network = ' + tail + ' (clustering result)')
                plot.xticks(())
                plot.yticks(())
                net_fig4.set_tight_layout(True)
                nx.draw_networkx(g,
                                 pos=pos,
                                 linewidths=0,
                                 width=bb_width,
                                 node_color=ncc,
                                 vmin=min(ncc),
                                 vmax=max(ncc),
                                 cmap=plot.cm.Dark2,
                                 font_size=8,
                                 edge_color=bb_color)
                plot.savefig(root + '_' + str(compNo) +
                             '_result (network clustering).png')
                if show_the_detailed_clustering_result == True:
                    for key, sub_g in sg.items():
                        if type(sub_g) == list:
                            show_g = nx.Graph()
                            for sub_c in sub_g:
                                show_g.add_nodes_from(sub_c.nodes())
                                show_g.add_edges_from(sub_c.edges())
                        else:
                            show_g = sub_g
                        fig_no += 1
                        net_sub_fig = plot.figure(fig_no,
                                                  figsize=(12, 8),
                                                  facecolor='white')
                        plot.title('Target network = ' + tail + ' (' + key +
                                   ')')
                        plot.xticks(())
                        plot.yticks(())
                        net_sub_fig.set_tight_layout(True)
                        nx.draw_networkx(show_g,
                                         pos=pos,
                                         linewidths=0,
                                         font_size=8)
                        plot.savefig(root + '_' + str(compNo) + '_result (' +
                                     key + ').png')

            plot.show()

        plot.close('all')
        return (g, rgs)
    else:
        return -1
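
link_analysis() takes no arguments and reads path, times, quick and separation from module globals, returning (g, rgs) on success and -1 when the input file is missing. A hedged driver sketch; the concrete values and file name are assumptions:

path = 'target_network.net'   # Pajek file to analyse (assumed name)
times = 10                    # number of random reference networks
quick = True                  # cap the layer depth at `separation`
separation = 3
result = link_analysis()
if result == -1:
    print('target network file not found')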
コード例 #16
0
def write_to_excel(my_list, WritefilePath):
    wb = xlwt.Workbook()
    ws = wb.add_sheet("Invalid Links")
    for i in range(len(my_list)):
        ws.write(i, 0, my_list[i])
    wb.save(WritefilePath)
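
Usage is a one-liner (the arguments are assumptions):

write_to_excel(['http://example.com/dead-link'], 'invalid_links.xls')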
コード例 #17
0
def rec2excel(r,
              ws,
              formatd=None,
              rownum=0,
              colnum=0,
              nanstr='NaN',
              infstr='Inf'):
    """
    save record array r to excel xlwt worksheet ws
    starting at rownum.  if ws is string like, assume it is a
    filename and save to it

    start writing at rownum, colnum

    formatd is a dictionary mapping dtype name -> mlab.Format instances

    nanstr is the string that mpl will put into excel for np.nan value
    The next rownum after writing is returned
    """

    autosave = False
    if cbook.is_string_like(ws):
        filename = ws
        wb = excel.Workbook()
        ws = wb.add_sheet('worksheet')
        autosave = True

    if formatd is None:
        formatd = dict()

    formats = []
    font = excel.Font()
    font.bold = True

    stylehdr = excel.XFStyle()
    stylehdr.font = font

    for i, name in enumerate(r.dtype.names):
        dt = r.dtype[name]
        format = formatd.get(name)
        if format is None:
            format = mlab.defaultformatd.get(dt.type, mlab.FormatObj())

        format = xlformat_factory(format)
        ws.write(rownum, colnum + i, name, stylehdr)
        formats.append(format)

    rownum += 1

    ind = np.arange(len(r.dtype.names))
    for row in r:

        for i in ind:
            val = row[i]
            format = formats[i]
            val = format.toval(val)
            if mlab.safe_isnan(val):
                ws.write(rownum, colnum + i, nanstr)
            elif mlab.safe_isinf(val):
                sgn = np.sign(val)
                if sgn > 0: s = infstr
                else: s = '-%s' % infstr
                ws.write(rownum, colnum + i, s)
            elif format.xlstyle is None:
                ws.write(rownum, colnum + i, val)
            else:
                ws.write(rownum, colnum + i, val, format.xlstyle)
        rownum += 1

    if autosave:
        wb.save(filename)
    return rownum
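
A hedged usage sketch with a small numpy record array; passing a file name as ws triggers the autosave branch:

import numpy as np

r = np.rec.fromrecords([('a', 1.0), ('b', float('nan'))],
                       names=['label', 'value'])
next_row = rec2excel(r, 'records.xls')  # header row + 2 data rows, saved to disk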
コード例 #18
0
def getPhone():
    ssl._create_default_https_context = ssl._create_unverified_context
    shop_url = 'https://ds.appgrowing.cn/api/shop/rank?timeType=range&startDate=2020-06-05&endDate=2020-06-11&page=1&limit=20&sort=-quantitySoldIncr'
    #product_url='https://ds.appgrowing.cn/api/shop/product/rank?startDate=2020-05-12&endDate=2020-06-10&timeType=range&page=1&limit=20&sort=-quantitySoldIncr&isNew=false&matchType=product&shopId=ea4316bbe439555656e10718f3e1af73'
    headers2 = {
        'authority': 'ds.appgrowing.cn',
        'method': 'GET',
        'path':
        '/api/leaflet/mt?startDate=2019-12-12&endDate=2020-06-08&order=-_score&isExact=false&keyword=%E8%A1%A3%E6%9C%8D&page=1&limit=60',
        'scheme': 'https',
        'accept': 'application/json, text/plain, */*',
        'accept-encoding': 'gzip, deflate',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cookie':
        'GA1.2.1801623842.1591588262; NPS_Dialog-287498=gAAAAABeVISjTuGs3BvpNcZnvYK0uCs_XAnrmilhczWzvEG0_A1lGEg8OcVSP-W33R3amK0bTxCOFlZJH2ClU--kaPnwLVfxQw==; _gid=GA1.2.1024763389.1591848138; AG_Token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpZCI6ImU3ZDRjYTdiLWYzNjQtMzgxZS04N2YyLTU2MGVmNzYzMzYyMyIsImFjYyI6Mjg3NDk4LCJleHAiOjE1OTQ0NDAxNDgsImlhdCI6MTU5MTg0ODE0OX0.rgbSUZWcFHF3C1quieFrp6o2qqhHjG328I8VUW-b_TY; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22287498%22%2C%22%24device_id%22%3A%22172920d8f7b465-0ba3bc5efd71c3-58133018-1327104-172920d8f7c60c%22%2C%22props%22%3A%7B%22%24latest_referrer%22%3A%22https%3A%2F%2Fyoucloud.com%2Fservice%2Finfo%2F%3Fyoumi_aff%22%2C%22%24latest_traffic_source_type%22%3A%22%E5%BC%95%E8%8D%90%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%7D%2C%22first_id%22%3A%22172920d8f7b465-0ba3bc5efd71c3-58133018-1327104-172920d8f7c60c%22%7D',
        'referer':
        'https://ds.appgrowing.cn/leaflet?startDate=2019-12-12&endDate=2020-06-08&order=-_score&isExact=false&keyword=%E8%A1%A3%E6%9C%8D&page=1',
        'user-agent':
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.14 Safari/537.36',
        'cache-control': 'no-cache',
        'pragma': 'no-cache',
        'sec-fetch-dest': 'no-cache',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin'
    }
    headers = {
        'authority': 'ds.appgrowing.cn',
        'method': 'GET',
        'path':
        '/api/leaflet/mt?startDate=2019-12-12&endDate=2020-06-08&order=-_score&isExact=false&keyword=%E7%9A%AE%E9%9E%8B&page=1&limit=60',
        'scheme': 'https',
        'accept': 'application/json, text/plain, */*',
        'accept-encoding': 'gzip, deflate',  #, br',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cookie':
        'GA1.2.1801623842.1591588262; NPS_Dialog-287498=gAAAAABeVISjTuGs3BvpNcZnvYK0uCs_XAnrmilhczWzvEG0_A1lGEg8OcVSP-W33R3amK0bTxCOFlZJH2ClU--kaPnwLVfxQw==; _gid=GA1.2.1024763389.1591848138; AG_Token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpZCI6ImU3ZDRjYTdiLWYzNjQtMzgxZS04N2YyLTU2MGVmNzYzMzYyMyIsImFjYyI6Mjg3NDk4LCJleHAiOjE1OTQ0NDAxNDgsImlhdCI6MTU5MTg0ODE0OX0.rgbSUZWcFHF3C1quieFrp6o2qqhHjG328I8VUW-b_TY; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22287498%22%2C%22%24device_id%22%3A%22172920d8f7b465-0ba3bc5efd71c3-58133018-1327104-172920d8f7c60c%22%2C%22props%22%3A%7B%22%24latest_referrer%22%3A%22https%3A%2F%2Fyoucloud.com%2Fservice%2Finfo%2F%3Fyoumi_aff%22%2C%22%24latest_traffic_source_type%22%3A%22%E5%BC%95%E8%8D%90%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%7D%2C%22first_id%22%3A%22172920d8f7b465-0ba3bc5efd71c3-58133018-1327104-172920d8f7c60c%22%7D',
        'referer':
        'https://ds.appgrowing.cn/leaflet?startDate=2019-12-12&endDate=2020-06-08&order=-_score&isExact=false&keyword=%E7%9A%AE%E9%9E%8B&page=1',
        'user-agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
        'if-none-match': 'W/"1de8f09fa55c512546152acc5565175b"'
    }
    k = 1
    pageSize = 20
    f = xlwt.Workbook()
    sheet1 = f.add_sheet(u'联系方式', cell_overwrite_ok=True)
    rowTitle = [u'店铺名称', u'商品标题', u'电话', u'商品链接']
    for i in range(0, len(rowTitle)):
        sheet1.write(0, i, rowTitle[i])
    shop_response = requests.get(url=shop_url, headers=headers2, verify=False)
    shop_res = shop_response.json()
    for shop_ki in shop_res['data']:
        try:
            shop_id = shop_ki['shopId']
            product_url = 'https://ds.appgrowing.cn/api/shop/product/rank?startDate=2020-05-13&endDate=2020-06-11&timeType=range&page=1&limit=20&sort=-quantitySoldIncr&isNew=false&matchType=product&shopId=' + str(
                shop_id) + ''
            print(product_url)
            item_response = requests.get(url=product_url,
                                         headers=headers2,
                                         verify=False)
            item_res = item_response.json()
            totalCount = item_res['total']
            if (totalCount % pageSize == 0):
                page = (int)(totalCount / pageSize)
            else:
                page = (int)(totalCount / pageSize + 1)
            for i in range(1, page + 1):
                pro_url = 'https://ds.appgrowing.cn/api/shop/product/rank?startDate=2020-05-13&endDate=2020-06-11&timeType=range&page=' + str(
                    i
                ) + '&limit=20&sort=-quantitySoldIncr&isNew=false&matchType=product&shopId=' + str(
                    shop_id) + ''
                print(pro_url)
                pro_response = requests.get(url=pro_url,
                                            headers=headers2,
                                            verify=False)
                pro_res = pro_response.json()
                for pro_ki in pro_res['data']:
                    try:
                        redirect = pro_ki['redirect']
                        item_id = redirect[52:72]
                        moblie_url = 'https://ec.snssdk.com/product/lubanajaxstaticitem?id=' + str(
                            item_id
                        ) + '&token=38bf3df39e0fbcb773cd2372e2d6dec7&page_id=' + str(
                            item_id) + '&b_type_new=0'
                        print(moblie_url)
                        #print(moblie_url, item_id, redirect)
                        moblie_response = requests.get(moblie_url)
                        moblie_res = moblie_response.json()
                        company_name = moblie_res['data']['company_name']
                        name = moblie_res['data']['name']
                        mobile = moblie_res['data']['mobile']
                        if not mobile:
                            continue
                        sheet1.write(k, 0, company_name)
                        sheet1.write(k, 1, name)
                        sheet1.write(k, 2, mobile)
                        sheet1.write(k, 3, redirect)
                        k += 1
                    except Exception as es:
                        print("exception:", es)
                        continue
        except Exception as e:
            print("exception:", e)
            continue
        print("saving workbook")
        f.save('D:/a.xls')
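
The page count above is just ceiling division and can be written in one line:

page = -(-totalCount // pageSize)   # equivalent to math.ceil(totalCount / pageSize)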
コード例 #19
0
from xls import Property
import xlwt
from xlutils.copy import copy
import xlrd

workbook = xlrd.open_workbook("io_xls/jzdr.xls")
# make a writable copy of the spreadsheet
old_content = copy(workbook)
worksheet = old_content.get_sheet(0)
# instantiate the helper class
dh = Property()
# call the class methods
name = dh.name()
sex = dh.sex()
equipment_number = dh.equipment_number()
identity = dh.identity()
number_three = dh.number_three()
other_name = dh.other_name()
tm = dh.type_medicine()
tel = dh.telephone()
time = dh.local_time()
ranges = dh.ranges()

# write into the excel copy
worksheet.write(1, 0, name)
worksheet.write(1, 1, sex)
worksheet.write(1, 2, equipment_number)
worksheet.write(1, 3, tel)
worksheet.write(1, 4, identity)
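
The snippet writes into the copied sheet but never persists it; a closing save along these lines is still needed (the output path is an assumption):

old_content.save('io_xls/jzdr_out.xls')  # xlutils copies must be saved explicitly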
コード例 #20
0
def CheckXlsx(excel_path='', rulemap={}, btwords={}):
    work_sheet = xlrd.open_workbook(excel_path)
    sheet = work_sheet.sheet_by_index(0)

    workbook = xlwt.Workbook(encoding='utf-8')
    worksheet = workbook.add_sheet('0')

    count_dict = {}

    for i in range(sheet.nrows):
        sent = sheet.cell_value(i, 2)
        label = sheet.cell_value(i, 4)
        sent_bt_list = getSentBt(sent, btwords)
        worksheet.write(i, 0, sheet.cell_value(i, 0))
        worksheet.write(i, 1, sheet.cell_value(i, 1))
        worksheet.write(i, 2, sheet.cell_value(i, 2))
        worksheet.write(i, 3, sheet.cell_value(i, 3))
        worksheet.write(i, 4, sheet.cell_value(i, 4))
        worksheet.write(i, 5, sheet.cell_value(i, 5))

        # print(sent,sent_bt_list)
        lock, lockrule, lockruleword = CheckIsLockWord(sent, rulemap,
                                                       sent_bt_list)
        print(label, sent, sent_bt_list, lockrule, lock)
        worksheet.write(i, 6, str(lock))
        worksheet.write(i, 7, str(lockrule))
        worksheet.write(i, 8, str(sent_bt_list))

        # count_dict[label]=count_dict.get(label,0)+1
        if label == "是A推其他":
            if lock == True:
                count_dict["是A推其他_锁定"] = count_dict.get("是A推其他_锁定", 0) + 1
            else:
                count_dict["是A推其他_非锁定"] = count_dict.get("是A推其他_非锁定", 0) + 1

        if label == "是A推模糊":
            if lock == True:
                count_dict["是A推模糊_锁定"] = count_dict.get("是A推模糊_锁定", 0) + 1
            else:
                count_dict["是A推模糊_非锁定"] = count_dict.get("是A推模糊_非锁定", 0) + 1

        if label == "不是A推A":
            if lock == True:
                count_dict["不是A推A_锁定"] = count_dict.get("不是A推A_锁定", 0) + 1
            else:
                count_dict["不是A推A_非锁定"] = count_dict.get("不是A推A_非锁定", 0) + 1

        if label == "正确":
            if lock == True:
                count_dict["正确_锁定"] = count_dict.get("正确_锁定", 0) + 1
            else:
                count_dict["正确_非锁定"] = count_dict.get("正确_非锁定", 0) + 1

    pr_1 = float(count_dict["是A推其他_锁定"]) / float(count_dict["是A推其他_锁定"] +
                                                 count_dict["是A推其他_非锁定"])
    pr_2 = float(count_dict["是A推模糊_锁定"]) / float(count_dict["是A推模糊_锁定"] +
                                                 count_dict["是A推模糊_非锁定"])
    pr_3 = float(count_dict["不是A推A_非锁定"]) / float(count_dict["不是A推A_锁定"] +
                                                  count_dict["不是A推A_非锁定"])
    pr_4 = float(count_dict["正确_锁定"]) / float(count_dict["正确_锁定"] +
                                              count_dict["正确_非锁定"])

    print("是A推其他_正确率:%s  是A推模糊_准确率:%s  不是A推A_准确率:%s  正确_准确率:%s" %
          (pr_1, pr_2, pr_3, pr_4))

    workbook.save('自动续费问题_output.xls')
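
The four near-identical label branches can be folded into one counter keyed on (label, lock); a sketch with collections.Counter (the helper name is an assumption):

from collections import Counter

counts = Counter()
# inside the row loop:
#     counts[(label, lock)] += 1

def lock_rate(counts, label):
    # fraction of rows with this label that the rules locked
    locked = counts[(label, True)]
    total = locked + counts[(label, False)]
    return float(locked) / total if total else 0.0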
コード例 #21
0
    def generate_report_xls(self):
        filename = 'Cash-flow-ventas'
        periods = self.search_periods()

        import StringIO
        try:
            import xlwt
        except:
            raise ValidationError(
                'Por favor descargue el modulo xlwt de python '
                'desde\nhttp://pypi.python.org/packages/source/x/xlwt/xlwt-0.7.2.tar.gz\ne '
                'instalelo.')

        book = xlwt.Workbook()
        style = xlwt.easyxf(
            'font: bold on,height 240,color_index 0X36;'
            'align: horiz left;'
            'borders: left_color 0X36, right_color 0X36, top_color 0X36,'
            ' bottom_color 0X36, left thin, right thin, top thin, bottom thin;'
        )
        style_details = xlwt.easyxf('align: horiz left;')

        stype_details_amount = xlwt.easyxf('align: horiz right;')

        style_subtotal = xlwt.easyxf(
            'font: bold on, height 200, color_index 0X36;'
            'align: horiz left;')
        style_total_amount = xlwt.easyxf(
            'font: bold on, height 200, color_index 0X36;'
            'align: horiz right;'
            'pattern: pattern solid, fore_colour 0x16;')

        row = 0
        sheet = book.add_sheet('Cash flow de venta')

        sheet.col(0).width = 7000
        sheet.col(1).width = 3000
        sheet.col(2).width = 7000

        sheet.write(row, 0, "Cliente", style)
        sheet.write(row, 1, "Tipo", style)
        sheet.write(row, 2, "Referencia", style)
        sheet.write(row, 3, "Moneda", style)

        report_items = []

        col = 4
        sheet.write(row + 1, col - 1, 'USD', style)
        sheet.write(row + 2, col - 1, 'EUR', style)
        for period in periods:

            report_items_of_period = self._get_report_items(period, col)
            if report_items_of_period:
                report_items += report_items_of_period
                rate_usd = self.calculate_period_rate(
                    self.env.ref('base.USD').id, period)
                rate_eur = self.calculate_period_rate(
                    self.env.ref('base.EUR').id, period)
                sheet.write(row, col, period[0].strftime('%m/%Y'), style)
                sheet.write(row + 1, col, str(rate_usd), style)
                sheet.write(row + 2, col, str(rate_eur), style)
                col += 1
        row += 2
        row_sum = row + 1

        report_items.sort(key=lambda x: (x.partner.id, x.report_type.id))
        for key, grouped_by_partner in groupby(report_items,
                                               key=lambda x: x.partner.id):

            grouped_by_partner = list(grouped_by_partner)
            sheet.write(row + 1, 0, grouped_by_partner[0].partner.name,
                        style_subtotal)

            for keym, items in groupby(
                    grouped_by_partner,
                    key=lambda x: (x.report_type._name, x.report_type.id)):
                row += 1
                items = list(items)
                if items[0].report_type._name == 'sale.order.line':
                    report_type = 'Venta'
                    amount = items[0].report_type.price_subtotal
                else:
                    report_type = 'Contrato'
                    amount = sum(
                        l.price_subtotal for l in
                        items[0].report_type.recurring_invoice_line_ids)
                sheet.write(row, 1, report_type, style_details)
                sheet.write(
                    row, 2, items[0].report_type.order_id.name + ' - ' +
                    items[0].report_type.product_id.name_get()[0][1] +
                    ' ({})'.format(items[0].report_type.currency_id.name)
                    if items[0].report_type._name == 'sale.order.line' else
                    items[0].report_type.name, style_details)
                sheet.write(row, 3, items[0].report_type.currency_id.name,
                            style_details)
                for item in items:
                    sheet.write(row, item.column,
                                (amount * item.qty) * item.rate,
                                stype_details_amount)

        for x in range(4, col):
            column_start = xlwt.Utils.rowcol_to_cell(row_sum, x)
            column_end = xlwt.Utils.rowcol_to_cell(row, x)
            sheet.write(
                row + 1, x,
                xlwt.Formula('SUM(' + column_start + ':' + column_end + ')'),
                style_total_amount)
        """PARSING DATA AS STRING """
        file_data = StringIO.StringIO()
        book.save(file_data)
        """STRING ENCODE OF DATA IN SHEET"""
        out = base64.encodestring(file_data.getvalue())
        filename = filename + '.xls'
        self.write({'report': out})

        return {
            'type':
            'ir.actions.act_url',
            'url':
            '/web/binary/download_project?wizard_id=%s&filename=%s' %
            (self.id, filename + '.xls'),
            'target':
            'new',
        }
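
The SUM row above relies on xlwt.Utils.rowcol_to_cell plus xlwt.Formula; a self-contained sketch of that pattern:

import xlwt

wb = xlwt.Workbook()
ws = wb.add_sheet('totals')
for i, v in enumerate([10, 20, 30]):
    ws.write(i, 0, v)
cell_range = '%s:%s' % (xlwt.Utils.rowcol_to_cell(0, 0),
                        xlwt.Utils.rowcol_to_cell(2, 0))
ws.write(3, 0, xlwt.Formula('SUM(' + cell_range + ')'))  # =SUM(A1:A3)
wb.save('totals.xls')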
コード例 #22
0
ファイル: excel.py プロジェクト: phoenixhu/python-exercise
    def __init__(self):
        # create a new excel file
        self.workbook = xlwt.Workbook(encoding="ascii")
        # create a new worksheet
        self.worksheet = self.workbook.add_sheet('My Worksheet')
コード例 #23
0
ファイル: movie.py プロジェクト: yangyang0126/PythonSpider
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 30 14:40:03 2020

@author: zhaoy
"""

import requests
from bs4 import BeautifulSoup  # import BeautifulSoup
import xlwt  # bring in the Excel output module

# set up the Excel output
workbook = xlwt.Workbook()  # define the workbook
sheet = workbook.add_sheet('数据')  # add a sheet
head = ['年份', '电影名称', '豆瓣评分', '评价人数', '推荐理由', '电影链接']  # header row
for h in range(len(head)):
    sheet.write(0, h, head[h])  # write the header into Excel, starting at column 0


def GetRes(url):
    headers = {
        'user-agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
    }
    res = requests.get(url, headers=headers)
    soup = BeautifulSoup(res.text, 'html.parser')
    return soup


url = 'https://movie.douban.com/top250?start={}&filter='
urls = [url.format(num * 25) for num in range(10)]
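
The snippet stops after building urls; a hedged sketch of the remaining loop. The CSS classes item/title/rating_num are assumptions about the Top-250 markup, and only some header columns are filled:

row = 1
for u in urls:
    soup = GetRes(u)
    for item in soup.find_all('div', class_='item'):
        sheet.write(row, 1, item.find('span', class_='title').text)       # title
        sheet.write(row, 2, item.find('span', class_='rating_num').text)  # rating
        sheet.write(row, 5, item.find('a')['href'])                       # link
        row += 1
workbook.save('douban_top250.xls')  # hypothetical output name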
コード例 #24
0
# modify the contents of an existing spreadsheet and save it again
from xlutils.copy import copy
import xlrd

tem_execl = xlrd.open_workbook('日统计.xls')
new_execl = copy(tem_execl)

new_sheet = new_execl.get_sheet(0)
new_sheet.write(2, 1, 44)   # write a value (row, column, content)
new_sheet.write(3, 1, 55)

new_execl.save('日统计.xls')

# how to create a new xls file
import xlwt
new_workbook = xlwt.Workbook()
workbook = new_workbook.add_sheet("日统计")    # add a new worksheet (the variable holds a sheet, not a workbook)
workbook.write(0, 0, "test")  # write content (row, column, content)
new_workbook.save('d:/test.xls')



コード例 #25
0
ファイル: csv2xls.py プロジェクト: TianChun525/stash
        This function will not search any sub-directories.
    """

    matches = []
    for file in os.listdir(root):
        if fnmatch.fnmatch(file, pattern):
            matches.append(os.path.join(root, file))

    return matches


#------------------------------------------------------------------------------
assert (os.path.exists(path)), "Folder path not found"
ext = '*.csv'
files = locate(ext, path)
book = xlwt.Workbook()

# excel has tab-name size limitations of 31 characters
n_match = '(_ACT_)|(_NSW_)|(_NT_)|(_QLD_)|(_SA_)|(_TAS_)|(_VIC_)|(_WA_)|(_ZERO_)'
tab_names = []
base_names = []

for n in range(len(files)):
    assert (os.path.exists(files[n])), "Path not valid"
    base_names.append(os.path.splitext(os.path.basename(files[n]))[0])
    find = re.search(n_match,
                     base_names[n])  # will only find the first occurrence
    assert find is not None, "String match not found for %s\n Possible matches %s" % (
        files[n], n_match)
    tab_names.append(find.group())
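
With tab_names collected, the CSV contents still have to be copied into the workbook; a hedged sketch of that final step (assumes the tab names are unique and uses a made-up output name):

import csv

for n in range(len(files)):
    ws = book.add_sheet(tab_names[n])          # one tab per CSV, 31-char-safe name
    with open(files[n]) as src:
        for r, record in enumerate(csv.reader(src)):
            for c, value in enumerate(record):
                ws.write(r, c, value)
book.save(os.path.join(path, 'combined.xls'))  # hypothetical output name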
コード例 #26
0
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import xlwt
import time
import re
import requests
import os

chrome_options = Options()
chrome_options.add_argument('--headless')
browser = webdriver.Chrome(chrome_options=chrome_options)
#browser = webdriver.PhantomJS()
WAIT = WebDriverWait(browser, 20)
browser.set_window_size(1400, 900)

book = xlwt.Workbook(encoding='utf-8', style_compression=0)

sheet = book.add_sheet('蔡徐坤篮球', cell_overwrite_ok=True)
sheet.write(0, 0, '名称')
sheet.write(0, 1, '地址')
sheet.write(0, 2, '描述')
sheet.write(0, 3, '观看次数')
sheet.write(0, 4, '弹幕数')
sheet.write(0, 5, '发布时间')

n = 1

def search():
    try:
        print('opening bilibili....')
        browser.get("https://www.bilibili.com/")
コード例 #27
0
article_infors = []

for single_id in ids:
    url = 'https://hacker-news.firebaseio.com/v0/item/' + str(
        single_id) + '.json'
    submission_r = requests.get(url)
    print("Single Status Code:", submission_r.status_code)
    response_dict = submission_r.json()
    article_infor = {
        'title': response_dict['title'],
        'link': 'http://news.ycombinator.com/item?id=' + str(single_id),
        'comments': response_dict.get('descendants', 0)
    }
    article_infors.append(article_infor)

article_infors = sorted(article_infors,
                        key=itemgetter('comments'),
                        reverse=True)

file_name = 'HackerNews.json'
book = xlwt.Workbook(encoding="utf-8")
sheet1 = book.add_sheet("Sheet 1")

x = 0
for article_infor in article_infors:
    sheet1.write(x, 0, str(article_infor['title']))
    sheet1.write(x, 1, str(article_infor['link']))
    sheet1.write(x, 2, str(article_infor['comments']))
    x += 1

book.save("Top_Stories_HackerNews.xls")
コード例 #28
0
ファイル: datatools.py プロジェクト: dayanhu/commsevice_test
    def __init__(self, excelPath, sheetName):
        self.file = xlwt.Workbook()
        self.table = self.file.add_sheet(sheetName)

        self.excelPath = excelPath
コード例 #29
0
def makeExcel(row_value, col_value, name, content):
    filename = os.path.join(DATA_PATH, name)  # file path,name
    wb = xlwt.Workbook(encoding='utf-8')  # create workbook
    sheet = wb.add_sheet('sheet1')  # create sheet
    sheet.write(row_value, col_value, content)  # write data in workbook
    wb.save(filename)  # save file
コード例 #30
0
ファイル: pipelines.py プロジェクト: chenrun666/Spider
    def __init__(self):
        self.num = 1
        # instantiate an excel workbook object
        self.book = xlwt.Workbook(encoding='utf-8', style_compression=0)
        self.sheet = self.book.add_sheet("sheet1", cell_overwrite_ok=True)