def import_xls(request):
    """Django view: accept an Excel upload and import its posts/comments.

    Workbook layout expected (sheet 0): each post occupies one row with
    title, content, date string, username, email, likes in columns 0-5;
    its comments start three rows below and run until the first row whose
    column 1 is empty.  Two rows after the last comment the next post row
    begins.

    :param request: HttpRequest; the workbook arrives in ``request.FILES``.
    :return: rendered ``blog/uploadXL.html`` with a fresh upload form.
    """
    form = Upload_XLForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        form.save()
        form = Upload_XLForm()  # present an empty form after a successful upload
        obj = Upload_XL.objects.get(activated=False)
        # The saved file lives in the S3 bucket; read it back into memory.
        s3_session = Session(aws_access_key_id=AWS_ACCESS_KEY_ID,
                             aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
        bucket_object = s3_session.resource('s3').Bucket(
            AWS_STORAGE_BUCKET_NAME).Object(str(obj.file_name))
        content = bucket_object.get()['Body'].read()
        workbook = xlrd.open_workbook_xls(file_contents=content)
        sh = workbook.sheet_by_index(0)
        p_row = 1  # row 0 is the header row
        while p_row < sh.nrows:
            user, created = User.objects.get_or_create(
                username=sh.row(p_row)[3].value,
                email=sh.row(p_row)[4].value,
            )
            current_post = Post.objects.create(
                title=sh.row(p_row)[0].value,
                content=sh.row(p_row)[1].value,
                date_posted=datetime.datetime.strptime(
                    sh.row(p_row)[2].value, "%Y-%m-%d %H:%M:%S"),
                author=user,
                likes=sh.row(p_row)[5].value,
            )
            # Comments for this post start three rows below the post row.
            c_row = p_row + 3
            try:
                while sh.row(c_row)[1].value != '':
                    comment_user, c_created = User.objects.get_or_create(
                        username=sh.row(c_row)[3].value,
                        email=sh.row(c_row)[4].value,
                    )
                    Comment.objects.create(
                        post=current_post,
                        author=comment_user,
                        text=sh.row(c_row)[1].value,
                        created_date=datetime.datetime.strptime(
                            sh.row(c_row)[2].value, "%Y-%m-%d %H:%M:%S"),
                    )
                    c_row += 1
            except IndexError:
                # BUG FIX: was a bare `except: pass`, which also hid real
                # errors (bad dates, DB failures).  Only the expected case
                # is benign: the last post's comments reach the final row,
                # so `sh.row(c_row)` runs past the sheet end.
                pass
            p_row = c_row + 2
        obj.activated = True
        obj.save()
        obj.delete()  # upload record is no longer needed once imported
    return render(request, 'blog/uploadXL.html', {'form': form})
def nsfocus_clean(report_dir='./扫描报告/绿盟'):
    """Harvest vulnerabilities from NSFOCUS scanner reports under *report_dir*.

    Walks the directory tree for per-host ``.xls`` reports (skipping temp
    files starting with ``~``, ``index`` files and non-.xls files), reads the
    "远程漏洞" worksheet of each, and appends every Vuln that passes
    ``vuln_filter`` to the module-level ``vuln_result`` list.  Bumps the
    module-level ``ip_count`` once per report processed.
    """
    global vuln_result
    global ip_count
    global vuln_name_filter
    global vuln_detail_filter
    # Column layout of the remote-vulnerability worksheet, keyed by the
    # Vuln attribute each column feeds.
    col_idx = {
        'port': 0,
        'name': 3,
        'risk': 5,
        'detail': 17,
        'firm': 18,
        'cve': 13,
        'result': 19,
        'discot': 12,
    }
    for dirpath, _subdirs, filenames in os.walk(report_dir, topdown=False):
        for report_name in filenames:
            not_a_report = (report_name.startswith('~')
                            or report_name.startswith("index")
                            or not report_name.endswith('.xls'))
            if not_a_report:
                continue
            ip_count += 1
            report_path = os.path.join(dirpath, report_name)
            workbook = xlrd.open_workbook_xls(report_path,
                                              formatting_info=True)
            sheet = workbook.sheet_by_name("远程漏洞")
            print(f'[*] -> {report_path}')
            # Data rows start at index 4; rows above are the sheet header.
            for row_idx in range(4, sheet.nrows):
                vuln = Vuln()
                vuln.ip = report_name.split('.xls')[0]
                vuln.discom = '绿盟漏扫器'
                for attr in list(vars(vuln).keys()):
                    if attr in ('ip', 'discom'):
                        continue
                    raw = sheet.cell_value(row_idx, col_idx[attr])
                    if attr == 'discot':
                        # Excel stores the discovery time as a date serial;
                        # keep only the "YYYY-MM-DD" part.
                        raw = str(
                            datetime.datetime(*xlrd.xldate_as_tuple(
                                raw, workbook.datemode))).split(' ')[0]
                    vars(vuln)[attr] = cellv_filter(raw)
                if vuln_filter(vuln):
                    vuln_result.append(vuln)
def parse_input_file():
    """Parse the deputies spreadsheet into a list of DeputadoExcel records.

    Column map (sheet 0, row 0 is the header):
        0 'Nome Parlamentar'   1 'Partido'            2 'UF'
        3 'Titular/Suplente/Efetivado'  4 'Anexo'     5 'Gabinete'
        6 'Telefone'           7 'Mês Aniversário'    8 'Dia Aniversário'
        9 'Correio Eletrônico' 10 'Nome Civil'

    :return: list of DeputadoExcel, one per data row.
    :raises ValueError: propagated from cell conversion (e.g. a non-numeric
        'Gabinete'), after printing the offending row for diagnosis.
    """
    filepath = os.path.join(get_local_dados_foldername(), filename)
    wb = xlrd.open_workbook_xls(filename=filepath)
    sh = wb.sheet_by_index(0)
    print("{0} {1} {2}".format(sh.name, sh.nrows, sh.ncols))
    print("Cell D30 is {0}".format(sh.cell_value(rowx=29, colx=3)))
    deputados = []
    for i in range(1, sh.nrows):  # skip the header row
        row = None
        try:
            row = sh.row(i)
            nomeparlamentar = row[0].value
            partido = row[1].value
            uf = row[2].value
            titular_outro = row[3].value
            anexopredio = row[4].value
            ngabinete = int(row[5].value)
            # Normalize the office phone: strip the dash separator.
            fixphone = row[6].value
            fixphone = fixphone.replace('-', '')
            email = row[9].value
            nomecivil = row[10].value
            deputados.append(DeputadoExcel(nomeparlamentar, partido, uf,
                                           titular_outro, anexopredio,
                                           ngabinete, fixphone, email,
                                           nomecivil))
        except ValueError:
            # BUG FIX: was `raise ValueError(e)`, which wrapped the original
            # exception and lost its traceback.  Print the failing row for
            # diagnosis, then re-raise the original exception unchanged.
            print(row)
            raise
    return deputados
def get_data(sheetnames):
    """Extract selected-date series from result.xls and pickle them.

    For every sheet name in *sheetnames*, each column from index 2 onward
    becomes a series labelled ``"<sheet>::<header>"`` (newlines in headers
    and values escaped as ``\\n``).  Rows whose date cell (column 0) appears
    in the module-level ``dates`` collection contribute one value per
    series.  Writes ``{'meta': [labels], 'data': {date: {label: [values]}}}``
    to ``data.pkl``.
    """
    workbook = xlrd.open_workbook_xls('result.xls')
    data = {}
    meta = []
    for sheet_name in sheetnames:
        sheet = workbook.sheet_by_name(sheet_name)
        # Header labels are constant per sheet, so compute each column's
        # series key once instead of per data row.
        series_keys = {}
        for col in range(2, sheet.ncols):
            header = sheet.cell_value(0, col).replace('\n', '\\n')
            series_keys[col] = f'{sheet_name}::{header}'
            meta.append(series_keys[col])
        for row in range(1, sheet.nrows):
            date = sheet.cell_value(row, 0)
            if date not in dates:
                continue
            per_date = data.setdefault(date, {})
            for col, key in series_keys.items():
                per_date.setdefault(key, []).append(
                    sheet.cell_value(row, col).replace('\n', '\\n'))
    with open('data.pkl', 'wb') as out:
        pickle.dump({'meta': meta, 'data': data}, out)
import pandas as pd
import numpy as np
import xlrd
from matplotlib import pyplot as plt
from pandas_profiling import ProfileReport
from sklearn.metrics import mean_squared_error
from sklearn.metrics import average_precision_score

# Read the data set from the Excel file.
data_set = pd.read_excel("teste_smarkio_lbs.xls")
print(data_set)

# Use the xlrd library directly to manipulate the data inside the sheet.
book = xlrd.open_workbook_xls("teste_smarkio_lbs.xls")
page1 = book.sheet_by_index(0)

# One-dimensional descriptive-analysis report exported to an .html file.
profile = ProfileReport(data_set, title="Análise Exploratória dos Dados")
# BUG FIX: the original line was
#     profile.to_notebook_iframe((output_file="..."))
# which is a SyntaxError (a keyword argument inside a plain parenthesized
# expression), and to_notebook_iframe() renders inline in a notebook rather
# than writing a file.  to_file() is the API that exports the HTML report.
profile.to_file(output_file="analise_exploratoria_dos_dados.html")

# Put the data into lists for further processing.
limit = len(page1.col_values(colx=0)) - 1  # number of data rows (header excluded)
print(limit)
i = 1
list_true_class = page1.col_values(start_rowx=1, colx=3)
list_pred_class = page1.col_values(start_rowx=1, colx=0)
print(list_true_class)
import xlrd as a

# BUG FIX / NOTE(review): the path points at a .xlsx file, but
# open_workbook_xls() only understands the legacy BIFF .xls format and
# fails on anything else.  open_workbook() dispatches on the actual file
# content instead (and still handles .xls, so this is backward compatible).
# Beware: xlrd >= 2.0 dropped .xlsx support entirely -- if the file really
# is xlsx, openpyxl is required; confirm the file's true format.
data = a.open_workbook(r'C:/Users/hujl/Desktop/1.xlsx')
print(data.sheet_names())
import csv
import xlrd

# Merge the per-tile prediction workbooks produced by raster_input.py into
# one CSV per month.
original_number = 2907  # rows 0..2907 hold the original train/val/test data
year = "2016"

for index in range(5, 13):  # months May..December
    month = str(index)
    model_label = "20220406070531"
    data_file_gen = "F:/result/" + year + "/"  # folder with the predicted-result .xls files
    model_name = "gtnnwr"
    # Destination for this month's merged results.
    save_file_path = "F:/result/" + year + "/" + month + "/res_merge" + model_label + ".csv"
    # After raster_input.py, empty tiles are dropped from the 128 raster
    # blocks, leaving 75 tiles (75 workbooks) per month.
    for i in range(75):
        data_file_path = (data_file_gen + month + "/test_" + str(i) + "/"
                          + model_name + "/test_" + str(i)
                          + "_10_cv10_" + model_label + "_restore.xls")
        wb = xlrd.open_workbook_xls(data_file_path)
        sheet = wb.sheet_by_name("data")
        merged_rows = []
        for j in range(sheet_start := original_number + 1, sheet.nrows):
            # Columns 3 and 4 are the coordinates; column 22 is the
            # predicted norm value.
            merged_rows.append(sheet.row_values(j, 3, 5)
                               + sheet.row_values(j, 22, 23))
        with open(save_file_path, 'a+', newline='') as f:
            writer = csv.writer(f)
            # BUG FIX: the header row used to be written before EVERY tile,
            # scattering 75 header lines through each merged CSV.  Write it
            # only once, before the first tile.  (Also: no explicit close()
            # -- the `with` block closes the file -- and the header list no
            # longer shadows the builtin `list`.)
            if i == 0:
                writer.writerow(['lat', 'lon', 'result'])
            writer.writerows(merged_rows)
    print(save_file_path + " is ok")
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
# @Project :webAutoTest
# @File :read_excel
# @Date :2020/12/31 18:07
# @Author :吴利民
# @Email :[email protected]
# @Software :PyCharm
-------------------------------------------------
"""
import xlrd
import config

# NOTE(review): the path ends in .xlsx, but open_workbook_xls() only reads
# the legacy BIFF .xls format and will fail on a real xlsx file (xlrd >= 2.0
# cannot read .xlsx at all; that needs openpyxl) -- confirm the file format.
data = xlrd.open_workbook_xls('E:\\job\\CRM系统\\data\\CRM系统_测试用例.xlsx')
table = data.sheets()[0]
# BUG FIX: nrows/ncols are plain int attributes on an xlrd Sheet, not
# methods; calling them raised "TypeError: 'int' object is not callable".
rows = table.nrows
cols = table.ncols
print(rows)