Example #1
def make_protected(workbook_male, workbook_female, worksheet_male, worksheet_female, country):

    row_country_m, col_country_m = find_row_col_index(country, worksheet_male)
    row_country_f, col_country_f = find_row_col_index(country, worksheet_female)

    wb_male = copy(workbook_male)
    wb_female = copy(workbook_female)

    for i in range(17, 258):
        for j in range(3, 50):
            if i != row_country_m:
                wb_male.get_sheet(0).write(i, j, worksheet_male.cell(i, j).value, xlwt.easyxf('protection: cell_locked false;'))
            else:
                wb_male.get_sheet(0).write(i, j, worksheet_male.cell(i, j).value, xlwt.easyxf('protection: cell_locked true;'))
    wb_male.get_sheet(0).set_protect(True)
    wb_male.save('Data/WPP2015_POP_F01_2_TOTAL_POPULATION_MALE.XLS')

    for i in range(17, 258):
        for j in range(3, 50):
            if i != row_country_f:
                wb_female.get_sheet(0).write(i, j, worksheet_female.cell(i, j).value, xlwt.easyxf('protection: cell_locked false;'))
            else:
                wb_female.get_sheet(0).write(i, j, worksheet_female.cell(i, j).value, xlwt.easyxf('protection: cell_locked true;'))
    wb_female.get_sheet(0).set_protect(True)
    wb_female.save('Data/WPP2015_POP_F01_2_TOTAL_POPULATION_FEMALE.XLS')
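A minimal calling sketch for the function above. The driver below is an assumption, not part of the original: it reuses the two workbook paths from the save() calls, assumes the population data sits on sheet 0, and opens with formatting_info=True so that xlutils.copy keeps the original formatting; the country argument is made up.

import xlrd

wb_m = xlrd.open_workbook('Data/WPP2015_POP_F01_2_TOTAL_POPULATION_MALE.XLS',
                          formatting_info=True)
wb_f = xlrd.open_workbook('Data/WPP2015_POP_F01_2_TOTAL_POPULATION_FEMALE.XLS',
                          formatting_info=True)
# Lock only the row of the chosen country; every other data cell stays editable.
make_protected(wb_m, wb_f, wb_m.sheet_by_index(0), wb_f.sheet_by_index(0), 'Germany')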
Example #2
def main():
    while(1):
        file_input = raw_input("Nombre del archivo: ")
        ask = raw_input("Esta seguro? (y/n)  ")
        if ask == 'y':
            break
            
     
    while (1):
        row_input = raw_input("Introducir el numero de la fila: ")
        row = int(row_input)
        ask = raw_input("Esta seguro? (y/n)  ")
        if ask == 'y':
            break
            
    
    array = []
    array = ServerConnect()
    
    # Modify the Excel file
    rb = open_workbook(file_input, formatting_info=True)
    wb = copy(rb)             # a writable copy (I can't read values out of this, only write to it)
    w_sheet = wb.get_sheet(0) # the sheet to write to within the writable copy
    
    # Write to the Excel file
    counter = 1
    for x in array:
        w_sheet.write(row - 1, counter, x)
        counter += 1
    
    # Save the document
    wb.save(file_input)

    # Read the file to find the difference
    workbook = open_workbook(file_input)
    worksheet = workbook.sheet_by_index(0)
    cell_value1 = worksheet.cell_value(row - 1, 4)
    value1 = float(cell_value1)
    cell_value2 = worksheet.cell_value(row - 2, 4)
    value2 = float(cell_value2)
    dif = value1 -value2
    
    # Modify the Excel file
    rb = open_workbook(file_input, formatting_info=True)
    wb = copy(rb)             # a writable copy (I can't read values out of this, only write to it)
    w_sheet = wb.get_sheet(0) # the sheet to write to within the writable copy
    
    w_sheet.write(row - 1, 6, dif)
    
    wb.save(file_input)

    raw_input("Datos actualizados!")
Example #3
def write(name,match,data):
	file = name + '.xls'
	try:
		w = copy(open_workbook(file))
	except:
		initExcel(name)
		w = copy(open_workbook(file))
	row = match
	col = 1
	for d in data:
		w.get_sheet(0).write(row,col,str(d))
		col += 1
	w.save(file)
	print("Data written to " + file)
Example #4
def setup_workbook():
	rb = open_workbook('sample.xls', formatting_info=True, on_demand=True )
	wb = copy(rb)
	p, q = find_boundries(rb)
	position = find_position(rb, q)
	position = position - 1
	print p
	print q
	print position
	for x in range(position,p):
		val = rb.sheet_by_index(0).cell(0,x).value
		print "Input data for " + val
		response = raw_input("enter the response: ")
		number_of_response = len(response)
		if number_of_response != q-1 :
			print number_of_response
			print "missing responses"
			exit()
		else:	
			for y in range(1,q):
				if y >= q-6 :
					resp = int(raw_input("enter range"))
					resp = (resp/20.0)*7.0
					wb.get_sheet(0).write(y,x,resp)
				else:
					wb.get_sheet(0).write(y,x,int(response[y-1]))
			wb.get_sheet(0).write(y+1,x+1,"done")
			wb.get_sheet(0).write(y+1,x,"")
			os.system("rm sample.xls")
			print "Saving records"
			wb.save('sample.xls')
Example #5
 def __init__(self, filename, config, level = logging.DEBUG):
     """TabLinker constructor
     
     Keyword arguments:
     filename -- String containing the name of the current Excel file being examined
     config -- Configuration object, loaded from .ini file
     level -- A logging level as defined in the logging module
     """
     self.config = config
     self.filename = filename
      
     self.log = logging.getLogger("TabLinker")
     self.log.setLevel(level)
     
     self.log.debug('Initializing Graphs')
     self.initGraphs()
     
     self.log.debug('Setting Scope')
     basename = os.path.basename(filename)
     basename = re.search('(.*)\.xls',basename).group(1)
     self.setScope(basename)
     
     self.log.debug('Loading Excel file {0}.'.format(filename))
     self.rb = open_workbook(filename, formatting_info=True)
     
     self.log.debug('Reading styles')
     self.styles = Styles(self.rb)
     
     self.log.debug('Copied Workbook to writable copy')
     self.wb = copy(self.rb)
Example #6
def XlsDictWriter(f, ld, sheet_name='sheet'):
	""""f is a string for a filename, sheet_name is the name of the sheet to be written, can't be overwritten. ld is limited to 65536 rows due to xlwt."""
	try:
		tmp = xlrd.open_workbook(f)
		book = copy(tmp)
	except:
		book = xlwt.Workbook(encoding = 'UTF-8')
	sheet = book.add_sheet(sheet_name)
	if len(ld) > 0:
		header = set()
		for row in ld:
			for r in row.keys():
				header.add(r)
		for i,key in enumerate(header):
			sheet.write(0, i, label = key)
		for i,r in enumerate(ld):
			for j,key in enumerate(header):
				try:
					sheet.write(i+1, j, label = r[key])
				except:
					sheet.write(i+1, j, label = "N/A")
		book.save(f)
		return True
	#~ if len(ld) > 0:
		#~ ks = ld[0].keys()
		#~ for i,key in enumerate(ks):
			#~ sheet.write(0, i, label = str(key))
		#~ for i,r in enumerate(ld):
			#~ for j,key in enumerate(ks):
				#~ sheet.write(i+1, j, label = str(r[key]))
	else:
		return False
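A short usage sketch for XlsDictWriter. The file name, sheet name and dict keys below are made up for illustration; note that xlwt's add_sheet() raises if a sheet with the same name already exists in the file.

rows = [{'name': 'alpha', 'count': 3}, {'name': 'beta', 'count': 7}]
# Appends a sheet called 'run1' to report.xls, or creates the file if it does not exist yet.
if XlsDictWriter('report.xls', rows, sheet_name='run1'):
    print('wrote %d rows' % len(rows))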
Example #7
def main(argv):
    try:
       opts, args = getopt.getopt(argv,"hi:d:")
    except getopt.GetoptError:
       print sys.argv[0] + ' -i <inputfile> -d <directory>'
       sys.exit(2)
    for opt, arg in opts:
       if opt == '-h':
          print sys.argv[0] + ' -i <inputfile> -d <directory>'
          sys.exit()
       elif opt in ("-i"):
          inputfile = arg
       elif opt in ("-d"):
          directory = arg
    onlyfiles = [ f for f in listdir(directory) if isfile(join(directory,f)) ]
    
    wb = open_workbook(inputfile)

    sheet = wb.sheet_by_index(0)

    book = copy(wb)
    csheet = book.get_sheet(0)
    
    for row in range(sheet.nrows):
        v = sheet.cell(row,1).value
        if isinstance(v, float):
            v = int(v)
        name = str(v) + '.pdf'
        check = name in onlyfiles;
        if check:
            csheet.write(row,0,'1')
            
    book.save('result.xls')
Example #8
def GetQ(session, baseurl, row):
    rb = open_workbook('zhihu.xlsx')
    wb = copy(rb)
    ws = wb.get_sheet(0)

    for i in range(1, 11):
        url = ''.join([baseurl, '?page=', str(i)])
        topicObj = topic(session, url)
        row_topic = row
        for ques in topicObj.question():
            ws.write(row_topic, 0, ques)
            row_topic += 1
            print '%d, 1' % row_topic
        links = topicObj.links()
        for link in links:
            try:
                print link
                answerObj = answer(session, link)
                ws.write(row, 1, answerObj.answerednum())
                ws.write(row, 2, answerObj.type())
                ws.write(row, 3, answerObj.watched())
                row += 1
            except TypeError:
                continue
            except AttributeError:
                continue
            print '%d, 123' % row
        wb.save('zhihu.xlsx')

    return row
Example #9
    def getuptime(self, path, name):
        # initialize styles
        style = xlwt.XFStyle()
        style1 = xlwt.XFStyle()

        f = setfont.Font(0, 220)
        f1 = setfont.Font(4, 300)
        style.font = f.fontset(0, 220)
        style1.font = f1.fontset(4, 300)
        
        work_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
        chart_dir = os.path.join(work_dir, 'chart')
        
        if os.path.exists(os.path.join(path, 'uptime.csv')):           
            reboot = self.getmonkeydata(os.path.join(path, 'uptime.csv'))
            rb = open_workbook(os.path.join(path, '('+name+')'+'performance.xls'), formatting_info=True)
            wb = copy(rb)
            w_sheet = wb.add_sheet('uptime')
            w_sheet.write(0, 0, u'开始时间测试报告', style1)
            
            for i in range(0, 10):
                w_sheet.col(i).width = 0x0d00 + 2000
            
            try:                
                for i in range(len(reboot)):
                    w_sheet.write(i+1, 0, reboot[i][0].decode('UTF-8'), style)
            except IndexError:
                pass
        
            wb.save(os.path.join(path, '('+name+')'+'performance.xls'))
        else:
            pass
Example #10
def init():

    global ws,ws2,ws3,firstTime,wb,rs

    if os.path.exists(excelFileName):

        rb = open_workbook(excelFileName, formatting_info=True)

        rs = rb.sheet_by_index(0)

        wb = copy(rb)

        ws = wb.get_sheet(0)

        ws2 = wb.get_sheet(1)

        ws3 = wb.get_sheet(2)

        firstTime = False

    else:

        wb = Workbook()

        ws = wb.add_sheet('string')

        ws2 = wb.add_sheet('string_array')

        ws3 = wb.add_sheet('plurals')

        firstTime = True
Example #11
def missense_vs_age(datasorted):
	datasorted_LUSC = open(datasorted, 'r')
	data = datasorted_LUSC.readlines();
	age_dictionary = {}
	ages = [50,60,70,80] 
	from xlrd import open_workbook
	from xlutils.copy import copy
	book = open_workbook('genecomparison2.xls', formatting_info=True)
	wbook = copy(book)
	sheet9= wbook.get_sheet(8)
	missense_total = 0
	a = 0 
	for line in data: 
		cols = line.split(',')
		if cols[1] != '[Not Available]':
			age = int(cols[1])
		mutation = cols[3].rstrip('\n')
		if mutation == "Missense_Mutation":
			missense_total = missense_total + 1
			decade = agegroup(age) # agegroup function groups any ages under 30 and any ages over 80 in that group 
			if decade in age_dictionary: 
				age_dictionary[decade].total = age_dictionary[decade].total + 1
			else: 
				age_dictionary[decade] = missense_count()
	print age_dictionary[decade]
	print age_dictionary
	print 'missense total: ', missense_total 
	sheet9.write(0,1,'Missense Mutations')
	sheet9.write(0,0,'Age Group')
	rowgenename = sheet9.row(0)
	for group in ages:
		a = a +1
		sheet9.write(a, 0, group) 
		sheet9.write(a, 1, age_dictionary[group].total) 
	wbook.save('genecomparison2.xls')		
Example #12
def main():
	data = xlrd.open_workbook('result.xls')
	t0 = data.sheets()[0]  # first sheet
	rows = t0.nrows
	pubDate = '2018-04-12T12:00:00'
	
	
	json_xls = copy(data)
	table = json_xls.get_sheet(0)
	
	for i in range(1,rows):  # rows
		id,title,headline,QuestionacceptedAnswer = '','','',''
		for j in range(11):  # columns
			if j == 7:
				id = 'https://m.jd.com/phb/zhishi/'+t0.row_values(i)[j]+'.html'				
			elif j == 8:
				title = t0.row_values(i)[j]
				headline = t0.row_values(i)[j]				
			elif j == 9:
				QuestionacceptedAnswer = t0.row_values(i)[j]
			
		_str = generate_json(id,title,pubDate,headline,QuestionacceptedAnswer)
		
		if len(t0.row_values(i)[10]+_str) < 32767:
			table.write(i,11,t0.row_values(i)[10]+_str)
		else:
			table.write(i,11,'String longer than 32767 characters')
		print i
		
	json_xls.save('result.xls')
	
	
	'''
Example #13
 def getstress(self, path, name):           
       f = setfont.Font(0, 250)
       f1 = setfont.Font(4, 300)
       
       style = xlwt.XFStyle()
       style1 = xlwt.XFStyle()
       
       style.font = f.fontset(0, 250)
       style1.font = f.fontset(4, 300)
       
       try:
             stress = self.getstressdata(path)     
             rb = open_workbook(os.path.join(path, '('+name+')'+'performance.xls'), formatting_info=True)       
             wb = copy(rb) 
             w_sheet = wb.add_sheet('stress')
             w_sheet.write(0, 0, u'压力测试报告', style1)
             
       except KeyboardInterrupt:
             pass
       try:                  
             for i in range(0,10):
                   w_sheet.col(i).width = 0x0d00 + 2500    
            
             for i in range(len(stress)):
                   for j in range(len(stress[i])):
                               w_sheet.write(i+1, j, unicode(stress[i][j], 'UTF-8'), style)
       except KeyboardInterrupt:
             pass
       wb.save(os.path.join(path, '('+name+')'+'performance.xls'))
Example #14
	def appendexcel(self,whichone,**values):
		rsheet=self.rxld.sheet_by_index(whichone)
		rows=rsheet.nrows-1
		while(rsheet.cell_value(rows,5)==''):
			rows-=1
		wxls=copy(self.rxld)
		wsheet=wxls.get_sheet(whichone)
		wsheet.write(rows,0,values['name'],self.style1)
		wsheet.write(rows,1,values['address'],self.style1)
		wsheet.write(rows,2,values['product'],self.style1)
		wsheet.write(rows,3,values['price'],self.style1)
		wsheet.write(rows,4,values['counts'],self.style1)
		wsheet.write(rows,5,values['fee'],self.style1)
		wsheet.write(rows,6,float(values['price'])*int(values['counts'])+float(values['fee']),self.style1)
		wsheet.write(rows,7,'%.2f'%float(self.changerate),self.style1)
		wsheet.write(rows,8,'%.2f'%((float(values['price'])*int(values['counts'])+float(values['fee']))/100.0*float(self.changerate)),self.style1)
		total=0
		maxfee=float(values['fee'])
		maxrate=float(values['rate'])
		for i in xrange(1,rows):
			fee=float(rsheet.cell_value(i,5))
			rate=float(rsheet.cell_value(i,7))
			total+=float(rsheet.cell_value(i,6))-fee
			if fee>maxfee:
				maxfee=fee
			if rate>maxrate:
				maxrate=rate			
		wsheet.write(rows+1,5,maxfee,self.style2)
		wsheet.write(rows+1,6,total+maxfee+float(values['price'])*int(values['counts']),self.style2)
		wsheet.write(rows+1,7,'%.2f'%maxrate,self.style2)
		wsheet.write(rows+1,8,'%.2f'%(maxrate*(total+float(values['fee'])+float(values['price'])*int(values['counts']))/100.0),self.style2)
		wxls.save(self.file)
Example #15
def add_result(status1,tc_no1,exp_result1,act_result1,comments1):
       global ws
       global Locaitonfile
       status = status1
       tc_no = tc_no1
       act_result = act_result1
       exp_result = exp_result1
       comment = comments1
       #fail_color = easyxf('pattern: pattern solid, fore_colour red')
       #pass_color = easyxf('pattern: pattern solid, fore_colour green')
       wbR = xlrd.open_workbook(Locaitonfile)
       ws1 = wbR.sheet_by_name('Summary')
       ws = wbR.sheet_by_name('Validations')
       row_count = ws.nrows
       print row_count
       wbW = copy(wbR)       
       wbW.get_sheet(1).write(row_count,0,status)
       wbW.get_sheet(1).write(row_count,1,tc_no)
       wbW.get_sheet(1).write(row_count,2,exp_result)
       wbW.get_sheet(1).write(row_count,3,act_result)
       wbW.get_sheet(1).write(row_count,4,comment)
       wbW.get_sheet(1).col(0).width = 6000
       wbW.get_sheet(1).col(1).width = 4000
       wbW.get_sheet(1).col(2).width = 20000
       wbW.get_sheet(1).col(3).width = 20000
       wbW.get_sheet(1).col(4).width = 20000
       wbW.save(Locaitonfile)
       summary(Locaitonfile)
Example #16
    def write(self, tpl, dest_file, tuple_args):
        '''
        Args:
          tuple_args: result set fetched from the database (list)
          dest_file: name of the generated Excel file
          tpl: the Excel template
        '''

        # cell style
        style = xlwt.XFStyle()
        font = xlwt.Font()
        font.name = 'SimSun'  # set the font
        style.font = font
        tpl_book = open_workbook(tpl, formatting_info=True)
        target = copy(tpl_book)
        i = 0
        for arg in tuple_args:
            sheet = target.get_sheet(i) # a writable copy
            i += 1
            size = len(arg)
            if size == 0:
                continue
            cols = len(arg[0])
            for row in xrange(0,size):
                for col in xrange(0, cols):
                    sheet.write(row + 1, col, arg[row][col], style)
        target.save(dest_file)
Example #17
 def make_excel(self, fn_template):
     """ Export report to .xls file fn_save. Variables found in fn_template
     are filled in.
     fn_template should have Python-style format strings in cells that
     should be filled in, e.g. {TiedotNimi} would fill the cell using
     the corresponding key in self.data.
     fn_template must be in .xls (not xlsx) format, since formatting info
     cannot be read from xlsx (xlutils limitation).
     xlrd and friends are weird, so this code is also weird. """
     rb = open_workbook(fn_template, formatting_info=True)
     wb = copy(rb)
     r_sheet = rb.sheet_by_index(0)
     w_sheet = wb.get_sheet(0)
     # loop through cells, conditionally replace fields with variable names.
     # for unclear reasons, wb and rb are very different structures,
     # so we read from rb and write to corresponding cells of wb
     # (using the hacky methods above)
     for row in range(r_sheet.nrows):
         for col in range(r_sheet.ncols):
             cl = r_sheet.cell(row, col)
             varname = cl.value
             if varname:  # format non-empty cells
                 newval = self._cond_format(varname, self.data)
                 # apply replacement dict only if formatting actually did
                 # something. this is to avoid changing text-only cells.
                 if newval != varname:
                     for str, newstr in (iter(self.cell_postprocess_dict.
                                         items())):
                         if str in newval:
                             newval = newval.replace(str, newstr)
                 _setOutCell(w_sheet, col, row, newval)
     return wb
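The _setOutCell helper used above (and setOutCell in Example #30) is not shown in the snippet. A sketch of the widely circulated xlwt recipe it most likely refers to, which overwrites a cell while preserving the cell's existing format index, looks like this; treat it as an assumption about the author's helper, since it relies on xlwt's name-mangled internals.

def _getOutCell(out_sheet, col_index, row_index):
    # Peek at a cell that has already been written to the writable sheet.
    row = out_sheet._Worksheet__rows.get(row_index)
    if not row:
        return None
    return row._Row__cells.get(col_index)

def _setOutCell(out_sheet, col, row, value):
    # Write the value, then restore the previous cell's format (xf) index.
    previous_cell = _getOutCell(out_sheet, col, row)
    out_sheet.write(row, col, value)
    if previous_cell:
        new_cell = _getOutCell(out_sheet, col, row)
        if new_cell:
            new_cell.xf_idx = previous_cell.xf_idx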
Example #18
    def writeXls(self,book):
        print "write xls ..."
        #sheet=book
        #r_xls = xlrd.open_workbook(filename) 
        sheet_reader = book.sheet_by_index(5)
        w_xls = copy(book) 
        sheet_write = w_xls.get_sheet(5) 
        cols = sheet_reader.ncols 
        if sheet_reader.nrows > 0 and sheet_reader.ncols > 0:
            #for row in range(7, r_sheet.nrows):
            for row in range(7, sheet_reader.nrows ):    
                if len(sheet_reader.cell(row, 0).value)!=12:
                    continue
                banxue=self.findBanxue(sheet_reader.cell(row, 0).value)
                #print banxue.decode('utf-8')
                sheet_write.write(row, 15, banxue.decode('utf-8')) 

        sheet_reader = book.sheet_by_index(8)
        sheet_write = w_xls.get_sheet(8) 
  

        if sheet_reader.nrows > 0 and sheet_reader.ncols > 0:
            #for row in range(7, r_sheet.nrows):
            for row in range(7, sheet_reader.nrows ):    
                if len(sheet_reader.cell(row, 0).value)<=0:
                    continue
                jincai=self.findJincai(sheet_reader.cell(row, 0).value)
                #print banxue.decode('utf-8')
                sheet_write.write(row, 21, jincai.decode('utf-8')) 


        w_xls.save(self.outputFile)
Example #19
 def __init__(self, path):
     # cache the template sheet setup to avoid re-constructing it; this is purely a speed-up
     self._sheet_setup= {}
     self._rb = xlrd.open_workbook(path,formatting_info=True)
     # save an explicit (name:sheet_pos) mapping, which will be used later
     self._sheet_arrange = self._get_sheet_arrange(self._rb)
     self._wb = copy(self._rb)
Example #20
def addItems(filePath):
    if os.path.exists(filePath):
        readBook = xlrd.open_workbook(filePath)
        table = readBook.sheets()[0]
        rows = table.nrows
        cols = table.ncols
        # file name
        fileID = None
        writeBook = copy(readBook)

        for row in range(rows):
            cik = table.cell(row, 0).value
            acceptanceDateTime = table.cell(row, 2).value[:4]
            print acceptanceDateTime, cik , row
            jsonDic = getItems(acceptanceDateTime, cik)

            idx = 13
            try:  # company name
                if jsonDic['companyName'] != None:
                    writeBook.get_sheet(0).write(row, idx, jsonDic['companyName'])
            except:
                pass
            # try:  # company ID
            #     if jsonDic['period'] != None:
            #         writeBook.get_sheet(0).write(row, idx + 1, jsonDic['period'])
            # except:
            #     pass
            writeBook.save(filePath)
        print '写入成功。。。'
Example #21
def new_col(path,newpath,val1,val2,
            sheet_index=None,sheet_name=None,
            row1=None, col1=None):
    logger.info(str(('new column for sheet', sheet_index, val1, val2)))
    # open our xls file; there are lots of extra default options in this call,
    # for logging etc., so take a look at the docs
    book = xlrd.open_workbook(path) 

    worksheet = None
    if sheet_name:
        worksheet = book.sheet_by_name(sheet_name)
        sheet_index = worksheet.number
    else:
        worksheet = book.sheet_by_index(sheet_index) 
    num_rows = worksheet.nrows
    num_cols = worksheet.ncols

    wb = copy(book)
    worksheet = wb.get_sheet(sheet_index) 

    start_col = col1 or num_cols
    start_row = row1 or 0

    worksheet.write(start_row,start_col,val1)
    for x in range(start_row+1,num_rows):
        #worksheet.write(x,start_col,val2)
        # NOTE: will lose chars in ascii encoding
        worksheet.write(x,start_col,smart_str(val2, 'ascii', errors='ignore'))    
    wb.save(newpath)   
    print 'wrote new workbook col to:', newpath, val1, val2
Example #22
 def __init__(self, analysis, version = 'Unknown', template = template_name, output_fname = (os.getcwd() + os.sep + 'Data Sharing Initiative 1 Report.xls')):
     rb = xlrd.open_workbook(template, formatting_info=True)
     self.workbook = copy(rb)
     self.analysis = analysis
     self.no_of_datasets = len(analysis.datasetConfigs)
     self.output_fname = output_fname
     self.version = version
Example #23
def write_to_excel(contents, file=config.OUT_FILE):
    print u'正在写入到文本中', contents[0]
    try:
        rb = xlrd.open_workbook(file)
        sheet = rb.sheets()[0]
        row = sheet.nrows
        wb = copy(rb)
        sheet = wb.get_sheet(0)
        count = 0
        name = contents[0]
        if not repeat_excel(name, file):
            for content in contents:
                sheet.write(row, count, content)
                count = count + 1
                wb.save(file)
                print u'已成功写入到文件', file, u'第', row + 1, u'行'
        else:
            print u'内容已存在, 跳过写入文件', file

    except IOError:
        print u'未找到该文件', file
        book = xlwt.Workbook(encoding='utf-8', style_compression=0)
        book.add_sheet('sheet1', cell_overwrite_ok=True)
        book.save(file)
        print u'已成功创建该文件', file
        write_to_excel(contents, file)
Example #24
    def updateDB(self):
        print "正在写入数据库.....\n"
        rb = open_workbook(self.FileName)
        sheet = rb.sheets()[self.OutFileSheetNumber]
        wb = copy(rb)
        row = wb.get_sheet(self.OutFileSheetNumber).row(rb.sheets()[self.OutFileSheetNumber].nrows)
        row.write(0, self.dataTag)
        row.write(1, encode(self.Time))
        row.write(2, self.userid)
        row.write(3, encode(self.username))
        row.write(4, encode(self.Product))
        row.write(5, encode(self.Machine))
        row.write(6, encode(self.shift))
        for parm in self.ParmDict:
            for col in range(sheet.ncols):
                cellValue = sheet.cell(self.OutFileKeyRowNumber, col).value
                if cellValue == unicode(parm, "cp936"):
                    data = self.ParmDict[parm]
                    if type(data) == type(u"hh"):
                        writedata = data
                    else:
                        writedata = encode(data)
                    row.write(col, writedata)

        # save to the file
        wb.save(self.FileName)
        # make a backup copy
        try:
            shutil.copyfile(self.FileName, self.BackupFileName)
        except IOError:
            print "备份文件只读"

        print "写入数据库完成!\n"
Example #25
def write_excelfile(filePath, fileName, sheetName, xCell, yCell, inValue):
        sourceFile = os.path.join(filePath, fileName)
        wWorkBook = None
        sheetIndex = None
        
        if os.path.isfile(sourceFile):
                sWorkBook = xlrd.open_workbook(sourceFile)
                if sheetName in sWorkBook.sheet_names():
                        sheetIndex = sWorkBook.sheet_names().index(sheetName)
                else:
                        print "WRN - Sheet %s not exist!" % sheetName

                wWorkBook = copy(sWorkBook)
        else:
                print "ERR - Source excel %s is not exist!" % sourceFile
                print "INF - Create new excel named %s" % sourceFile
		
                wWorkBook = xlwt.Workbook(encoding = 'utf-8')

        if wWorkBook:
                if sheetIndex != None:
                        wSheet = wWorkBook.get_sheet(sheetIndex)
                        wSheet.write(int(xCell), int(yCell), inValue)
                else:
                        print "INF - Add new sheet named %s" % sheetName
                        wSheet = wWorkBook.add_sheet(sheetName, cell_overwrite_ok = True)
                        wSheet.write(int(xCell), int(yCell), inValue)

                wWorkBook.save(sourceFile)
                return True
        else:
                return False
Example #26
def edit_file(filename, base_id=[]):
    font0 = xlwt.Font()
    font0.name = 'Times New Roman'
    font0.colour_index = 2  # red
    font0.bold = True

    style0 = xlwt.XFStyle()
    style0.font = font0

    style1 = xlwt.XFStyle()
    style1.num_format_str = 'YYYY/MM/DD'  # handling of the date format

    rb = open_workbook(filename)
    wb = copy(rb)
    ws = wb.get_sheet(0)
    # table = rb.get_sheet()[0]  # this method seems to be deprecated and raises an error here
    table = rb.sheets()[0]
    for row_number in range(table.nrows):
        if row_number:
            if table.row_values(row_number)[0] in base_id:
                print xldate.xldate_as_datetime(table.row_values(row_number)[1], 0)
                ws.write(row_number, 0, table.row_values(row_number)[0], style0)  # a colour needs to be changed here
            ws.write(row_number, 1, xldate.xldate_as_datetime(table.row_values(row_number)[1], 0),style1)  # a colour needs to be changed here

    wb.save(filename)
    # wb.save('b' + filename)  # the file can be saved under a different name; the original file is left unchanged
    print 'ok'
Example #27
def dealwithFile(fileName):
    try:
        xls = xlrd.open_workbook(fileName)
        sheetNames = xls.sheet_names()
        # sheet = xls.sheet_by_name("Sheet1")
        for sheetName in sheetNames:
            try:
                sheetName1 = str(sheetName).upper().replace('SELECT ', '')
                print 'sheetName:' + sheetName1
                if 'SQL' == sheetName1:
                    continue
                workbook = xlrd.open_workbook(BaseDicPath + "/" + sheetName1 + ".xls")
                workbook_t = copy(workbook)
                sheet_t = workbook_t.add_sheet(str('Example'), cell_overwrite_ok=True)
                cur_sheet = xls.sheet_by_name(sheetName)
                for row in range(0, cur_sheet.nrows):
                    for col in range(0, cur_sheet.ncols):
                        sheet_t.write(row, col, cur_sheet.cell_value(row, col), style)

                workbook_t.save(BaseDicPath + "/" + sheetName1 + ".xls")
                print sheetName1, ' gen sucess'
            except Exception, e:
                print Exception, ":", e
    except Exception, e:
        print Exception, ":", e
Example #28
def ucf_to_xls(ifile_ucf,ofile_xls,sheet_name):
    file_path = ofile_xls

    book = open_workbook(file_path,formatting_info=True)
    for index in range(book.nsheets):
        worksheet_name = book.sheet_by_index(index)
        if worksheet_name.name == sheet_name:
            index_sheet_numb = index

    # use r_sheet if you want to make conditional writing to sheets
    #r_sheet = book.sheet_by_index(index_sheet_numb) # read only copy to introspect the file
    wb = copy(book) # a writable copy (can't read values out of this, only write to it)
    w_sheet = wb.get_sheet(index_sheet_numb) # sheet write within writable copy

    ucfMap = extractUCF(ifile_ucf)    # Name of file to 
    keylist = ucfMap.keys()
    keylist.sort()
    excelMapCoords = routeExcel()     

    i = 0
    for key in keylist:
        x,y = excelMapCoords[str(key)]
        changeCell(w_sheet,x,y,ucfMap[key]) # Write to Sheet without changing format
        #w_sheet.write(x,y,ucfMap[key])  # Write to sheet but changes format
        i += 1
    
    # Save .xls file into a diff file name
    wb.save(os.path.splitext(file_path)[-2]+"_rv"+os.path.splitext(file_path)[-1])
Example #29
def new_row(path,newpath,val1,val2,
            sheet_index=None,sheet_name=None,
            row1=None, col1=None):
    logger.info(str(("new row for sheet",sheet_index, val1,val2)))
    # open our xls file; there are lots of extra default options in this call,
    # for logging etc., so take a look at the docs
    book = xlrd.open_workbook(path) 

    worksheet = None
    if sheet_name:
        worksheet = book.sheet_by_name(sheet_name)
        sheet_index = worksheet.number
    else:
        worksheet = book.sheet_by_index(sheet_index) 
    num_rows = worksheet.nrows

    wb = copy(book)
    # doesn't work: wb.encoding='utf-8'
    worksheet = wb.get_sheet(sheet_index) 
    
    start_col = col1 or 0
    start_row = row1 or num_rows
    worksheet.write(start_row,start_col,val1)
    # NOTE: will lose chars in ascii encoding
    worksheet.write(start_row,start_col+1,smart_str(val2, 'ascii', errors='ignore'))    
    wb.save(newpath)   
    print 'wrote new workbook row:', newpath, val1, val2
Example #30
def useTemplate(rows):


    id_ROW = 6 # 0 based (subtract 1 from excel row number)
    col_name = 1
    col_description = 2
    col_avalaible = 3
    
    rb = open_workbook('./Book.xlt',formatting_info=True)

    r_sheet = rb.sheet_by_index(0) # read only copy to introspect the file
    wb = copy(rb) # a writable copy (I can't read values out of this, only write to it)
    w_sheet = wb.get_sheet(0) # the sheet to write to within the writable copy


    for row in rows:
      
        setOutCell(w_sheet, col_name, id_ROW, row['name'])
        setOutCell(w_sheet, col_description, id_ROW, row['description'])
        setOutCell(w_sheet, col_avalaible, id_ROW, row['available'])
        #w_sheet.write(id_ROW, col_name, row['name'])
        #w_sheet.write(id_ROW, col_description, row['description'])
        #w_sheet.write(id_ROW, col_avalaible, row['available'])
        id_ROW = id_ROW + 1

    output = StringIO.StringIO()
    wb.save(output)

    return output
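useTemplate() returns the filled-in workbook as an in-memory StringIO buffer rather than writing it to disk. A hedged consumption sketch (the output file name is an assumption, and rows is assumed to be a list of dicts with 'name', 'description' and 'available' keys):

output = useTemplate(rows)
with open('report_from_template.xls', 'wb') as f:
    f.write(output.getvalue())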
Example #31
path2 = '/home/exx/PycharmProjects/DLS_score'  #Path to pycharm
os.chdir(path)  #Heading to launcher path
shutil.copy('predicted_values.xlsx', path2)  #Copying file to pycharm directory
book = xlrd.open_workbook(
    'predicted_values.xlsx')  #Opening the output file from DLS score
shutil.copy(
    '/home/exx/Desktop/all/general-set-except-refined/experimental_pdb_binddata.xlsx',
    path2)  #Copying the experimental data from pdb bind to pycharm
os.chdir(path2)  #Setting path to pycharm and work from there
book2 = xlrd.open_workbook(
    'experimental_pdb_binddata.xlsx')  #Opening the experimental pdb bind data
worksheet1 = book.sheet_by_index(
    0)  #Setting the sheet to work from in the output Excel file
worksheet2 = book2.sheet_by_index(
    0)  #Setting the sheet to work from in pdb bind
book_out = copy(
    book)  #This command copies the original workbook so we can edit on it
sheet_out = book_out.get_sheet(0)  #We open the first sheet of our output file
id_list = []  #Creating a list for the pdb bind ID
id_list2 = []  #ID list from the predicted_value csv file
value_list = []  #Creating a list for the experimental values from pdb bind ID
for i in range(1, 4632, 1):  #Loop that reads all the 4632 IDs from the file
    id_list.append(worksheet2.cell_value(
        i, 0))  #Storing IDs from the file into the list
    value_list.append(
        worksheet2.cell_value(i, 1)
    )  #Storing values from the IDs, and making sure they match to each one of them

for j in range(
        1, 1692, 1
):  #In this case we use this for loop to walk through the 1692 IDs from the output file
    id_list2.append(worksheet1.cell_value(
Example #32
from elasticsearch import Elasticsearch


import os
import sys
import xlrd
from xlrd import open_workbook
from xlutils.copy import copy
sys.path.append(os.getcwd())
from helk_info import HELK_IP

rb = open_workbook('output.xls')
wb = copy(rb)
sheet1 = rb.sheets()[0]
rowCount = sheet1.nrows
print(rowCount)
es = Elasticsearch(HELK_IP + ':9200')

doc = {
  "query": {
    "constant_score": {
      "filter": {
        "bool": {
          "must": [
            {
              "bool": {
                "should": [
                  {
                    "wildcard": {
                      "process_parent_path.keyword": "*\\\\wmiprvse.exe"
                    }
Example #33
import numpy as np
import xlrd, xlwt
from xlutils.copy import copy
book = xlrd.open_workbook('test.xls')
sheet = book.sheet_by_index(0)
write_book = copy(book)
sheet1 = write_book.get_sheet(0)
sheet1.write(0, 8, 'Градиентный спуск с дроблением')
sheet1.write(1, 8, 'Номер итерации')
sheet1.write(1, 9, 'x')
sheet1.write(1, 10, 'y')
sheet1.write(1, 11, 'f(x,y)')
sheet1.write(1, 12, 'Шаг')
sheet1.write(1, 13, 'Количество вызовов функции')
sheet1.write(1, 14, 'Количество вызовов градиента')
sheet1.write(1, 15, 'Действительная часть градиента')
sheet1.write(1, 16, 'Мнимая часть градиента')
li = complex(0, 1)
polynomial = np.array([1, complex(8, 5), complex(5, 8)])


def pol_value(z, polynomial):
    value = complex(0, 0)
    for i in range(len(polynomial)):
        value = value * z + polynomial[i]
    return value


def abs_pol_value(z, polynomial):
    pol = pol_value(z, polynomial)
    return abs(pol) * abs(pol)
Example #34
driver.get('https://www.okcis.cn/search/')  # open the page
for cookie in listCookies:
    driver.add_cookie(cookie)
driver.refresh()

# Try to read the crawl history
try:
    with open('./history.txt', 'r') as f:
        province_start = int(f.readline())
        city_start = int(f.readline())
        page_start = int(f.readline())
        item_start = int(f.readline())
        sheet_index = int(f.readline())
        print(province_start, city_start, page_start, item_start)
        f.close()
    workbook = copy(xlrd.open_workbook('./Result.xls'))
    worksheet = workbook.get_sheet(-1)  # get the last worksheet in the workbook
    print('成功读取历史记录')
    has_his = 1

except:
    print(sys.exc_info()[0])
    print('读取历史记录失败,初始化中')
    province_start = 0
    city_start = 0
    page_start = 0
    item_start = 0
    # Create a new Excel
    workbook = xlwt.Workbook(encoding='utf-8')
    has_his = 0
Example #35
def write_to_excel(file_path, row, col, value):
    work_book = xlrd.open_workbook(file_path, formatting_info=False)
    write_to_work = copy(work_book)
    sheet_data = write_to_work.get_sheet(0)
    sheet_data.write(row, col, str(value))
    write_to_work.save(file_path)
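A one-line usage sketch (the path and cell coordinates are made up). Note that the workbook is opened with formatting_info=False, so the copy that gets saved back does not carry over the original cell formatting.

# Overwrite the cell at row 2, column 1 (i.e. B3) of results.xls in place.
write_to_excel('results.xls', 2, 1, 3.14)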
Example #36
def time_interval(XY, frame_rate, Y_half_lim, Y_fourth_lim, Y_third_lim, file_name, val, name, X0, Xn, Y0, Yn):
	Y1, Y2, Y3, Y4, Y5, Y6, Y7, Y8, Y9, Y10, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, XY_1_third, XY_2_third, XY_3_third, XY_4_third, XY_5_third, XY_6_third, XY_7_third, XY_8_third, XY_9_third, XY_10_third, empty = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]
	XY1_centre, XY1_boundary, XY2_centre, XY2_boundary, XY3_centre, XY3_boundary, XY4_centre, XY4_boundary, XY5_centre, XY5_boundary, XY6_centre, XY6_boundary, XY7_centre, XY7_boundary, XY8_centre, XY8_boundary, XY9_centre, XY9_boundary, XY10_centre, XY10_boundary = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]
	chunk_value = int(val/10)
	part1, part2, part3, part4, part5, part6, part7, part8, part9, part10 = (part for part in chunks((XY), chunk_value))
	
	for x, y in part1:
		if y>=Y_third_lim:
			XY_1_third.append((x,y))
		X1.append(x)
		Y1.append(y)
		V1, S1 = find_vel_dist(frame_rate, X1, Y1)	
		mean_V1 = np.mean(V1)
		sd_V1 = np.std(V1)		
		if x>=X0 and x<=Xn and y>=Y0 and y<=Yn:
			XY1_centre.append((x, y))
		else:
			XY1_boundary.append((x, y))

	for x, y in part2:
		if y>=Y_third_lim:
			XY_2_third.append((x,y))
		X2.append(x)
		Y2.append(y)
		V2, S2 = find_vel_dist(frame_rate, X2, Y2)	
		mean_V2 = np.mean(V2)
		sd_V2 = np.std(V2)
		if x>=X0 and x<=Xn and y>=Y0 and y<=Yn:
			XY2_centre.append((x, y))
		else:
			XY2_boundary.append((x, y))
	
	for x, y in part3:
		if y>=Y_third_lim:
			XY_3_third.append((x,y))
		X3.append(x)
		Y3.append(y)
		V3, S3 = find_vel_dist(frame_rate, X3, Y3)	
		mean_V3 = np.mean(V3)
		sd_V3 = np.std(V3)
		if x>=X0 and x<=Xn and y>=Y0 and y<=Yn:
			XY3_centre.append((x, y))
		else:
			XY3_boundary.append((x, y))
	
	for x, y in part4:
		if y>=Y_third_lim:
			XY_4_third.append((x,y))
		X4.append(x)
		Y4.append(y)
		V4, S4 = find_vel_dist(frame_rate, X4, Y4)	
		mean_V4 = np.mean(V4)
		sd_V4 = np.std(V4)
		if x>=X0 and x<=Xn and y>=Y0 and y<=Yn:
			XY4_centre.append((x, y))
		else:
			XY4_boundary.append((x, y))
	
	for x, y in part5:
		if y>=Y_third_lim:
			XY_5_third.append((x,y))
		X5.append(x)
		Y5.append(y)
		V5, S5 = find_vel_dist(frame_rate, X5, Y5)	
		mean_V5 = np.mean(V5)
		sd_V5 = np.std(V5)
		if x>=X0 and x<=Xn and y>=Y0 and y<=Yn:
			XY5_centre.append((x, y))
		else:
			XY5_boundary.append((x, y))
	
	for x, y in part6:
		if y>=Y_third_lim:
			XY_6_third.append((x,y))
		X6.append(x)
		Y6.append(y)
		V6, S6 = find_vel_dist(frame_rate, X6, Y6)	
		mean_V6 = np.mean(V6)
		sd_V6 = np.std(V6)
		if x>=X0 and x<=Xn and y>=Y0 and y<=Yn:
			XY6_centre.append((x, y))
		else:
			XY6_boundary.append((x, y))
	
	for x, y in part7:
		if y>=Y_third_lim:
			XY_7_third.append((x,y))
		X7.append(x)
		Y7.append(y)
		V7, S7 = find_vel_dist(frame_rate, X7, Y7)	
		mean_V7 = np.mean(V7)
		sd_V7 = np.std(V7)
		if x>=X0 and x<=Xn and y>=Y0 and y<=Yn:
			XY7_centre.append((x, y))
		else:
			XY7_boundary.append((x, y))
	
	for x, y in part8:
		if y>=Y_third_lim:
			XY_8_third.append((x,y))
		X8.append(x)
		Y8.append(y)
		V8, S8 = find_vel_dist(frame_rate, X8, Y8)	
		mean_V8 = np.mean(V8)
		sd_V8 = np.std(V8)
		if x>=X0 and x<=Xn and y>=Y0 and y<=Yn:
			XY8_centre.append((x, y))
		else:
			XY8_boundary.append((x, y))
	
	for x, y in part9:
		if y>=Y_third_lim:
			XY_9_third.append((x,y))
		X9.append(x)
		Y9.append(y)
		V9, S9 = find_vel_dist(frame_rate, X9, Y9)	
		mean_V9 = np.mean(V9)
		sd_V9 = np.std(V9)
		if x>=X0 and x<=Xn and y>=Y0 and y<=Yn:
			XY9_centre.append((x, y))
		else:
			XY9_boundary.append((x, y))
	
	for x, y in part10:
		if y>=Y_third_lim:
			XY_10_third.append((x,y))
		X10.append(x)
		Y10.append(y)
		V10, S10 = find_vel_dist(frame_rate, X10, Y10)	
		mean_V10 = np.mean(V10)
		sd_V10 = np.std(V10)
		if x>=X0 and x<=Xn and y>=Y0 and y<=Yn:
			XY10_centre.append((x, y))
		else:
			XY10_boundary.append((x, y))

	time_1_third = (float(time(XY_1_third, frame_rate))/60)*100
	time_2_third = (float(time(XY_2_third, frame_rate))/60)*100
	time_3_third = (float(time(XY_3_third, frame_rate))/60)*100
	time_4_third = (float(time(XY_4_third, frame_rate))/60)*100
	time_5_third = (float(time(XY_5_third, frame_rate))/60)*100
	time_6_third = (float(time(XY_6_third, frame_rate))/60)*100
	time_7_third = (float(time(XY_7_third, frame_rate))/60)*100
	time_8_third = (float(time(XY_8_third, frame_rate))/60)*100
	time_9_third = (float(time(XY_9_third, frame_rate))/60)*100
	time_10_third = (float(time(XY_10_third, frame_rate))/60)*100

#latencies: works
	latency1_1, latency1_2 = latencies(Y_half_lim, part1, frame_rate)
	latency2_1, latency2_2 = latencies(Y_half_lim, part2, frame_rate)
	latency3_1, latency3_2 = latencies(Y_half_lim, part3, frame_rate)
	latency4_1, latency4_2 = latencies(Y_half_lim, part4, frame_rate)
	latency5_1, latency5_2 = latencies(Y_half_lim, part5, frame_rate)
	latency6_1, latency6_2 = latencies(Y_half_lim, part6, frame_rate)
	latency7_1, latency7_2 = latencies(Y_half_lim, part7, frame_rate)
	latency8_1, latency8_2 = latencies(Y_half_lim, part8, frame_rate)
	latency9_1, latency9_2 = latencies(Y_half_lim, part9, frame_rate)
	latency10_1, latency10_2 = latencies(Y_half_lim, part10, frame_rate)

#transitions: works
	tran1 = find_transitions(Y1, Y_half_lim)
	tran2 = find_transitions(Y2, Y_half_lim)
	tran3 = find_transitions(Y3, Y_half_lim)
	tran4 = find_transitions(Y4, Y_half_lim)
	tran5 = find_transitions(Y5, Y_half_lim)
	tran6 = find_transitions(Y6, Y_half_lim)
	tran7 = find_transitions(Y7, Y_half_lim)
	tran8 = find_transitions(Y8, Y_half_lim)
	tran9 = find_transitions(Y9, Y_half_lim)
	tran10 = find_transitions(Y10, Y_half_lim)	

#darting: works
	darts1 = find_darts(V1, mean_V1, sd_V1)
	darts2 = find_darts(V2, mean_V2, sd_V2)
	darts3 = find_darts(V3, mean_V3, sd_V3)
	darts4 = find_darts(V4, mean_V4, sd_V4)
	darts5 = find_darts(V5, mean_V5, sd_V5)
	darts6 = find_darts(V6, mean_V6, sd_V6)
	darts7 = find_darts(V7, mean_V7, sd_V7)
	darts8 = find_darts(V8, mean_V8, sd_V8)
	darts9 = find_darts(V9, mean_V9, sd_V9)
	darts10 = find_darts(V10, mean_V10, sd_V10)

#time centre/wall: works
	time_centre1 = float(time(XY1_centre, frame_rate))
	time_boundary1 = float(time(XY1_boundary, frame_rate))
	time_centre_perc1 = (time_centre1/(time_centre1+time_boundary1))*100
	time_boundary_perc1 = 100 - time_centre_perc1
	time_centre2 = float(time(XY2_centre, frame_rate))
	time_boundary2 = float(time(XY2_boundary, frame_rate))
	time_centre_perc2 = (time_centre2/(time_centre2+time_boundary2))*100
	time_boundary_perc2 = 100 - time_centre_perc2
	time_centre3 = float(time(XY3_centre, frame_rate))
	time_boundary3 = float(time(XY3_boundary, frame_rate))
	time_centre_perc3 = (time_centre3/(time_centre3+time_boundary3))*100
	time_boundary_perc3 = 100 - time_centre_perc3
	time_centre4 = float(time(XY4_centre, frame_rate))
	time_boundary4 = float(time(XY4_boundary, frame_rate))
	time_centre_perc4 = (time_centre4/(time_centre4+time_boundary4))*100
	time_boundary_perc4 = 100 - time_centre_perc4
	time_centre5 = float(time(XY5_centre, frame_rate))
	time_boundary5 = float(time(XY5_boundary, frame_rate))
	time_centre_perc5 = (time_centre5/(time_centre5+time_boundary5))*100
	time_boundary_perc5 = 100 - time_centre_perc5
	time_centre6 = float(time(XY6_centre, frame_rate))
	time_boundary6 = float(time(XY6_boundary, frame_rate))
	time_centre_perc6 = (time_centre6/(time_centre6+time_boundary6))*100
	time_boundary_perc6 = 100 - time_centre_perc6
	time_centre7 = float(time(XY7_centre, frame_rate))
	time_boundary7 = float(time(XY7_boundary, frame_rate))
	time_centre_perc7 = (time_centre7/(time_centre7+time_boundary7))*100
	time_boundary_perc7 = 100 - time_centre_perc7
	time_centre8 = float(time(XY8_centre, frame_rate))
	time_boundary8 = float(time(XY8_boundary, frame_rate))
	time_centre_perc8 = (time_centre8/(time_centre8+time_boundary8))*100
	time_boundary_perc8 = 100 - time_centre_perc8
	time_centre9 = float(time(XY9_centre, frame_rate))
	time_boundary9 = float(time(XY9_boundary, frame_rate))
	time_centre_perc9 = (time_centre9/(time_centre9+time_boundary9))*100
	time_boundary_perc9 = 100 - time_centre_perc9
	time_centre10 = float(time(XY10_centre, frame_rate))
	time_boundary10 = float(time(XY10_boundary, frame_rate))
	time_centre_perc10 = (time_centre10/(time_centre10+time_boundary10))*100
	time_boundary_perc10 = 100 - time_centre_perc10


	times = [file_name, time_1_third, time_2_third, time_3_third, time_4_third, time_5_third, time_6_third, time_7_third, time_8_third, time_9_third, time_10_third, empty]
	lat1 = [file_name, latency1_1, latency2_1, latency3_1, latency4_1, latency5_1, latency6_1, latency7_1, latency8_1, latency9_1, latency10_1]
	lat2 = [file_name, latency1_2, latency2_2, latency3_2, latency4_2, latency5_2, latency6_2, latency7_2, latency8_2, latency9_2, latency10_2]
	tran = [file_name, tran1, tran2, tran3, tran4, tran5, tran6, tran7, tran8, tran9, tran10]
	dart = [file_name, darts1, darts2, darts3, darts4, darts5, darts6, darts7, darts8, darts9, darts10]
	time_wall = [file_name, time_boundary_perc1, time_boundary_perc2, time_boundary_perc3, time_boundary_perc4, time_boundary_perc5, time_boundary_perc6, time_boundary_perc7, time_boundary_perc8, time_boundary_perc9, time_boundary_perc10]
	time_centre = [file_name, time_centre_perc1, time_centre_perc2, time_centre_perc3, time_centre_perc4, time_centre_perc5, time_centre_perc6, time_centre_perc7, time_centre_perc8, time_centre_perc9, time_centre_perc10]

	with open('raw/time_calc.txt', "a") as f:
		writer = csv.writer(f, delimiter=',')
		writer.writerow(times)

	with open('raw/lat1.txt', 'a') as f:
		writer = csv.writer(f, delimiter=',')
		writer.writerow(lat1)

	with open('raw/lat2.txt', 'a') as f:
		writer = csv.writer(f, delimiter=',')
		writer.writerow(lat2)

	with open('raw/tran.txt', 'a') as f:
		writer = csv.writer(f, delimiter=',')
		writer.writerow(tran)

	with open('raw/dart.txt', 'a') as f:
		writer = csv.writer(f, delimiter=',')
		writer.writerow(dart)
	with open('raw/time_wall.txt', 'a') as f:
		writer = csv.writer(f, delimiter=',')
		writer.writerow(time_wall)
	with open('raw/time_centre.txt', 'a') as f:
		writer = csv.writer(f, delimiter=',')
		writer.writerow(time_centre)

	data2 = []
	with open('raw/time_calc.txt', 'r') as f:
		for line in f:
			data2.append([word for word in line.split(",") if word])
	data3 = []
	with open('raw/lat1.txt', 'r') as f:
		for line in f:
			data3.append([word for word in line.split(",") if word])
	data4 = []
	with open('raw/lat2.txt', 'r') as f:
		for line in f:
			data4.append([word for word in line.split(",") if word])
	data5 = []
	with open('raw/tran.txt', 'r') as f:
		for line in f:
			data5.append([word for word in line.split(",") if word])
	data6 = []
	with open('raw/dart.txt', 'r') as f:
		for line in f:
			data6.append([word for word in line.split(",") if word])
	data7 = []
	with open('raw/time_wall.txt', 'r') as f:
		for line in f:
			data7.append([word for word in line.split(",") if word])
	data8 = []
	with open('raw/time_centre.txt', 'r') as f:
		for line in f:
			data8.append([word for word in line.split(",") if word])

# write new sheet in xls:
	rb = open_workbook("output/Extracted_data_" + name + ".xls")
	wb = copy(rb)
	sheet2 = wb.add_sheet("Time in bottom third")
	for row_index in range(len(data2)):
		for col_index in range(len(data2[row_index])):
			sheet2.write(row_index, col_index, float_if_possible(data2[row_index][col_index]))
	sheet3 = wb.add_sheet("Latency for first entry")
	for row_index in range(len(data3)):
		for col_index in range(len(data3[row_index])):
			sheet3.write(row_index, col_index, float_if_possible(data3[row_index][col_index]))
	sheet4 = wb.add_sheet("Latency for second entry")
	for row_index in range(len(data4)):
		for col_index in range(len(data4[row_index])):
			sheet4.write(row_index, col_index, float_if_possible(data4[row_index][col_index]))
	sheet5 = wb.add_sheet("Transitions")
	for row_index in range(len(data5)):
		for col_index in range(len(data5[row_index])):
			sheet5.write(row_index, col_index, float_if_possible(data5[row_index][col_index]))
	sheet6 = wb.add_sheet("Darting")
	for row_index in range(len(data6)):
		for col_index in range(len(data6[row_index])):
			sheet6.write(row_index, col_index, float_if_possible(data6[row_index][col_index]))
	sheet7 = wb.add_sheet("%time in boundary")
	for row_index in range(len(data7)):
		for col_index in range(len(data7[row_index])):
			sheet7.write(row_index, col_index, float_if_possible(data7[row_index][col_index]))
	sheet8 = wb.add_sheet("%time in centre")
	for row_index in range(len(data8)):
		for col_index in range(len(data8[row_index])):
			sheet8.write(row_index, col_index, float_if_possible(data8[row_index][col_index]))

	wb.save("output/Extracted_data_" + name + ".xls")
Example #37
    def get_pic(self):
        data = xlrd.open_workbook(tablename + '.xls', 'r')  # open the xls file
        table = data.sheets()[0]  # open the first sheet
        i = table.nrows  # number of rows already written to sheet 1 in the previous crawl
        i1 = 0
        used = 0
        rb = open_workbook(tablename + '.xls', 'utf-8')
        wb = copy(rb)  # copy what was crawled last time into a new workbook and keep appending records there
        # a sheet obtained via get_sheet() has a write() method
        ws = wb.get_sheet(0)
        p = 1
        for num in range(p, p + page):
            # num is the page number; the url has to be copied from the page's secondary (AJAX) load
            web_url='http://kns.cnki.net/kns/brief/brief.aspx?curpage=%s&RecordsP' \
                    'erPage=50&QueryID=3&ID=&turnpage=1&tpagemode=L&dbPrefix=SCDB&Fields=' \
                    '&DisplayMode=listmode&PageName=ASP.brief_default_result_aspx&isinEn=1#J_ORDER&'% num

            # timing control starts here
            t = int(time.clock())
            useTime = t - used
            # if one cycle took too little time, wait for a while,
            # mainly to avoid getting banned
            if (useTime < 120 and useTime > 10):
                print("useTime=%s" % useTime)
                whiteTime = 120 - useTime
                print("等待%s秒" % whiteTime)
                time.sleep(whiteTime)
            used = int(time.clock())
            print(useTime)
            print('开始网页get请求')
            r = self.request(web_url)
            # explanation of the error response, so we know exactly why the crawl cannot continue
            yan = re.search(r'参数错误', r.text)
            if yan != None:
                print("参数")
                break
            yan = re.search(r'验证码', r.text)
            if yan != None:
                print("验证")
                break

            # from here on, use regexes to grab the url of every article in the result list
            soup = re.findall(r'<TR([.$\s\S]*?)</TR>', r.text)
            # print(r.text)
            for a in soup:
                i1 += 1
                name = re.search(r'_blank.*<', a)
                name = name.group()[8:-1]
                name = re.sub(r'<font class=Mark>', '', name)
                name = re.sub(r'</font>', '', name)

                time_his = re.search(
                    r'([1-2][0-9][0-9][0-9]\-([0-1][0-9])\-([0-9][0-9]))', a)
                time_his = time_his.group()
                url = re.search(r'href=.*? ', a)  # treat the quoted part as one sub-expression; a single lazy match is enough
                url = url.group()

                # turn the crawled relative address into an absolute one
                url = "http://kns.cnki.net/KCMS/" + url[11:-2]  # the offsets were counted by hand...

                # url of the reference-list detail page below
                FN = re.search(r'FileName.*?&', url)
                if FN != None:
                    FN = re.search(r'FileName.*?&', url).group()
                DN = re.search(r'DbName.*?&', url)
                if DN != None:
                    DN = re.search(r'DbName.*?&', url).group()
                DC = re.search(r'DbCode.*?&', url).group()
                DUrl = "http://kns.cnki.net/KCMS/detail/frame/list.aspx?%s%s%sRefType=1" % (
                    FN, DN, DC)
                R = self.request(DUrl)
                # if there are no references, treat it as a low-quality article: skip it and crawl the next one
                isR = re.search(r'参考文献', R.text)
                if i1 == 1:
                    print("name:%s" % name)
                if isR == None:
                    continue
                d = self.request(url).text
                type = re.search(r'"\).html\(".*?"', d)
                type = type.group()[9:-1]
                ins = re.search(r'TurnPageToKnet\(\'in\',\'.*?\'', d)
                if ins == None:
                    continue
                ins = ins.group()[21:-1]
                wt = re.findall(r'TurnPageToKnet\(\'au\',\'.*?\'', d)
                writer = ""
                for w in wt:
                    writer = writer + "," + w[21:-1]
                writer = writer[1:]

                # article abstract
                summary = re.search(r'(?<=name="ChDivSummary">).+?(?=</span>)',
                                    d)
                summary = summary.group()

                ws.write(i, 0, name)  # article title
                ws.write(i, 1, writer)  # authors
                ws.write(i, 2, type)  # article type
                ws.write(i, 17, summary)  # abstract

                ws.write(i, 16, time_his)  # publication date

                # journal, and whether it is a Chinese core journal
                sourinfo = re.search(r'sourinfo([.$\s\S]*?)</div', d)
                if sourinfo != None:
                    sourinfo = sourinfo.group()
                    # print(sourinfo)
                    from_ = re.search(r'title.*</a', sourinfo).group()
                    from_ = re.sub(r'title">.*?>', '', from_)
                    from_ = re.sub(r'</a', '', from_)
                    ws.write(i, 3, from_)
                    core = re.search(r'中文核心期刊', sourinfo)
                    if core != None:
                        # print(core.group())
                        ws.write(i, 4, "中文核心期刊")

                # funding source of the article
                fund = re.search(r'TurnPageToKnet\(\'fu\',\'.*?\'', d)
                if fund != None:
                    fund = fund.group()[21:-1]
                    ws.write(i, 5, fund)

                # article keywords; at most 8 keywords are recorded
                kw = re.findall(r'TurnPageToKnet\(\'kw\',\'.*?\'', d)
                tnum = 0
                for tkw in kw:
                    tnum += 1
                    tkw = tkw[21:-1]
                    if tnum > 8:
                        break
                    ws.write(i, 5 + tnum, tkw)

                i += 1  # advance the output row counter
        wb.save(tablename + '.xls')
Example #38
headers = {"user-agent": "Mizilla/5.0"}

class_0_title = []
class_0_val = []
url_0 = "https://www.wdzj.com/dangan/"
res_0 = requests.get(url_0, headers=headers)
res_0.encoding = "utf-8"
class_0 = BeautifulSoup(res_0.text, "html.parser").select(
    "[data-tar='businessTypeOr'] .searchFontN a[class!='unlimitedN on']")
for zz in range(0, len(class_0)):
    class_0_title.append(class_0[zz].text)
    class_0_val.append(class_0[zz].attrs["data-val"])
print(class_0_title)
print(class_0_val)
data = xlrd.open_workbook("data1.xls")
data_new = copy(data)
ws = data_new.get_sheet(0)
_a = 0
for i in class_0_title:
    ws.write(0, _a, i)
    _a += 4

for cwx in range(0, len(class_0_val)):
    final1 = []
    final2 = []
    final3 = []
    for i in range(1, 20):  # iterate according to the factor (page by page)
        url = '%s%s%s%s' % ("https://www.wdzj.com/dangan/search?filter=e1-",
                            class_0_val[cwx], "&show=1&currentPage=", i)
        res = requests.get(url, headers=headers)
        res.encoding = "utf-8"
Example #39
def calculateRouge(hyp_Path, ref_Path, option):
    """
    计算PIO信息与原文中每个句子的相似度
    :param hyp_Path:原文所在文件夹
    :param ref_Path:SR中PIO存储的json文件
    :option {'textOriginal','textStem','textExcludeStopWord'}
    :return: 结果存储在原文所在文件的sheet2中
    """
    with open(ref_Path, 'r') as load_f:
        pio_json = json.load(load_f)
    for pio in pio_json['content']:
        title = pio['Title']
        # the PIO fields are pio['Participants'], pio['Interventions'] and pio['Outcomes']
        exist = False  # flags whether the original article for this PIO reference exists in the folder
        year = 2001
        if len(title.split(' ')) < 2:
            year = 2001
        if len(title.split(' ')) > 2:
            year = 2001
        if len(title.split(' ')) == 2:
            year = int(title.split(' ')[1][0:4])
        if year >= 2000:  # drop papers published before 2000
            path_list = os.listdir(hyp_Path)
            for filename in path_list:  # look in the folder for the original article whose title matches this reference
                str = filename.split('_')
                if str[0] == pio['Title'] and filename.endswith('.xls'):
                    exist = True
                    break
            if exist:  # the original article for this PIO reference exists in the folder
                rd = xlrd.open_workbook(hyp_Path + '/' + filename)
                sheet = rd.sheet_by_index(0)  # sheet containing the original text
                nrows = sheet.nrows
                wb = copy(rd)
                try:
                    sheet1 = wb.get_sheet(1)
                except Exception as err:
                    sheet1 = wb.add_sheet(
                        'ROUGE Matrix',
                        cell_overwrite_ok=True)  # add a worksheet to record the ROUGE matrix
                sheet1.write_merge(
                    0, 0, 1, 9,
                    'P{rouge-1[f,p,r]}{rouge-2[f,p,r]}{rouge-l[f,p,r]}')
                sheet1.write_merge(
                    0, 0, 10, 18,
                    'I{rouge-1[f,p,r]}{rouge-2[f,p,r]}{rouge-l[f,p,r]}')
                sheet1.write_merge(
                    0, 0, 19, 27,
                    'O{rouge-1[f,p,r]}{rouge-2[f,p,r]}{rouge-l[f,p,r]}')
                rouge = Rouge()
                for i in range(0, nrows):
                    sheet1.write(i + 1, 0, i + 1)
                    tempStr = bytes.decode(
                        sheet.cell(i, 0).value.encode('utf-8'))

                    textOriginal = tempStr
                    textExcludeStopWord = excludeStopWords(tempStr)
                    textStem = lancaster_stemmer.stem(tempStr)

                    if option == 'textOriginal':
                        # similarity between the original text and the PIO
                        score_p = rouge.get_scores(textOriginal,
                                                   pio['Participants'])
                        score_i = rouge.get_scores(textOriginal,
                                                   pio['Interventions'])
                        score_o = rouge.get_scores(textOriginal,
                                                   pio['Outcomes'])
                    if option == 'textStem':
                        # similarity between the stemmed text and the PIO
                        score_p = rouge.get_scores(
                            textStem,
                            lancaster_stemmer.stem(pio['Participants']))
                        score_i = rouge.get_scores(
                            textStem,
                            lancaster_stemmer.stem(pio['Interventions']))
                        score_o = rouge.get_scores(
                            textStem, lancaster_stemmer.stem(pio['Outcomes']))
                    if option == 'textExcludeStopWord':
                        # similarity between the text without stop words and the PIO
                        score_p = rouge.get_scores(
                            textExcludeStopWord,
                            excludeStopWords(pio['Participants']))
                        score_i = rouge.get_scores(
                            textExcludeStopWord,
                            excludeStopWords(pio['Interventions']))
                        score_o = rouge.get_scores(
                            textExcludeStopWord,
                            excludeStopWords(pio['Outcomes']))

                    writeRouge(i + 1, 1, score_p, sheet1)
                    writeRouge(i + 1, 10, score_i, sheet1)
                    writeRouge(i + 1, 19, score_o, sheet1)
                if not os.path.exists(hyp_Path + '/' + option):
                    os.makedirs(hyp_Path + '/' + option)
                wb.save(hyp_Path + '/' + option + '/' +
                        filename.split('_')[0] + '_' + option + '.xls')
                print(filename + ' ROUGE Matrix has been generated')
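writeRouge is not defined in this snippet; a minimal sketch of what it could look like, assuming the usual return shape of the rouge package's get_scores (a one-element list of dicts keyed by 'rouge-1', 'rouge-2' and 'rouge-l', each holding 'f', 'p' and 'r'), which matches the nine-column headers written above:

def writeRouge(row, start_col, score, sheet):
    """Hedged sketch: unpack one rouge.get_scores(...) result into nine consecutive cells."""
    result = score[0]  # get_scores returns a list with one dict per hypothesis
    col = start_col
    for metric in ('rouge-1', 'rouge-2', 'rouge-l'):
        for key in ('f', 'p', 'r'):
            sheet.write(row, col, result[metric][key])
            col += 1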
Пример #40
0
    def write_values(self, row, col, value):
        data = xlrd.open_workbook(self.file)
        data_copy = copy(data)
        sheet = data_copy.get_sheet(0)
        sheet.write(row, col, value)
        data_copy.save(self.file_dir)
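write_values above depends on self.file and self.file_dir defined elsewhere; a minimal sketch of the kind of wrapper class it might sit in (class name, constructor and attribute meanings are assumptions):

import xlrd
from xlutils.copy import copy


class ExcelWriter(object):
    """Hypothetical wrapper around the read-copy-write-save round trip."""

    def __init__(self, file, file_dir):
        self.file = file          # workbook to read from
        self.file_dir = file_dir  # path the modified copy is saved to

    def write_values(self, row, col, value):
        data = xlrd.open_workbook(self.file)
        data_copy = copy(data)
        sheet = data_copy.get_sheet(0)
        sheet.write(row, col, value)
        data_copy.save(self.file_dir)


# e.g. ExcelWriter('cases.xls', 'cases_out.xls').write_values(1, 2, 'pass')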
Пример #41
0
    def testAtp(self):
        # work with the excel file
        testCase = TestCase()
        excel = testCase.getAtpTestCase("atpcase.xls")
        sheet = excel.sheets()[0]
        nrows = sheet.nrows
        wb = copy(excel)
        ws = wb.get_sheet(0)
        amount = 0

        for i in range(1,nrows):
            url = sheet.cell(i,3).value
            data = sheet.cell(i,4).value
            if "sequence" in data :
                data = json.loads(data)
                data['sequence'] = random.randint(100000000, 999999999)
                if sheet.cell(i,1).value.encode("utf-8")  == 'sequence已存在':
                    data['sequence'] = '450639911'
                elif sheet.cell(i,1).value.encode("utf-8")  == 'sequence为空':
                    data['sequence'] = ""
                elif sheet.cell(i,1).value.encode("utf-8")  == '正确的还原':
                    atp = DB()
                    sql = "select SEQUENCE_ID as sequence from SKU_HOLD where hold_end_time = 1500000000 and status = '2' and ZONE_CODE = 1000 ORDER BY rand() limit 1;"
                    sequence = atp.atpQuery(sql)
                    for row in sequence :
                        sequence = row['sequence']
                    data['sequence'] = sequence
                elif sheet.cell(i,1).value.encode("utf-8") == '已经还原过':
                    data['sequence'] = 103066570
                elif sheet.cell(i,1).value.encode("utf-8") == '含有hold_id的还原':
                    # connect to the database
                    atp = DB()
                    sql = "select SEQUENCE_ID as sequence ,HOLD_NO as hold_id from SKU_HOLD where hold_end_time = 1500000000 and status = '2' and ZONE_CODE = 1000 ORDER BY id desc limit 1;"
                    data = atp.atpQuery(sql)
                    for row in data:
                        #print row
                        data = row
                    data['channel'] = 1
                    #cursor.close()
                    #print json.dumps(data)
                elif sheet.cell(i,1).value.encode("utf-8") == 'hold_id为空':
                    # connect to the database
                    atp = DB()
                    sql = "select SEQUENCE_ID as sequence from SKU_HOLD where hold_end_time = 1500000000 and status = '2' and ZONE_CODE = 1000 ORDER BY id desc limit 1;"
                    data = atp.atpQuery(sql)
                    #cursor.execute(sql)
                    #data = cursor.fetchall()
                    for row in data:
                        #print row
                        data = row
                    data['hold_id'] = ""
                    data['channel'] = 1
                    #cursor.close()
                    #print json.dumps(data)
                elif sheet.cell(i,1).value.encode("utf-8") == 'hold_id和sequence不对应':
                    atp = DB()
                    sql1 = "select SEQUENCE_ID as sequence from SKU_HOLD where hold_end_time = 1500000000 and status = '2' and ZONE_CODE = 1000 ORDER BY rand() limit 1;"
                    sql2 = "select HOLD_NO as hold_id from SKU_HOLD where hold_end_time = 1500000000 and status = '2' and ZONE_CODE = 1000 ORDER BY rand() limit 1;"
                    data1 = atp.atpQuery(sql1)
                    for row in data1 :
                        sequence = row['sequence']
                    data2 = atp.atpQuery(sql2)
                    for row in data2:
                        hold_id = row['hold_id']
                    data['sequence'] = sequence
                    data['hold_id'] = hold_id
                    #data['channel'] = 1
                ws.write(i,4,json.dumps(data))
                result = requests.post(self.host + url, headers = self.headers, data = json.dumps(data))
            elif sheet.cell(i,1).value.encode("utf-8") == '创建出货规则':
                data = json.loads(data)
                data['item_id'] = random.randint(100000, 999999)
                #print data['item_id']
                ws.write(i, 4, json.dumps(data))
                try:
                    result = requests.post(self.host + url, headers = self.headers, data = json.dumps(data))
                except Exception, e:
                    print Exception, ":", e
            else:
                try:
                    result = requests.post(self.host + url, headers = self.headers, data = data)
                #r = json.dumps(result.text)
                #print r
                except Exception, e:
                    print Exception, ":", e
Пример #42
0
		sheet.write(0, 17, 'no-Laser miss')
		sheet.write(0, 18, 'no-Laser FA')
		sheet.write(0, 19, 'no-Laser CR')
		sheet.write(0, 20, 'no-Laser d')
		sheet.write(0, 21, 'no-Laser b')
		sheet.write(0, 22, 'odor1 no-laser_lick')
		sheet.write(0, 23, 'odor2 no-laser_lick')
		sheet.write(0, 24, 'odor1 no-laser_firstlick(ms)')
		sheet.write(0, 25, 'odor2 no-laser_firstlick(ms)')
		book.save('go_nogo_result_laser_08.xls')

	for raw_file in filelist:
		if raw_file == 'go_nogo_result_laser_08.xls':
			continue
		oldwb = xlrd.open_workbook('go_nogo_result_laser_08.xls')
		newwb = copy(oldwb)
		sheet = newwb.get_sheet(0)
		old_sheet = oldwb.sheet_by_index(0)
		saved_files = old_sheet.col_values(0)

	#	print("================================================ start =======================================")

		odor_time = 100
		delay = 200
		action_time = 200
		relative_time = odor_time + delay + action_time

		print("================================================ start =======================================")

		filename = path + "/" + raw_file
		name, *_ = raw_file.split(".")
Пример #43
0
varTimeYMDHSM = datetime.datetime.now().strftime(
    '%Y%m%d%H%M%S')  # current date and time, e.g. 20161020130318
# varTableDetails = "/Users/linghuchong/Downloads/51/Project/CJL/report/CJLtable" + varTimeYMDHSM + ".html"
# connMongo155 = MongoClient('192.168.2.155', 10005); db = connMongo155.sceneWeb  # mongodb
# connRedis166 = redis.StrictRedis(host='192.168.2.166', port=6379, db=0, password="******")  # redis CJL66
# connRedis167 = redis.StrictRedis(host='192.168.2.167', port=6380, db=0, password="******")  # redis CJL67
# connPersonal = MySQLdb.connect(host='192.168.2.164', user='******', passwd='Dlhy66506043', db='personal', port=3306, use_unicode=True)
# curPersonal = connPersonal.cursor();curPersonal.execute('SET NAMES utf8;');connPersonal.set_character_set('utf8');curPersonal.execute('show tables')
# connScenemsg = MySQLdb.connect(host='192.168.2.164', user='******', passwd='Dlhy66506043', db='scenemsg', port=3306, use_unicode=True)
# curScenemsg = connScenemsg.cursor();curScenemsg.execute('SET NAMES utf8;');connScenemsg.set_character_set('utf8');curScenemsg.execute('show tables')
# connSysparam = MySQLdb.connect(host='192.168.2.164', user='******', passwd='Dlhy66506043', db='sysparam', port=3306, use_unicode=True)
# curSysparam = connSysparam.cursor();curSysparam.execute('SET NAMES utf8;');connSysparam.set_character_set('utf8');curSysparam.execute('show tables')
# connUpload = MySQLdb.connect(host='192.168.2.164', user='******', passwd='Dlhy66506043', db='upload', port=3306, use_unicode=True)
# curUpload = connUpload.cursor();curUpload.execute('SET NAMES utf8;');connUpload.set_character_set('utf8');curUpload.execute('show tables')
bk = xlrd.open_workbook(varExcel, formatting_info=True)
newbk = copy(bk)
sheetMain = bk.sheet_by_name("main")
sheetTestCase = bk.sheet_by_name("testcase")
sheetArea = bk.sheet_by_name("area")
sheetCom = bk.sheet_by_name("com")
sheetSplit = bk.sheet_by_name("split")
styleRed = xlwt.easyxf('font: name Times New Roman, color-index red')
styleBlue = xlwt.easyxf('font: name Times New Roman, color-index blue')
styleGray25 = xlwt.easyxf('font: name Times New Roman, color-index gray25')


class dkdj(unittest.TestCase):
    @classmethod
    def setUpClass(self):

        # get phone manufacturer info, e.g. adb shell getprop | grep "model\|version.sdk\|manufacturer\|hardware\|platform\|revision\|serialno\|product.name\|brand"
Пример #44
0
path_read1 = 'C:\\Users\\applee\\Desktop\\食材清单.xlsx'
workbook_read1 = xlrd.open_workbook(path_read1)  # open input file 1
read1_sheet1 = workbook_read1.sheet_by_index(0)  # sheet1 of input file 1
read1_sheet1_material = read1_sheet1.col(1)  # second column of sheet1 in file 1: ingredient
read1_sheet1_material_ID = read1_sheet1.col(0)  # first column of sheet1 in file 1: ingredient ID

path_read2 = 'C:\\Users\\applee\\Desktop\\食谱-食材-评论.xlsx'
workbook_read2 = xlrd.open_workbook(path_read2)  # open input file 2
read2_sheet1 = workbook_read2.sheet_by_index(0)  # sheet1 of input file 2
read2_sheet1_reptile = read2_sheet1.col(1)  # recipes in sheet1 of file 2
read2_sheet1_enshrine = read2_sheet1.col(2)  # recipe bookmark counts in sheet1 of file 2

path_write1 = 'C:\\Users\\applee\\Desktop\\食材清单B.xlsx'
workbook_write1 = xlrd.open_workbook(path_write1)  # open the output file
workbook_write1_new = copy(workbook_write1)
# copy the workbook first, then append to the copy and save it as a new Excel document
write_sheet1 = workbook_write1_new.get_sheet(0)  # sheet1 of the output file
total_enshrine = 0  # running total of bookmark counts for one ingredient
write_total_row = 1

for number_material in range(1, 476):

    read1_sheet1_material_element = read1_sheet1_material[
        number_material].value  # the ingredient cell

    for number_cell_reptile_material in range(1, 13138):

        cell_reptile_material_element = read2_sheet1.cell(
            number_cell_reptile_material, 3).value  # recipe ingredients in sheet1 of file 2
        read2_sheet1_enshrine_element = read2_sheet1_enshrine[
Пример #45
0
def importexcel_func(odoo_or_self_of_wizard,
                     import_key=False,
                     key_tram=False,
                     check_file=False,
                     gen_model_dict_kargs={},
                     is_in_transfer=None):
    print('***import_key', import_key)
    gen_model_dict_kargs['check_file'] = check_file
    self = odoo_or_self_of_wizard
    key_tram = getattr(self, 'key_tram', False) or key_tram
    new_dict = self.env['importexcel.importexcel'].gen_model_dict()
    import_key = import_key or self.import_key
    MD = new_dict[import_key]
    #     if 'cach_tim_location_goc' not in gen_model_dict_kargs:
    cach_tim_location_goc = getattr(self, 'cach_tim_location_goc', None)
    #     if cach_tim_location_goc:
    gen_model_dict_kargs['cach_tim_location_goc'] = cach_tim_location_goc
    if callable(MD):
        MD = MD(self=self,
                key_tram=key_tram,
                gen_model_dict_kargs=gen_model_dict_kargs,
                is_in_transfer=is_in_transfer)
    key_allow = MD.get('key_allow', False)
    key_tram = key_allow and key_tram
    if key_allow and not key_tram:
        raise UserError(u'ban phai chon key_tram')

#     rs = self.env['importexcel.importexcel'].gen_model_dict()
#     out_dict = {}
#     type_out_dict = {}
#     for k,MD_test in rs.items():
#         if callable(MD_test):
#             MD_test = MD_test(self=self, key_tram=key_tram, gen_model_dict_kargs=gen_model_dict_kargs)
#         key_allow = MD_test.get('key_allow',False)
#         export_all_key_list_vals_key_list_type_of_val(MD_test, output_key_list_vals_dict=out_dict, output_key_list_type_of_vals_dict=type_out_dict)
#     raise UserError(u'%s%s%s'%(str(out_dict), '***'*8, convert_dict_to_order_dict_string(type_out_dict)))

    if not self.file:
        raise UserError(u'Bạn phải upload file để import')
    filename = self.filename
    file_content = base64.decodestring(self.file)
    formatting_info = False if '.xlsx' in self.filename else True
    xl_workbook = xlrd.open_workbook(file_contents=file_content,
                                     formatting_info=formatting_info)
    noti_dict = {}

    #R1
    rut_gon_key(MD, key_tram)
    #R2
    ordereddict_fields(MD)
    #R2A
    #     check_val_of_attrs_is_true_type(MD)

    setting = MD.get('setting', {})
    setting.setdefault('allow_write', MD.get('allow_write', True))
    setting.setdefault('st_write_false', MD.get('st_write_false', False))
    setting2 = MD.get('setting2', {})
    if setting2:
        setting.update(setting2)
    self.setting = convert_dict_to_order_dict_string(setting)
    #R3
    add_more_attrs_to_field_MD(self, MD, setting=setting)

    imported_model = self.env[MD.get('model')]
    rs = imported_model.default_get([])  #imported_model._fields)
    all_field_attr_dict, dict_of_att_vs_set_vals = export_some_key_value_of_fields_MD(
        MD, exported_attrs_list=['allow_create'], dict_of_att_vs_set_vals={})
    self.all_field_attr_dict = all_field_attr_dict
    #     raise UserError(u'%s-%s'%(a,b))
    sheet_names = gen_sheet_names(self, MD, xl_workbook, gen_model_dict_kargs)
    needdata = {}
    needdata['self'] = self
    needdata['sheet_names'] = sheet_names
    needdata['file_name'] = filename
    needdata['key_tram'] = key_tram
    needdata['check_file'] = check_file
    is_admin = self.user_has_groups('base.group_erp_manager')
    needdata['is_admin'] = is_admin
    sh_names = xl_workbook.sheet_names()
    if check_file:
        workbook_copy = copy(xl_workbook)
    nrow = 0
    update_list = []

    initf = MD.get('initf')
    if initf:
        initf(self)
    for sheet_name in sheet_names:
        sheet_name = trust_sheet(sheet_name, sh_names)
        COPIED_MD = deepcopy(MD)
        needdata['vof_dict'] = COPIED_MD.get('fields')
        needdata['sheet_name'] = sheet_name
        sheet = xl_workbook.sheet_by_name(sheet_name)
        set_is_largest_map_row_choosing = MD.get(
            'set_is_largest_map_row_choosing'
        )  #set_is_largest_map_row_choosing  is boolean
        nrows = sheet.nrows
        title_rows = xac_dinh_title_rows(self, MD,
                                         set_is_largest_map_row_choosing,
                                         nrows, sheet_name)
        #R4
        row_title_index, largest_map_row, new_title_rows = define_col_index_common(
            title_rows, sheet, COPIED_MD, set_is_largest_map_row_choosing)
        #         if set_is_largest_map_row_choosing:
        #             row_title_index = largest_map_row
        #         raise UserError(str(new_title_rows))

        #R5
        check_compatible_col_index_and_xl_title(self, COPIED_MD, needdata)

        # find first_row, last_row

        dong_test_in_MD = MD.get('dong_test')
        first_row, last_row = gen_first_and_last_row(self, MD, row_title_index,
                                                     nrows, dong_test_in_MD)

        if check_file:
            index = sh_names.index(sheet_name)
            sheet_of_copy_wb = workbook_copy.get_sheet(index)
            write_get_or_create_title(MD, sheet, sheet_of_copy_wb,
                                      row_title_index)
        else:
            workbook_copy = None
            sheet_of_copy_wb = None
        merge_tuple_list = sheet.merged_cells
        obj_list = []

        for number_row_count, row in enumerate(range(first_row, last_row)):
            print('sheet_name*******', sheet_name, 'row', row)
            obj, obj_id = create_instance(self,
                                          COPIED_MD,
                                          sheet,
                                          row,
                                          merge_tuple_list,
                                          needdata,
                                          noti_dict,
                                          check_file=check_file,
                                          sheet_of_copy_wb=sheet_of_copy_wb,
                                          setting=setting,
                                          sheet_of_copy_wb_para={
                                              'sheet_of_copy_wb':
                                              sheet_of_copy_wb,
                                              'row': row,
                                              'sheet': sheet
                                          },
                                          f_name_call='main_call',
                                          update_list_haha=update_list)
            obj_list.append(obj)

        remove_out_item_func = MD.get('remove_out_item_func', None)
        if remove_out_item_func:
            remove_out_item_func(self, needdata, obj_list)

        if number_row_count:
            nrow = nrow + number_row_count + 1
    self.imported_number_of_row = nrow
    self.test4 = '\n'.join(map(lambda i: str(i), update_list))
    print('***update_list', update_list)
    self.log = noti_dict

    return workbook_copy
def drawonepng(nowdir, width=0.7, height=40):
    global docupath
    global nfile_path
    global area
    global Dark
    judge = 1
    for v in Dark:
        if v in nowdir:
            judge -= 1
            print("暗电流曲线")
    if '.csv' in nowdir and judge == 1:
        c = open(nowdir, "r")  # open the csv file for reading
        read = csv.reader(c)
        nfile_path = nowdir.replace(".csv", ".xls")
        print("准备创建xls")
        file = xlwt.Workbook()  # create a workbook
        table = file.add_sheet('sheet 1')  # create a worksheet
        n = 0
        for line in read:
            if n > 46 and len(line) > 3:
                a = line[2]
                b = line[3]
                table.write(n - 47, 0, a)  # write the value
                table.write(n - 47, 1, b)
            n += 1
        file.save(nfile_path)  # save
        print(nfile_path + "创建成功!")

    try:
        if '.xls' in nfile_path and '汇总' not in nfile_path:
            data = xlrd.open_workbook(nfile_path)
            sheet1 = data.sheet_by_index(0)

            if sheet1.nrows == 600:
                a = 0
                for i1 in range(600):
                    if float(sheet1.cell(i1, 0).value) > 0 and a == 0:
                        Isc1 = float(sheet1.cell(i1, 1).value) - (
                            float(sheet1.cell(i1, 1).value) -
                            float(sheet1.cell(i1 - 1, 1).value)) / (
                                float(sheet1.cell(i1, 0).value) -
                                float(sheet1.cell(i1 - 1, 0).value)) * float(
                                    sheet1.cell(i1, 0).value)
                        Isc = Isc1 / area * 1000 * (-1)
                        bkcopy = copy(data)
                        shcopy = bkcopy.get_sheet(0)
                        a += 1
                b = 0
                for i2 in range(600):
                    if float(sheet1.cell(i2, 1).value) > 0 and b == 0:
                        Voc = float(sheet1.cell(i2, 0).value) - (
                            float(sheet1.cell(i2, 0).value) -
                            float(sheet1.cell(i2 - 1, 0).value)) / (
                                float(sheet1.cell(i2, 1).value) -
                                float(sheet1.cell(i2 - 1, 1).value)) * float(
                                    sheet1.cell(i2, 1).value)
                        bkcopy = copy(data)
                        shcopy = bkcopy.get_sheet(0)
                        b += 1
                pmax = 0
                for j in range(600):
                    if float(sheet1.cell(j, 0).value) * float(
                            sheet1.cell(j, 1).value) * (-1) > pmax:
                        pmax = float(sheet1.cell(j, 0).value) * float(
                            sheet1.cell(j, 1).value) * (-1)
                eff = pmax / area * 1000
                Pmax = Isc * Voc
                FF = eff / Pmax
                bkcopy = copy(data)
                shcopy = bkcopy.get_sheet(0)
                shcopy.write(1, 6, Isc)
                shcopy.write(1, 7, Voc)
                shcopy.write(1, 8, FF)
                shcopy.write(1, 9, eff)
                bkcopy.save(nfile_path)  # save
                print("Isc=", format(Isc, ".2f"), "mA/cm2 Voc=",
                      format(Voc, ".2f"), "V FF=", format(FF * 100, ".2f"),
                      "% Eff=", format(eff, ".2f"), "%")
                if Isc < 0 or Isc > 40 or Voc < 0 or Voc > 0.7 or FF < 0 or FF > 0.9 or eff < 5 or eff > 20:
                    return
        data = xlrd.open_workbook(nfile_path)
        sheet1 = data.sheet_by_index(0)
        x = []
        y = []
        for i in range(600):
            x.append(float(sheet1.cell(i, 0).value))
            y.append(-float(sheet1.cell(i, 1).value) / area * 1000)
        # set the global tick-label font sizes via rcParams
        matplotlib.pyplot.rcParams['font.sans-serif'] = ['Arial']
        matplotlib.rcParams['xtick.direction'] = 'in'
        matplotlib.rcParams['ytick.direction'] = 'in'
        matplotlib.rcParams['xtick.labelsize'] = 12
        matplotlib.rcParams['ytick.labelsize'] = 12
        font = {
            'family': 'Arial',
            'color': 'black',
            'weight': 'normal',
            'size': 16,
        }

        matplotlib.pyplot.figure('IV Curve', figsize=(6, 4.5))
        ax = matplotlib.pyplot.subplot(1, 1, 1)
        ax.spines['bottom'].set_linewidth(1.3)
        ax.spines['left'].set_linewidth(1.3)
        ax.spines['top'].set_linewidth(1.3)
        ax.spines['right'].set_linewidth(1.3)
        # 'k' would select the line colour; lw sets the line width
        # the third argument can also set the line style, e.g. 'r--' means a red dashed line
        # more options: http://matplotlib.org/api/pyplot_api.html
        matplotlib.pyplot.plot(x, y, 'r', lw=1.5)
        matplotlib.pyplot.xlim(0, width)
        matplotlib.pyplot.xlabel('U (V)', fontdict=font)
        matplotlib.pyplot.ylim(0, height)
        matplotlib.pyplot.ylabel('I (mA/cm2)', fontdict=font)
        # scatter makes it easy to draw a scatter plot instead
        #matplotlib.pyplot.scatter(x, y)
        annotate('area  = ' + str(format(area, ".2f") + ' cm2'),
                 xy=(0, 0),
                 xycoords='data',
                 xytext=(+10, +110),
                 textcoords='offset points',
                 fontsize=12)
        annotate('Isc  = ' + str(format(Isc, ".2f") + ' mA/cm2'),
                 xy=(0, 0),
                 xycoords='data',
                 xytext=(+10, +85),
                 textcoords='offset points',
                 fontsize=12)
        annotate('Voc = ' + str(format(Voc, ".2f") + '  V'),
                 xy=(0, 0),
                 xycoords='data',
                 xytext=(+10, +60),
                 textcoords='offset points',
                 fontsize=12)
        annotate('FF   = ' + str(format(FF * 100, ".2f") + ' %'),
                 xy=(0, 0),
                 xycoords='data',
                 xytext=(+10, +35),
                 textcoords='offset points',
                 fontsize=12)
        annotate('Eff  = ' + str(format(eff, ".2f") + '  %'),
                 xy=(0, 0),
                 xycoords='data',
                 xytext=(+10, +10),
                 textcoords='offset points',
                 fontsize=12)
        matplotlib.pyplot.grid(False)
        # save the current figure to a file
        # aggregate the results
        if docupath:
            xlsfile_path = docupath + '/汇总.xls'
            if not os.path.exists(xlsfile_path):
                file = xlwt.Workbook()  # create a workbook
                table = file.add_sheet('sheet 1')  # create a worksheet
                table.write(0, 0, '文件路径')
                table.write(0, 1, 'Jsc mA/cm2')
                table.write(0, 2, 'Voc V')
                table.write(0, 3, 'FF')
                table.write(0, 4, 'Eff %')
                table.write(0, 5, 'rsh')
                table.write(0, 6, 'rs')
                file.save(xlsfile_path)
                print('汇总文件' + xlsfile_path + '创建成功!')
            else:
                print('汇总文件已存在')

            bkall = xlrd.open_workbook(xlsfile_path)  # count the rows already in the summary sheet
            nxlsfile_path = xlsfile_path.replace(".xls", '')
            shall = bkall.sheet_by_index(0)
            irow = shall.nrows
            print("汇总表格中已有" + str(irow) + "行")
            bkcopyall = copy(bkall)
            shcopyall = bkcopyall.get_sheet(0)
            try:
                #style = xlwt.easyxf(num_format_str='#,##0.00')
                shcopyall.write(irow, 0, nowdir)
                shcopyall.write(irow, 1, Isc)
                shcopyall.write(irow, 2, Voc)
                shcopyall.write(irow, 3, FF)
                shcopyall.write(irow, 4, eff)
                bkcopyall.save(xlsfile_path)  # save
            except:
                print(nowdir + "汇总失败!")
        # delete the intermediate xls file
        os.remove(nfile_path)
        print("xls文件删除成功!")

        nnfile_path = nowdir.replace(".csv", ".png")
        print(nnfile_path + "图片保存成功!")
        matplotlib.pyplot.savefig(nnfile_path, bbox_inches='tight', dpi=150)
        return (nowdir, Isc, Voc, FF, eff)
    except:
        return
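The Isc and Voc values above come from linearly interpolating the first sign change in the I-V data; the same step, pulled out as a small standalone sketch (function and variable names are mine, not from the original):

def zero_crossing(xs, ys):
    """Return y at x = 0, linearly interpolated around the first point where x turns positive."""
    for k in range(1, len(xs)):
        if xs[k] > 0:
            slope = (ys[k] - ys[k - 1]) / (xs[k] - xs[k - 1])
            return ys[k] - slope * xs[k]
    return None


# isc_raw = zero_crossing(voltage_column, current_column)  # current at V = 0
# voc     = zero_crossing(current_column, voltage_column)  # voltage at I = 0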
Пример #47
0
# to run yesterday's data instead, change this %%%%%%%%%%%%%%%%%%%%%%%%%
inter = 6
today = datetime.date.today()
# to run yesterday's data instead, change this %%%%%%%%%%%%%%%%%%%%%%%%%
interday = datetime.timedelta(days=34)
day_29_before = today - interday
#rowNumber, creating the new excel and opening the old excel all go inside the outer loop
for area in cdn_list:
    print area[4:5]

    oldWb = xlrd.open_workbook(r'/tmp/ErrorReportPro/errorReport/report/CDN_' +
                               area[4:5] + '/GH-CDN-' + area[4:5] +
                               '-NGid5225.xls',
                               formatting_info=True)
    w = xlwt.Workbook(encoding='utf-8', style_compression=2)
    w = copy(oldWb)
    print "2"
    # for sheet in w.sheets():

    # w._Workbook__worksheets = [ worksheet for worksheet in w._Workbook__worksheets if worksheet.name == sheet.name ]
    print "3"
    w._Workbook__worksheets = [
        worksheet for worksheet in w._Workbook__worksheets
        if worksheet.name != str(day_29_before)
    ]
    print 'excel done'
    sys.path.append('..')
    sys.path.append('/tmp/ErrorReportPro/errorReport/report/CDN_' + area[4:5])
    mysqlconn = MySQLdb.connect(host="172.16.169.12",
                                user="******",
                                passwd="ZAQ!XSW@CDE#",
def documentselectinone():
    global area
    global width
    global height
    global Dark
    xall = []
    yall = []
    Iscall = []
    Vocall = []
    FFall = []
    Effall = []
    name = []
    color = [
        "black", "b", "r", "g", "purple", "olive", "chocolate", "deepskyblue",
        "darkorange", "lime", "grey", "royalblue"
    ]
    cnames = {
        'aliceblue': '#F0F8FF',
        'antiquewhite': '#FAEBD7',
        'aqua': '#00FFFF',
        'aquamarine': '#7FFFD4',
        'azure': '#F0FFFF',
        'beige': '#F5F5DC',
        'bisque': '#FFE4C4',
        'black': '#000000',
        'blanchedalmond': '#FFEBCD',
        'blue': '#0000FF',
        'blueviolet': '#8A2BE2',
        'brown': '#A52A2A',
        'burlywood': '#DEB887',
        'cadetblue': '#5F9EA0',
        'chartreuse': '#7FFF00',
        'chocolate': '#D2691E',
        'coral': '#FF7F50',
        'cornflowerblue': '#6495ED',
        'cornsilk': '#FFF8DC',
        'crimson': '#DC143C',
        'cyan': '#00FFFF',
        'darkblue': '#00008B',
        'darkcyan': '#008B8B',
        'darkgoldenrod': '#B8860B',
        'darkgray': '#A9A9A9',
        'darkgreen': '#006400',
        'darkkhaki': '#BDB76B',
        'darkmagenta': '#8B008B',
        'darkolivegreen': '#556B2F',
        'darkorange': '#FF8C00',
        'darkorchid': '#9932CC',
        'darkred': '#8B0000',
        'darksalmon': '#E9967A',
        'darkseagreen': '#8FBC8F',
        'darkslateblue': '#483D8B',
        'darkslategray': '#2F4F4F',
        'darkturquoise': '#00CED1',
        'darkviolet': '#9400D3',
        'deeppink': '#FF1493',
        'deepskyblue': '#00BFFF',
        'dimgray': '#696969',
        'dodgerblue': '#1E90FF',
        'firebrick': '#B22222',
        'floralwhite': '#FFFAF0',
        'forestgreen': '#228B22',
        'fuchsia': '#FF00FF',
        'gainsboro': '#DCDCDC',
        'ghostwhite': '#F8F8FF',
        'gold': '#FFD700',
        'goldenrod': '#DAA520',
        'gray': '#808080',
        'green': '#008000',
        'greenyellow': '#ADFF2F',
        'honeydew': '#F0FFF0',
        'hotpink': '#FF69B4',
        'indianred': '#CD5C5C',
        'indigo': '#4B0082',
        'ivory': '#FFFFF0',
        'khaki': '#F0E68C',
        'lavender': '#E6E6FA',
        'lavenderblush': '#FFF0F5',
        'lawngreen': '#7CFC00',
        'lemonchiffon': '#FFFACD',
        'lightblue': '#ADD8E6',
        'lightcoral': '#F08080',
        'lightcyan': '#E0FFFF',
        'lightgoldenrodyellow': '#FAFAD2',
        'lightgreen': '#90EE90',
        'lightgray': '#D3D3D3',
        'lightpink': '#FFB6C1',
        'lightsalmon': '#FFA07A',
        'lightseagreen': '#20B2AA',
        'lightskyblue': '#87CEFA',
        'lightslategray': '#778899',
        'lightsteelblue': '#B0C4DE',
        'lightyellow': '#FFFFE0',
        'lime': '#00FF00',
        'limegreen': '#32CD32',
        'linen': '#FAF0E6',
        'magenta': '#FF00FF',
        'maroon': '#800000',
        'mediumaquamarine': '#66CDAA',
        'mediumblue': '#0000CD',
        'mediumorchid': '#BA55D3',
        'mediumpurple': '#9370DB',
        'mediumseagreen': '#3CB371',
        'mediumslateblue': '#7B68EE',
        'mediumspringgreen': '#00FA9A',
        'mediumturquoise': '#48D1CC',
        'mediumvioletred': '#C71585',
        'midnightblue': '#191970',
        'mintcream': '#F5FFFA',
        'mistyrose': '#FFE4E1',
        'moccasin': '#FFE4B5',
        'navajowhite': '#FFDEAD',
        'navy': '#000080',
        'oldlace': '#FDF5E6',
        'olive': '#808000',
        'olivedrab': '#6B8E23',
        'orange': '#FFA500',
        'orangered': '#FF4500',
        'orchid': '#DA70D6',
        'palegoldenrod': '#EEE8AA',
        'palegreen': '#98FB98',
        'paleturquoise': '#AFEEEE',
        'palevioletred': '#DB7093',
        'papayawhip': '#FFEFD5',
        'peachpuff': '#FFDAB9',
        'peru': '#CD853F',
        'pink': '#FFC0CB',
        'plum': '#DDA0DD',
        'powderblue': '#B0E0E6',
        'purple': '#800080',
        'red': '#FF0000',
        'rosybrown': '#BC8F8F',
        'royalblue': '#4169E1',
        'saddlebrown': '#8B4513',
        'salmon': '#FA8072',
        'sandybrown': '#FAA460',
        'seagreen': '#2E8B57',
        'seashell': '#FFF5EE',
        'sienna': '#A0522D',
        'silver': '#C0C0C0',
        'skyblue': '#87CEEB',
        'slateblue': '#6A5ACD',
        'slategray': '#708090',
        'snow': '#FFFAFA',
        'springgreen': '#00FF7F',
        'steelblue': '#4682B4',
        'tan': '#D2B48C',
        'teal': '#008080',
        'thistle': '#D8BFD8',
        'tomato': '#FF6347',
        'turquoise': '#40E0D0',
        'violet': '#EE82EE',
        'wheat': '#F5DEB3',
        'white': '#FFFFFF',
        'whitesmoke': '#F5F5F5',
        'yellow': '#FFFF00',
        'yellowgreen': '#9ACD32'
    }
    morecolor = []
    for key in cnames:
        morecolor.append(cnames[key])
    print(morecolor)
    rootdir = askdirectory()
    path.set(rootdir)
    for parent, dirnames, filenames in os.walk(
            rootdir):  # os.walk yields: 1. the parent dir 2. folder names (without path) 3. file names
        for filename in filenames:
            list1 = os.path.join(parent, filename)
            nowdir = list1.replace('\\', '/')
            npath = parent.replace('\\', '/')
            npath1 = npath + '/AllInOne.png'
            judge = 1
            for v in Dark:
                if v in nowdir:
                    judge -= 1
            if '.csv' in nowdir and judge == 1:
                labelname = filename.replace(".csv", '')
                name.append(labelname)
                print(name)
                c = open(nowdir, "r")  # open the csv file for reading
                read = csv.reader(c)
                nfile_path = nowdir.replace(".csv", ".xls")
                print(nfile_path)
                file = xlwt.Workbook()  # create a workbook
                table = file.add_sheet('sheet 1')  # create a worksheet
                n = 0
                for line in read:
                    if n > 46 and len(line) > 3:
                        a = line[2]
                        b = line[3]
                        table.write(n - 47, 0, a)  # write the value
                        table.write(n - 47, 1, b)
                    n += 1
                file.save(nfile_path)  # save
                print("xls创建成功!")

                if '.xls' in nfile_path and '汇总' not in nfile_path:
                    data = xlrd.open_workbook(nfile_path)
                    sheet1 = data.sheet_by_index(0)
                    if sheet1.nrows == 600:
                        a = 0
                        for i1 in range(600):
                            if float(sheet1.cell(i1, 0).value) > 0 and a == 0:
                                Isc1 = float(sheet1.cell(i1, 1).value) - (
                                    float(sheet1.cell(i1, 1).value) -
                                    float(sheet1.cell(i1 - 1, 1).value)) / (
                                        float(sheet1.cell(i1, 0).value) -
                                        float(sheet1.cell(i1 - 1, 0).value)
                                    ) * float(sheet1.cell(i1, 0).value)
                                Isc = Isc1 / area * 1000 * (-1)
                                bkcopy = copy(data)
                                shcopy = bkcopy.get_sheet(0)
                                a += 1
                        b = 0
                        for i2 in range(600):
                            if float(sheet1.cell(i2, 1).value) > 0 and b == 0:
                                Voc = float(sheet1.cell(i2, 0).value) - (
                                    float(sheet1.cell(i2, 0).value) -
                                    float(sheet1.cell(i2 - 1, 0).value)) / (
                                        float(sheet1.cell(i2, 1).value) -
                                        float(sheet1.cell(i2 - 1, 1).value)
                                    ) * float(sheet1.cell(i2, 1).value)
                                bkcopy = copy(data)
                                shcopy = bkcopy.get_sheet(0)
                                b += 1
                        pmax = 0
                        for j in range(600):
                            if float(sheet1.cell(j, 0).value) * float(
                                    sheet1.cell(j, 1).value) * (-1) > pmax:
                                pmax = float(sheet1.cell(j, 0).value) * float(
                                    sheet1.cell(j, 1).value) * (-1)
                        eff = pmax / area * 1000
                        Pmax = Isc * Voc
                        FF = eff / Pmax
                        bkcopy = copy(data)
                        shcopy = bkcopy.get_sheet(0)
                        shcopy.write(1, 6, Isc)
                        shcopy.write(1, 7, Voc)
                        shcopy.write(1, 8, FF)
                        shcopy.write(1, 9, eff)
                        bkcopy.save(nfile_path)  # save
                        print("Isc=", format(Isc, ".2f"), "mA/cm2 Voc=",
                              format(Voc, ".2f"), "V FF=",
                              format(FF * 100, ".2f"), "% Eff=",
                              format(eff, ".2f"), "%")
                        Iscall.append(Isc)
                        Vocall.append(Voc)
                        FFall.append(FF)
                        Effall.append(eff)
                data = xlrd.open_workbook(nfile_path)
                sheet1 = data.sheet_by_index(0)
                x = []
                y = []
                for i in range(600):
                    x.append(float(sheet1.cell(i, 0).value))
                    y.append(-float(sheet1.cell(i, 1).value) / area * 1000)
                xall.append(x)
                yall.append(y)
                # delete the intermediate xls file
                os.remove(nfile_path)
                print("xls文件删除成功!")
    # set the global tick-label font sizes via rcParams
    matplotlib.pyplot.rcParams['font.sans-serif'] = ['Arial']
    matplotlib.rcParams['xtick.direction'] = 'in'
    matplotlib.rcParams['ytick.direction'] = 'in'
    matplotlib.rcParams['xtick.labelsize'] = 12
    matplotlib.rcParams['ytick.labelsize'] = 12
    font = {
        'family': 'Arial',
        'color': 'black',
        'weight': 'normal',
        'size': 16,
    }

    matplotlib.pyplot.figure('IV Curve', figsize=(6, 4.5))
    ax = matplotlib.pyplot.subplot(1, 1, 1)
    ax.spines['bottom'].set_linewidth(1.3)
    ax.spines['left'].set_linewidth(1.3)
    ax.spines['top'].set_linewidth(1.3)
    ax.spines['right'].set_linewidth(1.3)
    # 'k' would select the line colour; lw sets the line width
    # the third argument can also set the line style, e.g. 'r--' means a red dashed line
    # more options: http://matplotlib.org/api/pyplot_api.html
    if len(name) < 13:
        lenth = len(name)
        for i in range(lenth):
            matplotlib.pyplot.plot(xall[i],
                                   yall[i],
                                   color[i],
                                   lw=2,
                                   label=name[i])
    else:
        lenth = len(name)
        for i in range(lenth):
            matplotlib.pyplot.plot(xall[i],
                                   yall[i],
                                   morecolor[i],
                                   lw=2,
                                   label=name[i])
    matplotlib.pyplot.xlim(0, width)
    matplotlib.pyplot.xlabel('U (V)', fontdict=font)
    matplotlib.pyplot.ylim(0, height)
    matplotlib.pyplot.ylabel('I (mA/cm2)', fontdict=font)
    # scatter makes it easy to draw a scatter plot instead
    #matplotlib.pyplot.scatter(x, y)
    matplotlib.pyplot.grid(False)
    matplotlib.pyplot.legend()
    # save the current figure to a file

    nnfile_path = nowdir.replace(".csv", ".png")
    print(nnfile_path + "图片保存成功!")
    matplotlib.pyplot.savefig(npath1, bbox_inches='tight', dpi=300)
    matplotlib.pyplot.show()
    print(Dark)
def PhotoScraper(NAME, FB_ID):
    import http.cookiejar
    import urllib.request
    import bs4
    import xlrd
    import time
    import xlwt
    import requests
    from xlutils.copy import copy
    wb = xlrd.open_workbook('photos.xlsx')
    sheet = wb.sheet_by_index(0)
    r = sheet.nrows
    rb = copy(wb)
    sheet = rb.get_sheet(0)

    cj = http.cookiejar.CookieJar()
    opener = urllib.request.build_opener(
        urllib.request.HTTPCookieProcessor(cj))
    urllib.request.install_opener(opener)

    authentication_url = "https://m.facebook.com/login.php"

    payload = {'email': '9877335446', 'pass': '******'}
    data = urllib.parse.urlencode(payload).encode('utf-8')
    req = urllib.request.Request(authentication_url, data)
    resp = urllib.request.urlopen(req)
    contents = resp.read()

    name = NAME  #input("Enter the Name of the Person you want to check")
    fb_id = FB_ID  #input("Enter The Fb id of person")

    #url="https://mbasic.facebook.com/"+fb_id+"/photos"
    url = "https://mbasic.facebook.com/" + fb_id
    data = requests.get(url, cookies=cj)
    soup = bs4.BeautifulSoup(data.text, 'html.parser')
    for i in soup.find_all('a', href=True):
        if i.text.lower() == "photos":
            url = "https://mbasic.facebook.com/" + i['href']
            break

    time.sleep(1)
    data = requests.get(url, cookies=cj)
    soup = bs4.BeautifulSoup(data.text, 'html.parser')
    for i in soup.find_all('a', href=True):
        if i.text.lower() == "see all":
            url = "https://mbasic.facebook.com" + i['href']
            break

    time.sleep(1)
    print("Wait few seconds, we are Downloading Images of " + name)
    data = requests.get(url, cookies=cj)
    soup = bs4.BeautifulSoup(data.text, 'html.parser')
    ph_urls = []
    index = 0
    for i in soup.find_all('a', href=True):
        if (i['href'][0:6]).lower() == "/photo":
            nurl = "https://mbasic.facebook.com" + i['href']
            ph_urls.append(nurl)
            index = index + 1
        if index >= 6:
            break
    photos = []
    for url in ph_urls:
        time.sleep(1)
        data = requests.get(url, cookies=cj)
        soup = bs4.BeautifulSoup(data.text, 'html.parser')
        for i in soup.find_all('a', href=True):
            if i.text.lower() == 'view full size':
                nurl = "https://mbasic.facebook.com" + i['href']
                photos.append(nurl)
                break
    num = 0
    for photo in photos:
        time.sleep(1)
        data = requests.get(photo, cookies=cj)
        soup = bs4.BeautifulSoup(data.text, 'html.parser')
        for i in soup.find_all('meta'):
            time.sleep(1)
            urllib.request.urlretrieve(
                i['content'][6:],
                "images/" + name.split(" ")[0] + str(num) + ".jpg")
            sheet.write(r, 0, name)
            sheet.write(r, 1, fb_id)
            sheet.write(r, 2, name.split(" ")[0] + str(num) + ".jpg")
            r = r + 1
            print("Downloaded " + str(num))
        num = num + 1
    rb.save("photos.xlsx")
    print("Photos Downloaded Sucessfully:)")
Пример #50
0
import xlrd  # import the xls reading library.
from xlutils.copy import copy  # import the copy function.
wb = xlrd.open_workbook('Chapter-7-11-1.xls')  # open the workbook.
ws1 = wb.sheet_by_name('全部名单')  # read the '全部名单' (full roster) worksheet.
ws2 = wb.sheet_by_name('已完成名单')  # read the '已完成名单' (completed) worksheet.
nwb = copy(wb)
ws3 = nwb.get_sheet('未完成名单')  # get the '未完成名单' (not completed) worksheet, writable.
lst1 = [tuple(v.value for v in l) for l in ws1.get_rows()]  # records of '全部名单'.
lst2 = [tuple(v.value for v in l) for l in ws2.get_rows()][1:]  # records of '已完成名单'.
dic = dict.fromkeys(lst1)  # turn lst1 into the dictionary dic.
[dic.pop(t) for t in lst2]  # drop the completed entries of lst2 from dic.
row_num = 0  # initialise row_num to 0.
for key in dic.keys():  # loop over the keys of dic.
    ws3.write(row_num, 0, key[0])  # write the name into column A.
    ws3.write(row_num, 1, key[1])  # write the department into column B.
    row_num += 1  # advance row_num by 1.
nwb.save('Chapter-7-11-1.xls')  # save the workbook copy.
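The dict.fromkeys/pop trick above is essentially a set difference; a minimal sketch of the same roster diff written with sets (it drops the header row and the original row order, hence the sort), reusing the file and sheet names from the example:

import xlrd
from xlutils.copy import copy

wb = xlrd.open_workbook('Chapter-7-11-1.xls')
all_rows = {tuple(v.value for v in r) for r in wb.sheet_by_name('全部名单').get_rows()}
done_rows = {tuple(v.value for v in r) for r in wb.sheet_by_name('已完成名单').get_rows()}

nwb = copy(wb)
ws3 = nwb.get_sheet('未完成名单')
for row_num, rec in enumerate(sorted(all_rows - done_rows)):
    ws3.write(row_num, 0, rec[0])  # name
    ws3.write(row_num, 1, rec[1])  # department
nwb.save('Chapter-7-11-1.xls')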
Пример #51
0
def add_sheet():
    rb = xlrd.open_workbook("python_excel_test.xls")
    w = copy(rb)
    ws = w.add_sheet('new_sheet')
    ws.write(1, 1, "SS")
    w.save('new.xls')
Пример #52
0
def WriteToExcel(_data2save):
    # create the excel file if it does not exist yet
    if not os.path.exists(record_file):
        workbook = xlwt.Workbook()
        sheet_visit = workbook.add_sheet("visited")
        sheet_comment = workbook.add_sheet("commented")
        sheet_total = workbook.add_sheet("total")
        # write the row labels in the first column
        sheet_visit.write(0, 0, "文章名字")
        sheet_comment.write(0, 0, "文章名字")
        sheet_total.write(0, 0, "项目")
        sheet_total.write(1, 0, "原创")
        sheet_total.write(2, 0, "粉丝")
        sheet_total.write(3, 0, "喜欢")
        sheet_total.write(4, 0, "评论")
        sheet_total.write(5, 0, "访问")
        sheet_total.write(6, 0, "积分")
        sheet_total.write(7, 0, "排名")
        workbook.save(record_file)

    # number format for the written values
    style = xlwt.XFStyle()
    style.num_format_str = '0'

    # open the excel file with xlrd
    rexcel = xlrd.open_workbook(record_file)
    # copy rexcel into an xlwt workbook
    wexcel = copy(rexcel)
    # get the worksheets
    sheet_visit = wexcel.get_sheet(0)
    sheet_comment = wexcel.get_sheet(1)
    sheet_total = wexcel.get_sheet(2)
    # get the current column and row counts
    read_times = rexcel.sheets()[0].ncols - 1
    article_num = rexcel.sheets()[0].nrows - 1

    # today's date
    now_time = int(str(datetime.datetime.now().strftime('%Y%m%d')))

    # avoid counting the same day twice
    if rexcel.sheets()[0].cell_value(0, read_times) != now_time:
        read_times += 1
    sheet_visit.write(0, read_times, now_time, style)
    sheet_comment.write(0, read_times, now_time, style)
    sheet_total.write(0, read_times, now_time, style)

    # write the per-article data
    for i in range(len(_data2save) - 1):
        # if the article already exists use its row, otherwise append a new row with its title
        current_article_index = article_num + 1
        for j in range(rexcel.sheets()[0].nrows - 1):
            if rexcel.sheets()[0].cell_value(j + 1, 0) == _data2save[i][0]:
                current_article_index = j + 1
        if current_article_index > article_num:
            article_num += 1
            sheet_visit.write(current_article_index, 0, _data2save[i][0])
            sheet_comment.write(current_article_index, 0, _data2save[i][0])
        # store the view and comment counts
        sheet_visit.write(current_article_index, read_times,
                          int(_data2save[i][1]), style)
        sheet_comment.write(current_article_index, read_times,
                            int(_data2save[i][2]), style)

    # write the overall statistics
    for i in range(rexcel.sheets()[2].nrows - 1):
        sheet_total.write(i + 1, read_times,
                          int(_data2save[len(_data2save) - 1][i]), style)

    # save the workbook
    wexcel.save(record_file)
Пример #53
0
                booksList = rSheet.cell_value(ii, 4).split(";")
            else:
                booksList = rSheet.cell_value(ii, 4).split(",")
            print len(booksList)
            for book in booksList:
                book = book.strip()
                if bookMap.has_key(book):
                    if bookMap[book].__contains__(rSheet.cell_value(ii, 2)):
                        continue
                    else:
                        bookMap[book].append(rSheet.cell_value(ii, 2))
                else:
                    flagList.append(rSheet.cell_value(ii, 2))
                    bookMap[book] = flagList
outWb = open_workbook("allTags.xls", formatting_info=True)
newbook = copy(outWb)
try:
    oSheet = newbook.get_sheet(1)
except Exception as e:
    oSheet = newbook.add_sheet('书名标签结果', cell_overwrite_ok=True)
for A in range(100):
    oSheet.col(A).width = 4800
oSheet.write(0, 0, '书名', tittle_style)
oSheet.write(0, 1, '标签集', tittle_style)
index = 1
for book in bookMap:
    print book, len(bookMap[book])
    oSheet.write(index, 0, book, normal_style)
    newFlagList = list(set(bookMap[book]))
    newFlagList.sort()
    oSheet.write(index, 1, ",".join(newFlagList), normal_style)
    index += 1
if __name__ == '__main__':
    #if(len(sys.argv) < 3):
    #    print("Usage: autoExtractToExcel.py targetDir outResultDir\n")
    #    sys.exit(1)
    collectDataDir = 'data' #sys.argv[1]
    outResultDir   = 'out'  #sys.argv[2]
    
    if(not os.path.exists(outResultDir)):
        make_all_dir(outResultDir)
    #outExcelData   = outResultDir + delimiter + '__result.csv'
    #create_excel(outExcelData)
    outExcelData = collectDataDir + delimiter + 'MAC-1.xls'         ## this file is the BDBR-format template, do not modify it
    outexcel     = outResultDir   + delimiter + 'MAC-1_result.xls'  ## this file holds the aggregated BDBR results
    create_excel(outexcel)
 
    exceldata = xlrd.open_workbook(outExcelData, encoding_override="gbk")
    datawt    = copy(exceldata)  ## convert the xlrd object into an xlwt object
    [files, isfile] = get_raw_log(collectDataDir) # get the files of the expected format in the data directory
  
    # go through each matching file and extract its data
    for collectdata in files:
        print('[Process]: '+collectdata) 
        filename = get_file_name(collectdata) ## get the file name
        #ret = collect_data_to_excel(outExcelData, collectdata)
        outrawtxt = outResultDir + delimiter + filename + '_format_data.txt'  ## formatted text data
        get_data_from_txt(collectdata, outrawtxt) ## extract the data into the formatted text file
        ret = collect_data_to_excel_target(exceldata, datawt, outrawtxt, outexcel) ## write it into the target Excel file
    if(ret!=0):
        print("--------Process finished!--------")
        os._exit(0)
print(sheet.col_values(1))  # values of column 1 (the second column)
for i in range(sheet.ncols):
    print(sheet.col_values(i))  # values of each column in turn

#xlwt: write-only
#write a list of rows to excel: the first row is the header, the following rows are the data
stus = [['姓名', '年龄', '性别', '分数'], ['mary', 20, '女', 89.9],
        ['mary', 20, '女', 89.9], ['mary', 20, '女', 89.9],
        ['mary', 20, '女', 89.9]]
book = xlwt.Workbook()  # create a new excel workbook
sheet = book.add_sheet('case1_sheet')  # add a sheet
row = 0  # row index
for stu in stus:
    col = 0  # column index
    for s in stu:  # loop over the values of the inner list, one per column
        sheet.write(row, col, s)
        col += 1
    row += 1
book.save('stu_1.xls')  # save in the current directory

#xlutils: modify an existing excel file
#change a couple of values and save the file again
book1 = xlrd.open_workbook('stu.xls')
book2 = copy(book1)  # make a copy of the original excel
# print(dir(book2))
sheet = book2.get_sheet(0)  # get a sheet by index; book2 is now an xlutils/xlwt object, not xlrd
sheet.write(1, 3, 0)
sheet.write(1, 0, 'hello')
book2.save('stu.xls')
Пример #56
0
def modify_excel():
    rb = xlrd.open_workbook("python_excel_test.xls")
    w = copy(rb)
    w.get_sheet(1).write(1, 1, "Hello")
    w.save('book2.xls')
Пример #57
0
    def writer_values(self, row, col, value):
        read_data = xlrd.open_workbook(self.file_name)
        write_data = copy(read_data)
        sheet_data = write_data.get_sheet(0)
        sheet_data.write(row, col, value)
        write_data.save(self.file_name)
# make the excel file to store information

style1 = xlwt.XFStyle()
style1.num_format_str = "D-MM-YY"
#
# wb = xlwt.Workbook()
# sheet1 = wb.add_sheet("May 2018", cell_overwrite_ok=True)
# sheet1.write(0, 0, datetime.now(), style1)
#
#
# wb.save("May 2018 orders.xls")

read_workbook = xlrd.open_workbook("May 2018 orders.xls", formatting_info=True)
read_sheet = read_workbook.sheet_by_index(0)

workbook = copy(read_workbook)
sheet = workbook.get_sheet(0)

sheet.write(0, 0, datetime.now(), style1)
sheet.write(0, 1, "Item")
sheet.write(0, 2, "Size")
sheet.write(0, 3, "Color")
sheet.write(0, 4, "Quantity")

number = read_sheet.nrows

print(number)

workbook.save("May 2018 orders.xls")
Пример #59
0
from xls import Property
import xlwt
from xlutils.copy import copy
import xlrd

workbook = xlwt.Workbook(encoding='ascii')
workbook = xlwt.Workbook(encoding='utf-8')
workbook = xlrd.open_workbook("zydr.xls")
# make a copy of the workbook
old_content = copy(workbook)
worksheet = old_content.get_sheet(0)
# instantiate the class
dh = Property()
# call the class methods
name = dh.name()
nation = dh.nation()
telephone = dh.telephone()
identity = dh.identity()
ns = dh.number_seven()
sex = dh.sex()
marry = dh.marry()
collde = dh.collde()
time = dh.local_time()

# write the values into excel
worksheet.write(1, 0, ns)
worksheet.write(1, 1, name)
worksheet.write(1, 2, identity)
worksheet.write(1, 3, sex)
worksheet.write(1, 4, marry)
worksheet.write(1, 5, collde)
Пример #60
0
# encoding: utf-8
'''
@author: weiyang_tang
@contact: [email protected]
@file: xlutilsDemo.py
@time: 2019-02-04 23:21
@desc: modify an existing excel file
'''

import xlrd
from xlutils.copy import copy

# open the excel file you want to modify
old_excel = xlrd.open_workbook('data/weixinFridendList.xls',
                               formatting_info=True)
# copy the opened file object into a writable workbook object
new_excel = copy(old_excel)
# get the first sheet object
ws = new_excel.get_sheet(0)
# write the data
ws.write(0, 0, '第一行,第一列')
ws.write(0, 1, '第一行,第二列')
ws.write(0, 2, '第一行,第三列')
ws.write(1, 0, '第二行,第一列')
ws.write(1, 1, '第二行,第二列')
ws.write(1, 2, '第二行,第三列')
new_excel.save('data/weixinFridendList.xls')
# save it again as a new excel file with a different name
new_excel.save('data/new_fileName.xls')
print("excel文件修改完毕")