Example #1
import datetime

from xlwt import Workbook

def excel(data, name):
    book = Workbook()
    title_info = []
    for i in data:
        for k, v in i.items():
            title_info.append(k)
    title = list(set(title_info))
    if len(data) < 65535:
        excel = book.add_sheet(name, cell_overwrite_ok=True)
        for x in range(len(title)):
            excel.write(0, x, title[x].decode('UTF-8'))

        for i in range(len(data)):
            for j in range(len(data[i])):
                excel.write(i + 1, j, data[i][title[j]])
    else:
        # .xls sheets hold at most 65536 rows, so split the data across numbered sheets
        page = len(data) / 65535
        for k in range(1, page + 2):
            excel = book.add_sheet(name + '_' + str(k), cell_overwrite_ok=True)
            for x in range(len(title)):
                excel.write(0, x, title[x].decode('UTF-8'))
            if k < page + 1:
                # full pages of 65535 data rows each
                for i in range(65535 * (k - 1), 65535 * k):
                    for j in range(len(title)):
                        excel.write(i - 65535 * (k - 1) + 1, j, data[i][title[j]])
            else:
                # the final, partial page
                for i in range(65535 * (k - 1), len(data)):
                    for j in range(len(title)):
                        excel.write(i - 65535 * (k - 1) + 1, j, data[i][title[j]])

    date = datetime.datetime.now().strftime('%Y%m%d')
    book.save('../static/' + name + '.' + date + '.xls')
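A minimal usage sketch for the export helper above (this call site is an assumption, not part of the original source): data is a list of dicts that share the same keys, and the output lands under the hard-coded ../static/ directory, which must already exist.

rows = [
    {'id': '1', 'name': 'alpha'},
    {'id': '2', 'name': 'beta'},
]
excel(rows, 'users')  # writes ../static/users.<YYYYMMDD>.xls, splitting into numbered sheets past 65,535 rows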
Example #2
class ExcelWorkbook(object):
    def __init__(self, encoding='utf-8', style_compression=0):
        self.wb = Workbook(encoding, style_compression)
        self.sheets = []

    def sheet(self, sheet_name=''):
        """get a sheet by name, if no name creates a new sheet"""
        if not self.sheets:
            return self.add_sheet(sheet_name)

        else:
            for sheet in self.sheets:
                if sheet.name == sheet_name:
                    return sheet

    def add_sheet(self, sheet_name='Sheet 1', headers=[], header_line_no=0, use_borders=False, styleHeaders=None, styleDict={}, widths={}):
        ws = self.wb.add_sheet(sheet_name, cell_overwrite_ok=True)
        self.sheets.append(ExcelSheet(ws, headers, header_line_no, use_borders, styleHeaders, styleDict, widths))
        return self.sheets[-1]

    def save(self, filename):
        """saves excel file"""
        if os.path.splitext(filename)[-1] not in ('.xls', '.xlsx'):
            filename = os.path.splitext(filename)[0] + '.xls'

        for sheet in iter(self):
            sheet.autoFit()

        self.wb.save(filename)
        return filename

    def __iter__(self):
        """generator to iterate through sheets"""
        for sheet in self.sheets:
            yield sheet
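A hedged usage sketch for the wrapper above; it assumes the ExcelSheet class referenced in add_sheet() is defined elsewhere in the project and provides the name attribute and autoFit() method that sheet() and save() rely on.

book = ExcelWorkbook(encoding='utf-8')
report = book.sheet('Report')   # no sheets yet, so this creates one
same = book.sheet('Report')     # later calls return the existing sheet by name
path = book.save('report')      # extension is normalised to .xls
print(path)                     # -> report.xls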
Example #3
def make_group_connectome_lengths_excel_file(subject_names,cff_files,track_name,endpointsmm_name,labels_name, outfile_name):
	"""
	**NEEDS TESTING**
	write docstring
	"""
	import jg_DWI_util
	from xlwt import Workbook
	
	wb = Workbook()
	ws = wb.add_sheet('0')
	col_ind = 1
	for c in range(0, len(cff_files)):
		print 'processing subject ' + str(subject_names[c])
		flen, eud, fl = jg_DWI_util.analyze_connectome_lengths(cff_files[c], track_name,endpointsmm_name,labels_name,0,0)
		col_ind = col_ind+6
		ws.write(0,col_ind,subject_names[c])
		# for each subject, set out a block in the spreadsheet and write in the three variables
		for f in range(0, len(flen)):
			ws.write(f+1,col_ind+1,str(flen[f]))
			ws.write(f+1,col_ind+2,str(eud[f]))
			ws.write(f+1,col_ind+3,str(fl[f][0]))
			ws.write(f+1,col_ind+4,str(fl[f][1]))
		# (also get the mean, stdev, etc. fibre stats and put in on an edgewise,rather than fibrewise, basis)
	# write the excel file
	wb.save(outfile_name)	
Example #4
File: views.py  Project: yvess/smartmin
    def render_to_response(self, context, **response_kwargs):

        from xlwt import Workbook
        book = Workbook()
        sheet1 = book.add_sheet(self.derive_title())
        fields = self.derive_fields()

        # build up our header row
        for col in range(len(fields)):
            field = fields[col]
            sheet1.write(0, col, unicode(self.lookup_field_label(dict(), field)))

        # then our actual values
        for row in range(len(self.object_list)):
            obj = self.object_list[row]
            for col in range(len(fields)):
                field = fields[col]
                value = unicode(self.lookup_field_value(dict(), obj, field))
                # row 0 holds the header, so data rows start at row 1
                sheet1.write(row + 1, col, value)

        # Create the HttpResponse object with the appropriate header.
        response = HttpResponse(mimetype='application/vnd.ms-excel')
        response['Content-Disposition'] = 'attachment; filename=%s' % self.derive_filename()
        book.save(response)
        return response
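For reference, the mimetype keyword used above is the old Django API; on current Django versions the same response would be built with content_type (a sketch, assuming the rest of the view is unchanged):

from django.http import HttpResponse

response = HttpResponse(content_type='application/vnd.ms-excel')
response['Content-Disposition'] = 'attachment; filename=report.xls'
book.save(response)  # xlwt can write directly into the file-like response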
Example #5
from xlwt import Workbook, easyxf

def save_excel(projects, path):
	'''
	Saves our list of projects to an xls file
	:param projects: list
	:param path: str
	:return: file
	'''
	wb = Workbook()
	ws = wb.add_sheet('Sheet 1')

	style1 = easyxf('pattern: pattern solid, fore_colour yellow;' + 'font: bold True, height 250')
	ws.col(0).width = 20000
	ws.col(1).width = 11000
	ws.col(2).width = 3000
	ws.col(3).width = 4000
	ws.col(4).width = 4000

	headers = ('Название', 'Ссыдка', 'Размер', 'Раздатчики', 'Картинка')

	for j, header in enumerate(headers):
		ws.write(0, j, header, style1)

	for i, project in enumerate(projects, 1):
		ws.row(i).write(0, project['topic'])
		ws.row(i).write(1, project['link'])
		ws.row(i).write(2, project['size'])
		ws.row(i).write(3, project['seeders'])
		ws.row(i).write(4, project['img'])

	wb.save(path)
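A short usage sketch (assumed, not from the original source): each project dict needs the five keys the writer reads.

projects = [{
    'topic': 'Example release',
    'link': 'http://example.com/t/1',
    'size': '1.4 GB',
    'seeders': 12,
    'img': 'http://example.com/t/1.jpg',
}]
save_excel(projects, 'projects.xls')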
Example #6
    def test_unicode1(self):
        book = Workbook()
        ws1 = book.add_sheet(six.u('\N{GREEK SMALL LETTER ALPHA}\N{GREEK SMALL LETTER BETA}\N{GREEK SMALL LETTER GAMMA}'))

        ws1.write(0, 0, six.u('\N{GREEK SMALL LETTER ALPHA}\N{GREEK SMALL LETTER BETA}\N{GREEK SMALL LETTER GAMMA}'))
        ws1.write(1, 1, six.u('\N{GREEK SMALL LETTER DELTA}x = 1 + \N{GREEK SMALL LETTER DELTA}'))

        ws1.write(2, 0, six.u('A\u2262\u0391.'))      # RFC2152 example
        ws1.write(3, 0, six.u('Hi Mom -\u263a-!'))    # RFC2152 example
        ws1.write(4, 0, six.u('\u65E5\u672C\u8A9E'))  # RFC2152 example
        ws1.write(5, 0, six.u('Item 3 is \u00a31.'))  # RFC2152 example
        ws1.write(8, 0, six.u('\N{INTEGRAL}'))        # RFC2152 example

        book.add_sheet(six.u('A\u2262\u0391.'))     # RFC2152 example
        book.add_sheet(six.u('Hi Mom -\u263a-!'))   # RFC2152 example
        one_more_ws = book.add_sheet(six.u('\u65E5\u672C\u8A9E'))  # RFC2152 example
        book.add_sheet(six.u('Item 3 is \u00a31.'))  # RFC2152 example

        one_more_ws.write(0, 0, six.u('\u2665\u2665'))

        book.add_sheet(six.u('\N{GREEK SMALL LETTER ETA WITH TONOS}'))

        stream = six.BytesIO()
        book.save(stream)
        md5 = hashlib.md5()
        md5.update(stream.getvalue())
        self.assertEqual('0049fc8cdd164385c45198d2a75a4155', md5.hexdigest())
Example #7
class ExcelWriter(object):


	def __init__(self):
		# grab excel file and columns
		self.wb_read = open_workbook('pids.xlsx')
		self.wb_write = Workbook()
		self.output = self.wb_write.add_sheet('PID and Names')

	def go(self):
		row_counter = 1
		for sheet in self.wb_read.sheets():
			for row in range(sheet.nrows):
				pid_to_query = sheet.cell(row,3).value.replace('-','')
				pid_to_query = str(pid_to_query)
				owner_name = pid_getter.get_pid(pid_to_query)

				print row_counter, "Query of PID", sheet.cell(row,0).value, "Owner name = ", owner_name

				# write to new file
				# self.output.write(row_counter,0,pid_to_query)
				# self.output.write(row_counter,1,owner_name)

				# writing to a new file by copying everything else
				for col in range(sheet.ncols):
					self.output.write(row_counter,col,sheet.cell(row,col).value)
				self.output.write(row_counter,sheet.ncols,owner_name)

				# note: xlwt always writes legacy .xls content, despite the .xlsx name;
				# saving inside the loop rewrites the whole file after every row
				self.wb_write.save('names.xlsx')

				row_counter += 1 
Example #8
def write_excel(totals):
    """Write out the collected totals to an excel file
    """
    workbook = Workbook()
    worksheet = workbook.add_sheet('New Sheet')

    # running totals accumulated across both time blocks
    total_hours = 0
    total_tasks = 0

    block_tasks, block_total = write_time_block(worksheet, totals)
    total_hours += block_total
    total_tasks += block_tasks

    block_tasks, block_total = write_time_block(worksheet, totals, block_tasks)
    total_hours += block_total
    total_tasks += block_tasks

    # write out the total hours
    worksheet.write(total_tasks + 6, 0, 'Monthly Total', summary_total_header)
    worksheet.write(total_tasks + 6, 1, total_hours, data_cell)

    # write out the user and date
    name = raw_input('Who is this report for? ')
    worksheet.write(total_tasks + 8, 0, 'Name: %s' % (name))
    worksheet.write(total_tasks + 9, 0, 'Date: %s' % (datetime.strftime(datetime.today(), '%m/%d/%Y')))

    # write the signature field
    worksheet.write(total_tasks + 8, 6, 'Signature:')
    # save the file to disk
    curpath = os.path.dirname(__file__)
    workbook.save(os.path.join(curpath, 'test.xls'))
Example #9
    def render(self, request, context, **response_kwargs):
        from xlwt import Workbook, XFStyle, easyxf

        w = Workbook(encoding='utf-8')

        ws = w.add_sheet('Report')
        style = XFStyle()

        row = 0
        heading_xf = easyxf('font:height 200; font: bold on; align: wrap on, vert centre, horiz center')
        ws.write(row, 0, '#', style)

        for col, fieldname in enumerate(context['report'].headers, start=1):
            ws.write(row, col, str(fieldname), heading_xf)
            ws.col(col).width = 5000
        ws.row(row).height = 500

        # we have to prepare all the styles before going into the loop
        # to avoid the "More than 4094 XFs (styles)" Error
        styles = self._get_styles(context)
        for rownum, data in enumerate(context['report']):
            ws.write(rownum + 1, 0, rownum + 1)
            for idx, (fieldname, rowvalue) in enumerate(data.items()):
                style = styles[rowvalue.column.name]
                try:
                    ws.write(rownum + 1, idx + 1, with_widget(rowvalue, format='xls'), style)
                except Exception:
                    #logger.warning("TODO refine this exception: %s" % e)
                    ws.write(rownum + 1, idx + 1, smart_str(with_widget(rowvalue)), style)

        f = StringIO.StringIO()
        w.save(f)
        f.seek(0)

        return f.read()
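The comment about the "More than 4094 XFs (styles)" error above points at a real xlwt limitation: every easyxf() call produces a new style record, and a workbook can only hold 4094 of them. A hedged sketch of the usual workaround, creating each style once and reusing it for every cell:

from xlwt import Workbook, easyxf

bold = easyxf('font: bold on')   # build the styles once, up front
plain = easyxf('')

book = Workbook()
sheet = book.add_sheet('Report')
for row in range(1000):
    style = bold if row % 2 == 0 else plain   # reuse the two prepared styles
    sheet.write(row, 0, 'value %d' % row, style)
book.save('report.xls')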
Example #10
import time

import xlrd
from xlwt import Workbook

def output_mesg(company_lack):
    book = Workbook()
    sheet1 = book.add_sheet(u'1')
    i = 0
    num = 1
    for key, value in company_lack.items():
        for s, d in value.items():

            sheet1.write(i, 0, key)
            sheet1.write(i, num, s)
            sheet1.write(i, num+1, d)
            i = i + 1
        num = 1


    book.save('4.xls')  # save the Excel file
    book = xlrd.open_workbook('4.xls')

    print('----------------------------------------------------------------------------------------')
    print('----------------------------------------------------------------------------------------')
    print(u'计算完成')

    print('----------------------------------------------------------------------------------------')

    print('----------------------------------------------------------------------------------------')

    time.sleep(10)
Example #11
def make_xls(sheetname='sheet_1', filename='filename.xls', columns=[], objs=[]):
    book = Workbook()
    sheet = book.add_sheet(sheetname)

    def index_of(key, list_2d):
        for i, one in enumerate(list_2d):
            if key == one[0]:
                return i

    attrs = []
    for inner_list in columns:
        attrs.append(inner_list[0])


    for i in xrange(len(columns)):
        sheet.write(0, i, columns[i][1]) 
        sheet.col(i).width = columns[i][2] * 256
    for i, obj in enumerate(objs, start=1):
        for attr in attrs:
            if isinstance(obj, dict):
                sheet.write(i, index_of(attr, columns), obj[attr])
            else:
                sheet.write(i, index_of(attr, columns), obj.__getattribute__(attr))


    book.save(filename)
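A usage sketch (assumed) for make_xls(): each entry in columns is an (attribute key, header label, width in characters) triple, and objs can be dicts or ordinary objects.

columns = [('id', 'ID', 8), ('name', 'Name', 20)]
objs = [{'id': 1, 'name': 'alpha'}, {'id': 2, 'name': 'beta'}]
make_xls('users', 'users.xls', columns, objs)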
Example #12
def merge_count_data():
    with open('data/text/summary/2014.json', 'r') as wh:
        press_briefings = json.load(wh)

    with open('data/text/summary/google.json', 'r') as g:
        google_trends = json.load(g)

    from xlwt import Workbook
    
    book = Workbook()

    for word in SEARCH_TERMS:
        sheet = book.add_sheet(word)

        header = sheet.row(0)

        for i, col in enumerate(['Week', 'Count']):
            header.write(i, col)

        for i, sunday in enumerate(all_sundays(2014)):
            row = sheet.row(i + 1)

            sunday = sunday.strftime('%Y-%m-%d')
            count = press_briefings[sunday].get(word, 0)
            #google = google_trends[sunday].get(word, 0)
            
            for i, col in enumerate([sunday, count]):
                row.write(i, col)

    book.save('data/text/summary/terms.xls')
Example #13
def create_xls(data):
    work = Workbook(encoding='utf-8')
    work_sheet = work.add_sheet(u'账单')
    #head of table    
    work_sheet.write(0, 0, 'ID')
    work_sheet.write(0, 1, u'名字')
    work_sheet.write(0, 2, u'编码')
    work_sheet.write(0, 3, u'数量')
    work_sheet.write(0, 4, u'单价')
    work_sheet.write(0, 5, u'合计')
    work_sheet.write(0, 6, u'备注')
    i = 1
    total_price = 0
    for row in data:
        work_sheet.write(i, 0, str(i))
        work_sheet.write(i, 1, data[row]['name'])
        work_sheet.write(i, 2, data[row]['code'])
        work_sheet.write(i, 3, data[row]['number'])
        work_sheet.write(i, 4, data[row]['price'])
        work_sheet.write(i, 5, data[row]['total_price'])
        work_sheet.write(i, 6, data[row]['comment'])
        total_price += data[row]['number'] * data[row]['price']
        i = i + 1
    work_sheet.write(i, 4, u'总价:')
    work_sheet.write(i, 5, total_price)
    time_stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    file_name = "download_xls/bill_%s.xls" % time_stamp
    work.save(file_name)
    return file_name
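A usage sketch (assumed) showing the data shape create_xls() expects: a mapping of rows, each with the seven keys written above; the download_xls/ directory must already exist.

data = {
    1: {'name': 'Apple', 'code': 'A001', 'number': 3,
        'price': 2.5, 'total_price': 7.5, 'comment': ''},
}
print(create_xls(data))  # -> download_xls/bill_<timestamp>.xls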
Example #14
    def do_export(self):
        """
        Does actual export. Called from a celery task.
        """
        book = Workbook()
        self.render_book(book)

        temp = NamedTemporaryFile(delete=True)
        book.save(temp)
        temp.flush()

        org_root = getattr(settings, 'SITE_ORGS_STORAGE_ROOT', 'orgs')
        filename = '%s/%d/%s/%s.xls' % (org_root, self.org_id, self.directory, random_string(20))
        default_storage.save(filename, File(temp))

        self.filename = filename
        self.save(update_fields=('filename',))

        subject = "Your export is ready"
        download_url = self.org.make_absolute_url(reverse(self.download_view, args=[self.pk]))

        send_email([self.created_by], subject, 'utils/email/export', {'download_url': download_url})

        # force a gc
        import gc
        gc.collect()
Example #15
def write_file(result_list, deal_date, company_name, filename):
    '''
    given a list, put it into excel file.
    deal_date specifies a string which will be rendered as bold
    company_name and filename are self-explanatory
    '''
    w = Workbook()
    sheet = w.add_sheet(company_name)
    row = 2
    boldfont = easyxf(strg_to_parse='font: bold on')
    normalfont = easyxf(strg_to_parse='')
    sheet.write(0, 0, company_name)
    sheet.write(1, 0, 'Date')
    sheet.write(1, 1, 'Open')
    sheet.write(1, 2, 'Close')

    for line in result_list:
        elements = line.decode('utf8').split(',')
        date_string = elements[0]
        open_value = elements[1]
        close_value = elements[4]
        if datetime.strptime(date_string, '%Y-%m-%d') == deal_date:
            style = boldfont
        else:
            style = normalfont
        sheet.write(row, 0, date_string, style)
        sheet.write(row, 1, open_value, style)
        sheet.write(row, 2, close_value, style)
        row += 1

        print(date_string, open_value, close_value)
    w.save(filename)
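A usage sketch (assumed): result_list holds CSV-style byte strings in date,open,high,low,close,volume order, and deal_date is the datetime whose row should be rendered in bold.

from datetime import datetime

lines = [b'2020-01-02,10.0,10.6,9.8,10.3,1200000',
         b'2020-01-03,10.3,10.9,10.1,10.7,900000']
write_file(lines, datetime(2020, 1, 2), 'ACME Corp', 'acme.xls')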
Example #16
File: xl3.py  Project: orozcoadrian/jac
def main():
    timestamp = strftime("%Y-%m-%d_%H-%M-%S")
    out_dir = 'outputs/'+timestamp+'_xl'
    os.makedirs(out_dir)
    out_file=out_dir+'/'+timestamp+'_test.xls'


    data_sets=[]
    data_sets.append(DataSet('dss1', [[Cell.from_link('one','http://www.google.com'),Cell.from_display('two')],[Cell.from_display('one2'),Cell.from_display('two2')]]))
    data_sets.append(DataSet('dss2', [[Cell.from_display('oneb'),Cell.from_display('twob')]]))

    book = Workbook()
    for ds in data_sets:
        # my_print_dataset(ds)

        add_data_set_sheet(ds, book)


    # sheet.write(1,0,Formula('HYPERLINK("http://www.google.com";"Python")'),style)


    # if len(row_data) > 0:
    book.save(out_file)


    os.system("start "+out_file)
Example #17
def write_datatests(crisis_def_list, location = "./out", suffix = ""):
    book = Workbook()
    sheet1 = book.add_sheet('Sheet 1')
    result_crises = combine_crises(crisis_def_list)
    row_num = 0
    for country_code in sort(result_crises.keys()):
        years = sort(list(result_crises[country_code]))
        try:
            len(years)
        except:
            print(years)
            print(country_code)
        sheet1.write(row_num, 0, country_code)
        crisis_row = sheet1.row(row_num)
        crisis_row.write(1, "crisis")
        for j in range(len(years)):
            crisis_row.write(j+2, years[j])
        normal_row = sheet1.row(row_num+1)
        normal_row.write(1, "normal")
        normal_years = pick_normal_years(years)
        for j in range(len(normal_years)):
            normal_row.write(j+2, normal_years[j])
        row_num+=2
    saveloc = os.path.expanduser(location)+suffix+".xls"
    book.save(saveloc)
Example #18
def create_excel_file(db, kurzus):
    book = Workbook(encoding='utf-8')
    #sheet = book.add_sheet('{0} - {1} - {2}'.format(kurzus['nev'],kurzus['nap'],kurzus['sav']))
    sheet = book.add_sheet('Névsor')

    sheet = create_shit_head(sheet, kurzus_infok)
    
    kurzus_hallgatok = get_kurzus_hallgatok(db, kurzus['id'])
    
    sorszam = 1
    
    for user in kurzus_hallgatok:
        sheet.write(sorszam+4,0,sorszam,easyxf(
        'borders: left thick, right thick, top thick, bottom thick;'
        ))
        sheet.write(sorszam+4,1,user['neptun'],easyxf(
        'borders: left thick, right thick, top thick, bottom thick;'
        ))
        sheet.write(sorszam+4,2,user['nev'],easyxf(
        'borders: left thick, right thick, top thick, bottom thick;'
        ))
        for i in xrange(3,20):
            sheet.write(sorszam+4,i,'',easyxf(
        'borders: left thick, right thick, top thick, bottom thick;'
        ))
        sorszam = sorszam + 1
    
    book.save('{0}_{1}_{2}.xls'.format(ekezet_eltunteto(kurzus['nev'].lower()),
        ekezet_eltunteto(kurzus['nap'].lower()),
        kurzus['sav']))
Example #19
  def print_responseTime_persecond(self, folder = "." ):
    book = Workbook()
    sheet1 = book.add_sheet("response time")
    sheet2 = book.add_sheet("transactions")
    title = "Test Name: %s" % (self.name)
    sheet1.write(0,0,title)
    sheet2.write(0,0,title)
    column = 1
    for id in self.test_ids:  
      sheet1.write(1, column, "TEST" + id)
      sheet2.write(1, column, "TEST" + id)
      column += 1
    
    results = self.perSecondResult
    rows = range(1, self.max_second /TIME_INTERVAL + 1)

    for row in rows:
      sheet1.write(row + 1, 0, row*TIME_INTERVAL )
      sheet2.write(row + 1, 0, row*TIME_INTERVAL )
      column = 1
      for id in self.test_ids:
        key = id + "-" + str(row*TIME_INTERVAL)
        if results.has_key(key):
          result = results[key]
          sheet1.write(row + 1, column, result.getAvg()) 
          sheet2.write(row + 1, column, result.transactions) 
        column += 1
    
    book.save(folder + "/" + self.name + "_bytime.xls")
Example #20
def fog1270():
    """
    Create an xls with active evaluators emails by language
    """
    from xlwt import Workbook
    rates = tst.EvaluatorRate.query.join('evaluator','service_provider','contact').\
                filter_by(active=True).all()
    language_dict = {}
    for rate in rates:
        language_dict.setdefault(rate.language,[]).\
    append(rate.evaluator.service_provider.contact.get_email_address())

    distro = Workbook()
    row = 0

    active = distro.add_sheet('Active Evaluators')

    for language in language_dict:
        emails = language_dict[language]
        emails = list(set(emails))
        email_str = ''
        for email in emails:
            email_str = email_str + '%s;' % email
        email_str = email_str[:-1]
        active.write(row, 0, language.name)
        active.write(row, 2, email_str)
        row+=1

    distro.save('evaluators.xls')
Example #21
def tmz():
    from xlwt import Workbook
    rates = tst.EvaluatorRate.query.join('evaluator','service_provider','contact').\
                filter_by(active=True).all()
    language_dict = {}
    for rate in rates:
        language_dict.setdefault(rate.language,[]).\
    append(rate.evaluator.service_provider.contact.time_zone)

    distro = Workbook()
    row = 0

    active = distro.add_sheet('Active Evaluators')

    for language in language_dict:
        tmzs = language_dict[language]
        tmzs = list(set(tmzs))
        tmz_str = ''
        for tmz in tmzs:
            tmz_str = tmz_str + '%s;' % tmz
        tmz_str = tmz_str[:-1]
        active.write(row, 0, language.name)
        active.write(row, 2, tmz_str)
        row+=1

    distro.save('evaluators.xls')
Example #22
def fwriteinexcel(xlsname, results_gen, results_load_lambda, results_branch):
    book = Workbook()
    Sheet1 = book.add_sheet('Sheet1')

    Sheet1.write(0, 0, 'Bus')
    Sheet1.write(0, 1, 'Generation (MW)')
    Sheet1.write(0, 2, 'Load (MW)')
    Sheet1.write(0, 3, 'Lambda (€/MWh)')
    Sheet1.write(0, 5, 'From Bus')
    Sheet1.write(0, 6, 'To Bus')
    Sheet1.write(0, 7, 'P (MW)')

    for i in range(len(results_gen)):
        Sheet1.write(i+1, 0, results_gen[i, 0])
        Sheet1.write(i+1, 1, results_gen[i, 1])

    compteur = 0
    for j in range(len(results_load_lambda)):
        if j != 15 and j != 16 and j != 32 and j != 38 and j != 39:
            Sheet1.write(compteur+1, 2, results_load_lambda[j, 1])
            Sheet1.write(compteur+1, 3, results_load_lambda[j, 2])
            compteur += 1

    for k in range(len(results_branch)):
        Sheet1.write(k+1, 5, results_branch[k, 0])
        Sheet1.write(k+1, 6, results_branch[k, 1])
        Sheet1.write(k+1, 7, results_branch[k, 2])

    book.save(xlsname)
Example #23
def configErrorReporting(headers):
  """
  Configure the import-exception log: an Excel spreadsheet in the same
  format as the input, with one extra column, "Error", holding the error
  message.

  Can only be called after the first row of the input spreadsheet has been
  read, to initialize the global "headers".
  """
  dateFmt = easyxf(
    'font: name Arial, bold True, height 200;',
    #'borders: left thick, right thick, top thick, bottom thick;',
     num_format_str='MM-DD-YYYY'
  )
  
  headerFmt = easyxf(
    'font: name Arial, bold True, height 200;',
  )
  global errorsWorkbook, erroutSheet, erroutRow
  errorsWorkbook = Workbook()
  erroutSheet = errorsWorkbook.add_sheet('Import Errors')

  for colnum in range(0, len(headers)):
    erroutSheet.write(0, colnum, headers[colnum][0], 
    tern(headers[colnum][0]==xlrd.XL_CELL_DATE, dateFmt, headerFmt))
   
  # Add extra column for error message
  erroutSheet.write(0, len(headers), "Error", headerFmt)
  erroutSheet.flush_row_data()
  erroutRow = 1
  errorsWorkbook.save('errors.xls')
Example #24
    def write(self, collection, filename=None, fail_silently=False):
        # create new book
        book = Workbook()

        # write dataset
        for name, dataset in collection.items():
            self._write_dataset(dataset, book)

        # write peakset if there are more than single dataset
        if len(dataset) > 1:
            sheet = book.add_sheet('peakset')
            offsets = [0, 1]
            for name, dataset in collection.items():
                # write classify name
                # Note: +1 for heading line
                sheet.write(offsets[0]+1, 0, get_sheet_name(name))
                # write peakset
                self._write_peakset(dataset, offsets, sheet,
                                    self.peakset_basecolumn,
                                    self.peakset_method,
                                    self.peakset_where_function)
                # update offsets
                offsets[0] += len(dataset) + 1

        # save
        book.save(filename or self.default_filename)
Example #25
class XLWriter(BookWriter):
    """
    xls, xlsx and xlsm writer
    """
    def __init__(self, file, encoding='ascii',
                 style_compression=2, **keywords):
        """Initialize a xlwt work book


        :param encoding: content encoding, defaults to 'ascii'
        :param style_compression: undocumented, but 2 is magically
                                  better
        reference: `style_compression <https://groups.google.com/
        forum/#!topic/python-excel/tUZkMRi8ITw>`_
        """
        BookWriter.__init__(self, file, **keywords)
        self.wb = Workbook(style_compression=style_compression,
                           encoding=encoding)

    def create_sheet(self, name):
        """Create a xlwt writer"""
        return XLSheetWriter(self.wb, None, name)

    def close(self):
        """
        This call actually saves the file
        """
        self.wb.save(self.file)
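The style_compression=2 setting described in the docstring tells xlwt to share identical style (XF) records instead of writing one per cell, which also helps stay under the 4094-style limit. A minimal direct-xlwt sketch:

from xlwt import Workbook

wb = Workbook(encoding='utf-8', style_compression=2)  # deduplicate identical XF records
ws = wb.add_sheet('Sheet1')
ws.write(0, 0, 'hello')
wb.save('compressed_styles.xls')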
Example #26
File: views.py  Project: samba6/recons2
    def write_report_xl(self, itfs):
        headers = ("S/N", "LC NUMBER", "CUSTOMER NAME",
                   "CCY", "AMOUNT", 'RELATIONSHIP MANAGER')

        xl = Workbook()
        self.xlsh = xl.add_sheet("ITF-REPORT")
        row_in_xlsh = 0

        self.write_xl_row(("", "", "ITF INTEREST REPORT",), row_in_xlsh)
        row_in_xlsh += 1

        self.write_xl_row(headers, row_in_xlsh)
        row_in_xlsh += 1

        sequence = 1
        for itf in itfs:
            self.write_xl_row(
                (sequence, itf.lc_number, itf.customer.name, itf.currency(),
                 itf.amount, itf.rm_name(),),
                row_in_xlsh)
            row_in_xlsh += 1
            sequence += 1

        xl.save(open(itf_report_xl_path, 'wb'))

        return '%d Itfs reported for the given period' % len(itfs)
Example #27
def process_test(request):
    start_date=request.GET.get('start','')
    end_date=request.GET.get('end','')
    
    now = dt.datetime.now().isocalendar()
    this_week_start,this_week_end = get_week_days(now[0],now[1])
    if start_date == '':
        start_date=this_week_start.strftime("%Y-%m-%d")
    if end_date == '':
        end_date=this_week_end.strftime("%Y-%m-%d")
    
    start_date=time.strptime(start_date,'%Y-%m-%d')
    start_date=dt.datetime.fromtimestamp(time.mktime(start_date))
    end_date=time.strptime(end_date,'%Y-%m-%d')
    end_date=dt.datetime.fromtimestamp(time.mktime(end_date))
    end_date=end_date+dt.timedelta(1)
    
    wb = Workbook()
    ws = wb.add_sheet('Sheetname')
    ws.write(0, 0, 'Firstname')
    ws.write(0, 1, 'Surname')
    ws.write(1, 0, 'Hans')
    ws.write(1, 1, 'Muster')

    fname = 'process_excel-testfile.xls'
    response = HttpResponse(mimetype="application/ms-excel")
    response['Content-Disposition'] = 'attachment; filename=%s' % fname

    wb.save(response)

    return response
Example #28
    def write(self, dirname=None):

        if dirname is None:
            dirname = self.description

        self.create_workbooks()
        dir = self.safe_mkdir(dirname)

        print 'Saving annotation in directory %s' % dir
        for workbook_name, sheets in self.workbooks.items():

            workbook_name = self.escape_name(workbook_name)
            workbook = Workbook()

            for sheet_name, sentences in sorted(sheets.items()):
                sheet_name = self.escape_name(sheet_name)
                sheet = workbook.add_sheet(sheet_name)
                sheet.col(1).width = 0x3000
                sheet.col(3).width = 0x3000

                for index, sentence in enumerate(sentences):
                    self.write_splitted(sentence, sheet, index)

            meta_sheet = workbook.add_sheet('meta')
            meta_sheet.write(0, 0, self.description)
            meta_sheet.write(1, 0, str(datetime.now()))

            outfile = os.path.join(dir, '%s.xls' % workbook_name)
            workbook.save(outfile)

        sentence_file_name = os.path.join(dir, 'sentences.json')
        write_sentence_file(self.sentences, sentence_file_name)
Example #29
def merge_synonym_counts():
    """
    Merge counts for synonyms.
    """
    with open('data/text/summary/2014.json', 'r') as wh:
        press_briefings = json.load(wh)

    from xlwt import Workbook

    book = Workbook()

    for synonyms in SYNONYMS:
        sheet = book.add_sheet('%s (+%i)' % (synonyms[0], len(synonyms)))

        header = sheet.row(0)

        for i, col in enumerate(['Week', 'Count']):
            header.write(i, col)

        for i, sunday in enumerate(all_sundays(2014)):
            row = sheet.row(i + 1)

            sunday = sunday.strftime('%Y-%m-%d')

            count = 0

            for word in synonyms:
                count += press_briefings[sunday].get(word, 0)
            
            for i, col in enumerate([sunday, count]):
                row.write(i, col)

    book.save('data/text/summary/synonyms.xls')
Example #30
 def exportToExcel(self,objectProject):
     
     book = Workbook();
     sheet1 = book.add_sheet('Sheet 1')
     if( objectProject):
         i=0;
         
         row1 = sheet1.row(i) ;
         row1.write(0, ('ประเภท').decode('UTF8') );
         row1.write(1, ('ชื่อโครงการ').decode('UTF8'));
         
         row1.write(2, ('รายละเอืยด').decode('UTF8') );
         row1.write(3, ('งบประมาณรวม').decode('UTF8') );
         row1.write(4, ('งบประมาณ').decode('UTF8') );
         row1.write(5, ('เงินบำรุง').decode('UTF8') );
         row1.write(6, ('งบประมาณอื่น').decode('UTF8') );
         row1.write(7, ('งบประมาณอื่นจาก').decode('UTF8') );
         row1.write(8, ('ผู้รับผิดชอบ').decode('UTF8') );
         row1.write(9, ('กลุ่ม').decode('UTF8') );
         row1.write(10, ('หน่วย/งาน').decode('UTF8') );
         
         i=i+1; 
         style = XFStyle();
         style.num_format_str = '#,##0.00';
         
         for value in  objectProject:
             
             row1 = sheet1.row(i) ;
             
             row1.write(0, value.get('project_type').decode('UTF8') );
             row1.write(1, value.get('project_name').decode('UTF8') );
              
             row1.write(2, value.get('detail').decode('UTF8') );
             row1.write(3, value.get('allBudget') ,style  );
             row1.write(4, value.get('project_budget'  ) ,style  );
             row1.write(5, value.get('maintenance_funds_budget'),style   );
             row1.write(6, value.get('budget_other') ,style  );
            
             if(value.get('budget_other_from')):
                 row1.write(7, value.get('budget_other_from').decode('UTF8') );
             if(value.get('user_name')):
                 row1.write(8, value.get('user_name').decode('UTF8') );
             
             row1.write(9, value.get('division').decode('UTF8') );
             row1.write(10, value.get('section').decode('UTF8') );
              
              
             i=i+1; 
     
      
     dirTempFile = gettempdir() + _os.sep + str('simple.xls');
             
     book.save(dirTempFile);
     #book.save(TemporaryFile());
     
     return dirTempFile;
      
     
     
      
Example #31
texts=stemmed_all
#from gensim.corpora import Dictionary
dictionary = Dictionary(stemmed_all)

dictionary.filter_extremes(no_below=pr, keep_n=None)

  
# Workbook is created 
wb = Workbook() 
  
# add_sheet is used to create sheet. 
sheet1 = wb.add_sheet('dict') 
for i in range(len(dictionary)):
    sheet1.write(i+1, 0, dictionary[i]) 

wb.save('Dictionary_after_2%_words_removal.xls') 

# remove rare words (2% threshold)
#all_tokens = sum(texts, [])
#tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) < (len(dictionary))*0.02)
#texts = [[word for word in text if word not in tokens_once]
#        for text in texts]
#stemmed_all=texts
csv.register_dialect('myDialect',
                     delimiter=',',
                     quoting=csv.QUOTE_ALL)
with open('Stemmed_documents.csv', 'w', newline='') as file:
    writer = csv.writer(file, dialect='myDialect')
    writer.writerows(stemmed_all)
mydict = corpora.Dictionary()
#dtm
from xlwt import Workbook

book = Workbook()
sheet1 = book.add_sheet('result 1')

for row_index in range(sheet0.nrows):

    keyword = sheet0.cell(row_index, 0).value
    params = {'query': keyword}
    enc_params = urllib.urlencode(params)

    request = urllib2.Request('http://search.naver.com/' + 'search.naver' +
                              '?' + enc_params)
    request.add_header('User-agent', 'Mozilla/5.0')
    request.add_header('Accept-encoding', 'gzip')
    response = urllib2.urlopen(request)

    compressedstream = StringIO.StringIO(response.read())
    gzipper = gzip.GzipFile(fileobj=compressedstream)

    data = gzipper.read().encode('utf-8')

    if (data.find('people_info section') != -1):
        sheet1.write(row_index, 6, 'yes')
        book.save('result.xls')
        book.save(TemporaryFile())

        print 'yes'
    else:
        print 'no'
Example #33
                   c1=10,
                   r2=8,
                   c2=10,
                   label='Giá trị còn lại',
                   style=style)
sheet1.write_merge(r1=7,
                   c1=11,
                   r2=8,
                   c2=11,
                   label='TL còn lại (%)',
                   style=style)
sheet1.write_merge(r1=6, c1=12, r2=6, c2=14, label='Chênh lệch', style=style)
sheet1.write_merge(r1=7, c1=12, r2=8, c2=12, label='Số lượng', style=style)
sheet1.write_merge(r1=7, c1=13, r2=8, c2=13, label='Nguyên giá', style=style)
sheet1.write_merge(r1=7,
                   c1=14,
                   r2=8,
                   c2=14,
                   label='Giá trị còn lại',
                   style=style)
sheet1.write_merge(r1=6, c1=15, r2=8, c2=15, label='Ghi chú', style=style)
j = 0
for x in range(9, 11, 1):
    for i in range(1, 16, 1):
        sheet1.write(x, i, "x" + "i")
    j = x

sheet1.write_merge(r1=j + 1, c1=1, r2=j + 1, c2=2, label='Cộng', style=style)

wb.save('example4.xlsx')
Example #34
	header = worksheet.row_values(0)
	data.append(header)
	for row_index in range(1,worksheet.nrows):
			row_list = []
			sale_amount = worksheet.cell_value(row_index, sale_amount_column_index)
			if sale_amount > 1400.0:
				for column_index in range(worksheet.ncols):
					cell_value = worksheet.cell_value(row_index,column_index)
					cell_type = worksheet.cell_type(row_index, column_index)
					if cell_type == 3:
						date_cell = xldate_as_tuple(cell_value,workbook.datemode)
						date_cell = date(*date_cell[0:3]).strftime('%m/%d/%Y')
						row_list.append(date_cell)
					else:
						row_list.append(cell_value)
			if row_list:
				data.append(row_list)

	for list_index, output_list in enumerate(data):
		for element_index, element in enumerate(output_list):
			output_worksheet.write(list_index, element_index, element)

output_workbook.save(output_file)
Example #35
key = b"Jsp3nd762MAO283N"
#iv=b"This is an IV456"
iv = Random.new().read(ARC2.block_size)
print("Block Size: ", ARC2.block_size)

#message=b'A really secret message. Not for prying eyes.'
for i in range(8):
    f = open('ciphertext_1mb.txt', encoding="ANSI")
    inp = f.read()
    message = 'null'
    for j in range(2**i):
        message += inp
    output.write(i + 1, 0, round(len(message) / (1024 * 1024)))
    print("Message size(mb): ", len(message) / (1024 * 1024))
    message = str.encode(message)
    cipher_text, time_enc = encryption(message)
    plain_text, time_dec = decryption(cipher_text)
    print(message == plain_text)
    #print("Message size(mb): ", len(message)/(1024*1024))
    print("Cipher text size(mb): ", len(cipher_text) / (1024 * 1024))
    print("\n\n")
    output.write(i + 1, 1, time_enc)
    output.write(i + 1, 2, time_dec)
    output.write(i + 1, 3, len(cipher_text) / (1024 * 1024))

wb.save('ARC2.xls')

#print(plain_text)

#print(timeit.timeit(encryption))
Example #36
    try:
        odds = driver.find_element_by_xpath(
            '/html/body/div[1]/div/div[2]/div[6]/div[1]/div/div[1]/div[2]/div[1]/div[8]/div['
            + str(i) + ']/div/strong/a')
        #/html/body/div[1]/div/div[2]/div[6]/div[1]/div/div[1]/div[2]/div[1]/div[8]/div[2]/div/strong/a
        print(odds.text)
        odd = odds.text.split(' ')[1]
        i += 1
        sheet1.write(row, col, odd)
        sheet1.write(row - 1, col, 'over-under')
        col += 1
        print(i)
    except:
        break
wb.save('databse.xls')

#over_under_button = driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div[6]/div[1]/div/div[1]/div[2]/div[1]/div[6]/table/tbody/tr[4]/td[2]/a'+'#over-under;')
# .click() to mimic button click
#over_under_button.click()

#time.sleep(1)
### locate email form by_class_name
##username = driver.find_element_by_xpath('/html/body/nav/section[2]/form/div[1]/div[1]/input')
### send_keys() to simulate key strokes
##username.send_keys('*****@*****.**')
### sleep for 0.5 seconds
###sleep(0.5)
### locate password form by_class_name
##password = driver.find_element_by_xpath('/html/body/nav/section[2]/form/div[1]/div[2]/input')
### send_keys() to simulate key strokes
Example #37
            count = 0
            i = i + 1
            img = cv2.imread(f)
            for x in range(0, len(img) - 1):
                for y in range(0, len(img[x]) - 1):
                    if (list(img[x][y]) == [0, 0, 0]):
                        count += 1
                        print(img[x][y])
            sheet1.write(i, 0, f)
            sheet1.write(i, 1, count / 60.0)
            if (count / 60.0 > 0):
                sheet1.write(i, 1, 1)
            else:
                sheet1.write(i, 1, 0)
        image_list = []
        wb.save('Pixel_Density_' + k[0:1] + "_" + k[2:3] + '.xls')
x_list = []
for g in glob.glob("*.xls"):
    x_list.append(g)
x_list.sort()
print x_list
kkk = 0
for z in x_list:
    xls_file = pd.ExcelFile(z)
    df = xls_file.parse('Sheet 1')
    df = df.sort_values(by='IMAGE NAME')
    df = df.drop('IMAGE NAME', 1)
    df = df.T
    df = df.reset_index(drop=True)
    df.to_csv('file' + str(kkk) + '.csv')
    df = read_csv('file' + str(kkk) + '.csv')
Example #38
                    sheet1.write(count + 2, i,
                                 statusCodeDict[int(splitline[i], 10)])
                    # getting the status code to write vs the number.

        else:
            splitline = line.split()
            for i in range(len(splitline)):
                # taking advantage of the dictionary created earlier
                if i != 1:
                    sheet1.write(count + 2, i, splitline[i])
                    #Writing of the line to the excel sheet
                else:
                    print(splitline[i])
                    sheet1.write(count + 2, i,
                                 statusCodeDict[int(splitline[i], 10)])
                    # getting the status code to write vs the number.
        count += 1
    return line


#call get event will get us to "<event>"
line = getEvent(line)
line = writeEvent(line, wb, "1")
line = getEvent(line)
line = writeEvent(line, wb, "2")
end = True
count = 1  # establish a count to keep track of what line in the event we are at

#increment the count by one after iterating through once
wb.save("event.xls")
Example #39
def Q_Learning(Pr_des, eps_unc, eps_unc_learning, N_EPISODES, SHOW_EVERY, LEARN_RATE, DISCOUNT, EPS_DECAY, epsilon, i_s, pa, energy_pa, pa2ts, pa_s, pa_t, act_num, possible_acts_not_pruned, possible_acts_pruned, possible_next_states_not_pruned,possible_next_states_pruned, pick_up, delivery,  pick_ups, deliveries, test_n, n_samples, ts_size):
	
	wb = Workbook()
	sheet_name = 'Simulation' + str(test_n+1) 
	s1 = wb.add_sheet(sheet_name)

	s1.write(1,0,'Task-1')
	s1.write(1+N_EPISODES//SHOW_EVERY,0,'Task-2')      # integer division: xlwt needs int row indices
	s1.write(1+2*N_EPISODES//SHOW_EVERY,0,'Task-3')
	s1.write(1+3*N_EPISODES//SHOW_EVERY,0,'Task-4')

	s1.write(0,1,'# of Hit')
	s1.write(0,2,' Avg. Reward')
	s1.write(0,3,' Discounted Avg. Reward')

	s1.write(0,11,' Discounted Episode Reward - Task 1')
	s1.write(0,12,' Discounted Episode Reward - Task 2')
	s1.write(0,13,' Discounted Episode Reward - Task 3')
	s1.write(0,14,' Discounted Episode Reward - Task 4')

	s1.write(0,6,'Total Run Time')
	s1.write(0,7,'Total Avg. Reward')

	inx = 0


	QL_start_time = timeit.default_timer()

	EVERY_PATH = []
	episode_rewards = []

	# Initialize the Q-table (between -0.001 and 0)
	pa_size = []
	q_table = []	
	agent_s = []
	hit_count = []
	mission_tracker = []
	ep_per_task = []
	disc_ep_per_task = []
	old_q_tables = []
	all_samples = []
	for i in range(len(energy_pa)):		
		pa_size.append(len(pa[i].g.nodes()))
		agent_s.append(i_s[i])      # Initialize the agent's location
		hit_count.append(0)
		mission_tracker.append(0)
		ep_per_task.append([])
		disc_ep_per_task.append([])
		all_samples.append([])
		q_table.append([])
		old_q_tables.append([])
		for t in range(ep_len+1):
			q_table[i].append(np.random.rand(pa_size[i],9) * 0.001 - 0.001)  # of states x # of actions
			old_q_tables[i].append(q_table[i][t])

	ep_rewards = [] 
	ep_trajectories_pa = []

	agent_upt_i = []
	agent_upt = []
	for j in range(len(energy_pa)):
		for i in range(len(pa[j].g.nodes())):
			if pa[j].g.nodes()[i][1] == 0 or str(pa[j].g.nodes()[i][0]) == 'r'+str(pick_up[j]) :#or str(pa[j].g.nodes()[i][0]) == 'r'+str(delivery[j]): # If the mission changes check here
				agent_upt_i.append(pa2ts[j][i])
			else:
				agent_upt_i.append([])
		agent_upt.append(agent_upt_i)

	for episode in range(N_EPISODES):
		# if episode > 900000: # can be switch to only exploitation after some episode
		# 	epsilon = 0

		which_pd = np.random.randint(len(energy_pa)) # randomly chosing the pick_up delivery states

		mission_tracker[which_pd] = mission_tracker[which_pd] + 1
		hit = []
		ep_rew = []
		for i in range(len(energy_pa)):
			hit.append(0)

		ep_traj_pa = [agent_s[which_pd]] # Initialize the episode trajectory
		ep_rew     = 0         # Initialize the total episode reward
		disc_ep_rew = 0

		for t_ep in range(ep_len):

			old_q_tables[which_pd][t_ep] = q_table[which_pd][t_ep]

			possible_acts = possible_acts_not_pruned[which_pd]
			possible_next_states = possible_next_states_not_pruned[which_pd]

			if hit[which_pd] == 0:      					                                                                                    
				if energy_pa[which_pd][agent_s[which_pd]] == 0:  # Raise the 'hit flag' if the mission is achieved 
					hit[which_pd] = 1                  # 	
					agent_s[which_pd] = agent_upt[which_pd].index(pa2ts[which_pd][agent_s[which_pd]]) # 	
					hit_count[which_pd] = hit_count[which_pd] + 1
				else:
					possible_acts = possible_acts_pruned[which_pd]
					possible_next_states = possible_next_states_pruned[which_pd]

			if len(possible_acts[t_ep][agent_s[which_pd]]) == 0:
				agent_s[which_pd] = agent_upt[which_pd].index(pa2ts[which_pd][agent_s[which_pd]])
					
			if np.random.uniform() > epsilon:                              # Exploit
				possible_qs = q_table[which_pd][t_ep][agent_s[which_pd], possible_acts[t_ep][agent_s[which_pd]]] # Possible Q values for each action
				next_ind    = np.argmax(possible_qs)                       # Pick the action with max Q value 
			else:                                                          # Explore
				next_ind  = np.random.randint(len(possible_acts[t_ep][agent_s[which_pd]])) # Picking a random action
			# Taking the action
			prev_state = agent_s[which_pd]
			intended_action = possible_acts[t_ep][prev_state][next_ind]
			if np.random.uniform() < eps_unc_learning:
				[chosen_act, next_state] = action_uncertainity(intended_action, pa_s[which_pd], pa_t[which_pd], act_num[which_pd], agent_s[which_pd])
				action    = chosen_act
				s_a       = (agent_s[which_pd], action)                                   # State & Action pair
				agent_s[which_pd]   = next_state # possible_next_states
			else:
				action    = intended_action
				s_a       = (agent_s[which_pd], action)                                   # State & Action pair
				agent_s[which_pd]   = possible_next_states[t_ep][agent_s[which_pd]][next_ind]        # moving to next state  (s,a)

			ep_traj_pa.append(agent_s[which_pd])			
			current_q = q_table[which_pd][t_ep][prev_state, intended_action]
			max_future_q = np.amax(q_table[which_pd][t_ep+1][agent_s[which_pd], :])                                          # Find the max future q 	
			rew_obs      = rewards_pa[which_pd][agent_s[which_pd]] * np.random.binomial(1, 1-rew_uncertainity)   # Observe the rewards of the next state
			new_q        = (1 - LEARN_RATE) * current_q + LEARN_RATE * (rew_obs + DISCOUNT * max_future_q)
			q_table[which_pd][t_ep][prev_state, intended_action] = new_q 

			disc_ep_rew += rew_obs * (DISCOUNT ** t_ep)                                                 
			ep_rew += rew_obs

			# Adding sample to the memory
			all_samples[which_pd].append([prev_state, intended_action, rew_obs, agent_s[which_pd], t_ep]) # S,A,R,S' and time
			
			# Sample n times
			for i in range(n_samples):
				random_sample_index = np.random.choice(len(all_samples[which_pd]))
				sample_s       = all_samples[which_pd][random_sample_index][0]
				sample_action  = all_samples[which_pd][random_sample_index][1]
				sample_r       = all_samples[which_pd][random_sample_index][2]
				sample_s_prime = all_samples[which_pd][random_sample_index][3]
				sample_t       = all_samples[which_pd][random_sample_index][4]

				current_q = q_table[which_pd][sample_t][sample_s, sample_action]
				max_future_q = np.amax(q_table[which_pd][sample_t+1][sample_s_prime, :])
				new_q        = (1 - LEARN_RATE) * current_q + LEARN_RATE * (sample_r + DISCOUNT * max_future_q)
				q_table[which_pd][sample_t][sample_s, sample_action] = new_q 

		agent_s[which_pd] = agent_upt[which_pd].index(pa2ts[which_pd][agent_s[which_pd]]) # Re-initialize after the episode is finished
		ep_rewards.append(ep_rew)
		ep_trajectories_pa.append(ep_traj_pa)
		epsilon = epsilon * EPS_DECAY
		disc_ep_per_task[which_pd].append(disc_ep_rew)
		ep_per_task[which_pd].append(ep_rew)		
		if (episode+1) % SHOW_EVERY == 0:
			inx = inx + 1
			for ind in range(len(energy_pa)):
				avg_per_task = np.mean(ep_per_task[ind])
				disc_avg_per_task = np.mean(disc_ep_per_task[ind])
				print('Episode # ' + str(episode+1) + ' : Task-' + str(ind) + '   # of Hit=' + str(len(ep_per_task[ind])) + '   Avg.=' + str(avg_per_task))
				s1.write(ind*N_EPISODES//SHOW_EVERY+inx,1,len(ep_per_task[ind]))
				s1.write(ind*N_EPISODES//SHOW_EVERY+inx,2,avg_per_task)
				s1.write(ind*N_EPISODES//SHOW_EVERY+inx,3,disc_avg_per_task)

		if (episode+1) % SHOW_EVERY == 0:
			avg_rewards = np.mean(ep_rewards[episode-SHOW_EVERY +1: episode])
			print('Episode # ' + str(episode+1) + ' : Epsilon=' + str(round(epsilon, 4)) + '    Avg. reward in the last ' + str(SHOW_EVERY) + ' episodes=' + str(round(avg_rewards,2)))
	
	best_episode_index = ep_rewards.index(max(ep_rewards))
	optimal_policy_pa  = ep_trajectories_pa[N_EPISODES-1]#ep_trajectories_pa[best_episode_index] # Optimal policy in pa  ep_trajectories_pa[N_EPISODES-1]#
	optimal_policy_ts  = []                                     # optimal policy in ts
	opt_pol            = []                                     # optimal policy in (m, n, h) format for visualization
	for ind, val in enumerate(optimal_policy_pa):
		optimal_policy_ts.append(pa2ts[which_pd][val])
		opt_pol.append((math.floor(optimal_policy_ts[ind]/n), optimal_policy_ts[ind]%n, 0))
	
	print('Trajectory at the last episode : ' + str(optimal_policy_ts))


	indices=[0,1,2]#, 50000,50001,50001,100000,100001,100002,299997,299998,299999,N_EPISODES-3,N_EPISODES-2,N_EPISODES-1
	optimal_policy_pas = []
	for i in range(len(indices)):		
		optimal_policy_pas.append(ep_trajectories_pa[indices[i]])
		optimal_policy_ts  = []
		for ind, val in enumerate(optimal_policy_pas[i]):
			optimal_policy_ts.append(pa2ts[which_pd][val])
		#print('Trajectory at the episode ' +  str(indices[i]) + ' : '+str(optimal_policy_ts))

	QL_timecost =  timeit.default_timer() - QL_start_time
	success_ratio = []
	for i in range(len(energy_pa)):
		success_ratio.append(100*hit_count[i]/mission_tracker[i])
		print("Successful Mission Ratio[%] = " + str(success_ratio[i]))
		print("Successful Missions = " + str(hit_count[i]) + " out of " + str(mission_tracker[i]))
	d_maxs = []
	for i in range(len(energy_pa)):
		d_maxs.append(max(energy_pa[i]))
	max_energy   = max(d_maxs)

	for i in range(len(energy_pa)):
		for j in range(ep_len):
			#name_diff = "q_table_diff_perc_" + str(i) + ".npy"
			name_q    = "Env3_Converged_Q_TABLE_GNC" + str(n_samples) + '_task'+ str(i) + '_t' + str(j) + ".npy"
			#np.save(name_diff,q_table_diff_perc)
			np.save(os.path.join('Q_TABLES',name_q) ,q_table[i][j])
	
	print('Total time for Q-Learning : ' + str(QL_timecost) + ' seconds')
	print('Action uncertainity[%] = ' + str(eps_unc*100))
	print('# of Samples = ' + str(n_samples))
	print("Desired Minimum Success Ratio[%] = " + str(100*Pr_des))
	print("Episode Length = " + str(ep_len) + "  and  Max. Energy of the System = " + str(max_energy))    
	print('Reward at last episode = '+str(ep_rewards[-1]))

	# for task in range(len(energy_pa)):
	# 	for ind in range(len(disc_ep_per_task[task])):
	# 		s1.write(1+ind,11+task,disc_ep_per_task[task][ind])

	s1.write(1,6,QL_timecost)
	s1.write(1,7,np.mean(ep_rewards))
	filename = 'model_based_GNC_' + str(n_samples) + '.xls'
	wb.save(filename) 

	return opt_pol
Example #40
def Traitement(duree = 60, facteur = 10):
    duree_traitement_video = datetime.datetime.now()
    command = ['ffmpeg.exe', '-i', chemin, '-f', 'image2pipe', '-pix_fmt','rgb24','-vcodec','rawvideo','-']
    vid = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)
    Namefile = str(name)+'.xls'
    book = Workbook()
    feuil1 = book.add_sheet('Vitesse(temps)')
    feuil1.write(0,0,'Secondes de la video')
    feuil1.write(1,0,'Vitesses mesurees')
    
    mn09=-2.9335
    mn1=-1.9446
    mn15=-1
    mn2=-0.72440310
    mn3=-0.463642477347
    mn4=-0.269393042370
    mn5=-0.192672237
    mn6=0.1379402202341
    xabs1 = [mn09,mn1,mn15,mn2,mn3,mn4,mn5,mn6]
    yord1=[0.9,1,1.5,2,3,4,5,6]
    Mod = interpolate.interp1d(xabs1, yord1, fill_value='extrapolate')
    # Add error rate and mean speed
    # Remove thresholding elements
    # Finish duration handling
    X = []
    Y = []
    
    for t in range(duree):                          # loop over each second of the video
        duree_traitement_par_sec = datetime.datetime.now() # timer for this second of processing
        regression_moy_sec = 0                       # (re)initialise the mean regression value over this second's frames
        for i in range(25*t,25*(1+t),facteur):      # loop over x frames per second of video (x between 1 and 25)
            image = vid.stdout.read(1080*1920*3) # read the raw data of one frame
            image = np.fromstring(image, dtype='uint8') # interpret the data as 8-bit integers
            image = image.reshape((1080,1920,3)) # reshape into a 1080x1920 RGB image
            vid.stdout.flush() # flush the buffer
            image = image[xmin:xmax,ymin:ymax] # crop the image around the flag
            image1 = image[:,:,0] # red channel of the image
            list_pt_x = []                      # x coordinates of flag pixels for frame i of second t
            list_pt_y = []                      # y coordinates of flag pixels for frame i of second t
            for p in range(len(image)):         # scan the image
                for pp in range(len(image[0])):
                    if image1[p][pp] > 175:              # record the coordinates of pixels belonging to the flag
                        list_pt_x.append(p)
                        list_pt_y.append(pp)
            reg = np.polyfit(list_pt_y, list_pt_x, 1)       # regression coefficients a and b (y = ax + b)
            if -0.0973 >= reg[0] >= -2.7864:     # if the slope corresponds to a wind speed covered by the model
                X.append(i/25)                # append the time to the x-axis list
                Y.append(float(Mod(reg[0])))  # append the wind speed to the y-axis list
            if i == 25*t:                                  # display thresholded image + regression line (one frame shown per second)
                for p in range(len(image1)):
                    for pp in range(len(image1[0])):
                        if image1[p][pp] > 175:
                            image[p,pp,0] = 255
                            image[p,pp,1] = 255
                            image[p,pp,2] = 255
                        else :
                            image[p,pp,0] = 0
                            image[p,pp,1] = 0
                            image[p,pp,2] = 0
                        y = int(reg[0]*pp + reg[1])
                        if y > 0 and y < len(image1):
                            image[y,pp,0] = 127
                            image[y,pp,1] = 127
                            image[y,pp,2] = 127
                image = Image.fromarray(image)
                global img
                img = ImageTk.PhotoImage(image)
                
                canvas_img.create_image(0, 0, anchor=NW, image=img)
                canvas_img.update_idletasks() 
                fenetre.update()
        PourcExe(t+1,duree)
        TempsExe((datetime.datetime.now()-duree_traitement_par_sec).total_seconds(), duree)
        canvas_pourc.itemconfigure(text_pourc, text=pourc) 
        canvas_temps.itemconfigure(text_temps, text=time) 
        
        regression_moy_sec = -sum(Y)/len(Y)
        if regression_moy_sec<-2.7865:
            feuil1.write(0,t+1,t)
            feuil1.write(1,t+1,'Vent trop faible')
            canvas_vit.itemconfigure(text_vit, text='faible')
        elif regression_moy_sec > -0.0973:
            feuil1.write(0,t+1,t)
            feuil1.write(1,t+1,'Vent trop fort')
            canvas_vit.itemconfigure(text_vit, text='faible')
        else:
            feuil1.write(0,t+1,t)
            feuil1.write(1,t+1,float(Mod(regression_moy_sec)))
            vall=int(Mod(regression_moy_sec)*1000)
            val=str(float(vall)/1000)+" m/s"
            canvas_vit.itemconfigure(text_vit, text=val)
        fenetre.update
        print("Temps video :",t,"s")
        print((datetime.datetime.now()-duree_traitement_par_sec).total_seconds(),"secondes pour traiter une seconde de video")
    pl.plot(X,Y)  # plot the results
    pl.show()
    book.save(Namefile) # save the Excel file
    temps_traitement_total=(datetime.datetime.now()-duree_traitement_video).total_seconds()
    print("Temps traitement total :",temps_traitement_total,"s") # total processing time
    print(sum(Y)/len(Y))
    showinfo("Boite de dialogue", "Le traitement de la video est terminé") 
Example #41
NO_OF_ROWS = 38

# Give the location of the file
loc = ("/home/rahul/Desktop/FormsToBeDeleted.xlsx")

# To open Workbook
wb = xlrd.open_workbook(loc)
sheet = wb.sheet_by_index(0)

# Iterate over all rows and columns
for i in range(0, NO_OF_ROWS):
    row = []
    for j in range(0, NO_OF_COLUMNS):
        #print(type(sheet.cell_value(i,j)))
        row.append(sheet.cell_value(i, j))
    print(row)

data = sheet.cell_value(0, 3)
print(type(data))
print(datetime.datetime.fromtimestamp(data))
#data = sheet.cell_value(0, 0)

#print(data)

loc = ("/home/rahul/Desktop/FormsToBeDeleted.xlsx")

# To open Workbook
wb = Workbook()
sheet1 = wb.add_sheet('Sheet 1')
wb.save('xlwt example.xls')
Example #42
generalSheet = wb.add_sheet('general', cell_overwrite_ok=True)
bronzeSheet = wb.add_sheet('bronze', cell_overwrite_ok=True)
silverSheet = wb.add_sheet('silver', cell_overwrite_ok=True)
goldSheet = wb.add_sheet('gold', cell_overwrite_ok=True)
platinumSheet = wb.add_sheet('platinum', cell_overwrite_ok=True)
advancedSheet = wb.add_sheet('advanced', cell_overwrite_ok=True)

links = [("https://usaco.guide/general/using-this-guide", generalSheet),
         ("https://usaco.guide/bronze/time-comp", bronzeSheet),
         ('https://usaco.guide/silver/binary-search-sorted', silverSheet),
         ('https://usaco.guide/gold/divis', goldSheet),
         ('https://usaco.guide/plat/seg-ext', platinumSheet),
         ('https://usaco.guide/adv/springboards', advancedSheet)]

tcount = 70

for link, sheet in links:
    tcount += 1
    for i in get_all_links(link, d):
        px = px + get_all_unique_problems(i, d)
    cu = 1
    for i in px:
        write_problem_at_row(i, sheet, cu)
        cu += 1
    px.clear()
    wb.save(f'{tcount}.xls')

d.close()
wb.save('final.xls')
示例#43
0
import datetime
from threading import Thread

from xlwt import Workbook

# The original snippet used `sheet`/`xlsx_file` without creating them; the workbook
# setup below (and the sheet name) is an assumption so the example runs as-is.
xlsx_file = Workbook()
sheet = xlsx_file.add_sheet('Sheet 1')


#time function
def time():
    now = datetime.datetime.today()
    return now


#write txt file
class txt_files(Thread):
    def run(self):
        with open("user input.txt", "a") as txt_file:
            txt_file.write(user_input + "\n" + str(time()) + "\n")


#write excel file
class xlsx_files(Thread):
    def run(self):
        sheet.write(i, 0, user_input)
        sheet.write(i, 1, str(time()))


#user input name
for i in range(2):
    user_input = input("enter your name: ")
    txt_files2 = txt_files()
    xlsx_files2 = xlsx_files()
    txt_files2.start()
    xlsx_files2.start()
xlsx_file.save("user input.xls")
示例#44
0
from bs4 import BeautifulSoup
from xlwt import Workbook
from fake_useragent import UserAgent

excel_name = u'douban_hot_review.xls'
sheet_name = u'豆瓣影评'
column = [u'标题', u'作者', u'影片', u'影评']

douban_excel = Workbook(encoding='utf-8')  # the file name is supplied at save() time, not here
douban_sheet = douban_excel.add_sheet(sheet_name, cell_overwrite_ok=True)
for i, col_name in enumerate(column):
    douban_sheet.write(0, i, col_name)
douban_excel.save(excel_name)


def get_movie_review():
    pass  # placeholder so the def parses; the body below is still commented out
    #html = get_html(url, 1, 3)

    #    titles = soup.select('.main-bd h2')
    #    i = 0
    #    for row in (range(1+page*10,11+page*10)):
    #        douban_sheet.write(row,0,titles[i].text)
    #        i = i + 1 #print(titles[i].text)

    #    names = soup.select('.name')
    #    i = 0
    #    for row in (range(1+page*10,11+page*10)):
    #        douban_sheet.write(row,1,names[i].text)
示例#45
0
                                                  today_date, 5)
    ws_index.write_merge(row_no, row_no + 1, 0, 0, each_index, style_name)
    col_no = 1
    temp_series = temp_result[u"涨跌幅"]
    Xls_Writer_pctchg(ws_index, temp_series, all_data, row_no, col_no, u"涨跌幅")
    col_no = col_no + 2
    for each_field in [u"振幅", u"日内波动率", u"成交额", u"换手率"]:
        temp_series = temp_result[each_field]
        Xls_Writer(ws_index, temp_series, row_no, col_no, each_field)
        col_no = col_no + 2
    row_no = row_no + 2

row_no = row_no + 1

for each_index in bond_index_list:
    temp_result, all_data = Bondindex_Performance(each_index,
                                                  daily_backtest_start_date,
                                                  today_date)
    ws_index.write_merge(row_no, row_no + 1, 0, 0, each_index, style_name)
    col_no = 1
    for each_field in [u"涨跌幅", u"振幅", u"日内波动率", u"成交额", u"换手率"]:
        try:
            temp_series = temp_result[each_field]
            Xls_Writer(ws_index, temp_series, row_no, col_no, each_field)
        except:
            pass
        col_no = col_no + 2
    row_no = row_no + 2

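# Note: in xlwt, save() is a Workbook method, not a Worksheet method; `ws` below is
# presumably the Workbook created earlier in the full script (only ws_index is shown here).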
ws.save("D:\\test.xls")
示例#46
0
    next_page = tree.xpath('//div[@class="unified pagination js_pageLinks"]/a/@href')
    max_length = max(len(res_name), len(res_comments), len(comment_hrefs), len(res_scores), len(res_rank))
    res_name = res_name + ['NULL' for i in range(max_length-len(res_name))]
    res_comments = res_comments + ['NULL' for i in range(max_length-len(res_comments))]
    comment_hrefs = comment_hrefs + ['NULL' for i in range(max_length-len(comment_hrefs))]
    res_scores = res_scores + ['NULL' for i in range(max_length-len(res_scores))]
    res_rank = res_rank + ['NULL' for i in range(max_length-len(res_rank))]
    for i in range(max_length):
        res_info.append([res_name[i], '厦门', res_scores[i], res_rank[i], res_comments[i], herf+comment_hrefs[i]])
    if len(res_info) >= 3000:
        for i in range(len(res_info)):
            for j in range(len(res_info[i])):
                sheet.write(row_count, j, unicode(res_info[i][j]))
            row_count += 1
        print('Generate file count:' + str(file_count))
        workbook.save('Xiamen_Maotuying_Restaurant_Information%d.xls' % file_count)
        del res_info[:]
        workbook = Workbook()
        sheet = workbook.add_sheet('Restaurant food information')
        write_excel_title(sheet, title_name)
        row_count = 1
        file_count += 1
    if len(next_page) > 0:
        next_url = herf + next_page[-1]
        url_queue.put(next_url)

for i in range(len(res_info)):
    for j in range(len(res_info[i])):
        sheet.write(row_count, j, res_info[i][j])
    row_count += 1
workbook.save('Maotuying_Restaurant_Information%d.xls' % file_count)
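The loop above flushes res_info to a new workbook every 3000 rows and writes whatever is left at the end. A hedged sketch of the same flush-every-N-rows idea as a standalone helper; the chunk size, sheet title and file-name pattern here are illustrative, not the original's:

from xlwt import Workbook

def save_in_chunks(rows, titles, chunk_size=3000, name_pattern='restaurant_info_%d.xls'):
    """Write `rows` (lists of cell values) into .xls files of at most `chunk_size` data rows each."""
    file_count = 0
    for start in range(0, len(rows), chunk_size):
        wb = Workbook(encoding='utf-8')
        sheet = wb.add_sheet('Restaurant food information')
        for col, title in enumerate(titles):
            sheet.write(0, col, title)          # header row
        for row_no, row in enumerate(rows[start:start + chunk_size], start=1):
            for col, value in enumerate(row):
                sheet.write(row_no, col, value)
        wb.save(name_pattern % file_count)      # one file per chunk
        file_count += 1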
示例#47
0
def teste(args, semente=None):

    numJogadores = 101
    numJogadas = 10000

    memoria = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    jogadores = []

    vuns = []
    vzeros = []
    somaTotal = []
    random.seed(semente)
    for i in range(numJogadores):
        nome = "PerceptronSimples-" + str(i)
        p = Perceptron(numInputs=13,
                       taxaAprendizado=args,
                       semente=random.randint(0, 1000))
        jogador = Jogador(nome, p)
        jogadores.append(jogador)

    # This part is used to write the spreadsheet
    wb = Workbook()
    sheet1 = wb.add_sheet('tabela')
    for i in range(1, numJogadas + 1):
        sheet1.write(i, 0, "jogada " + str(i))
    sheet1.write(0, 1, "Uns")
    sheet1.write(0, 2, "Zeros")
    sheet1.write(0, 3, "Soma")
    sheet1.write(0, 4, "Diferença")
    # [end] spreadsheet

    for i in range(numJogadas):
        soma = 0
        jogadas = []

        for j in range(numJogadores):
            jogada = jogadores[j].jogar(memoria[-13:])
            jogadas.append(jogada)
            soma += jogada

        minoria = -np.sign(soma)

        for j in range(numJogadores):
            if jogadas[j] == minoria:
                jogadores[j].addVitorias(i)
            else:
                jogadores[j].treinar(minoria, memoria[-13:])

        if (minoria < 0):
            vuns.append(abs(math.floor(soma / 2) - (numJogadores - 1) / 2))
            vzeros.append(
                abs(math.floor(soma / 2) + ((numJogadores - 1) / 2) + 1))
        else:
            vuns.append(
                abs(math.ceil(soma / 2) - ((numJogadores - 1) / 2) - 1))
            vzeros.append(abs(math.ceil(soma / 2) + ((numJogadores - 1) / 2)))

        # Used for the spreadsheet
        sheet1.write(i + 1, 1, vuns[i])
        sheet1.write(i + 1, 2, vzeros[i])
        sheet1.write(i + 1, 3, vuns[i] + vzeros[i])
        sheet1.write(i + 1, 4, int(abs(soma)))
        # [end] spreadsheet

        memoria.append(minoria)

        somaTotal.append(abs(soma))
        print(soma)

        #os.system('clear')

    # save the spreadsheet (xlwt writes .xls/BIFF content even though the name ends in .ods)
    wb.save('resultados/' + str(numJogadas) + ' - 101jogadores - 13inpts - ' +
            str(args) + 'eta - 02.ods')
    return somaTotal
示例#48
0
        }).find_all('tr')
    try:
        for corredor in corredores_pagina:
            if corredor.find_all('a'):
                for link in corredor.find_all('a'):
                    row += 1
                    link_corredor = link_base + link.get('href')
                    vista_corredor = urlopen(link_corredor)
                    soup_corredor = BeautifulSoup(vista_corredor,
                                                  'html.parser')
                    empresa = soup_corredor.find('h1').text
                    campos = soup_corredor.find_all('td',
                                                    attrs={'class': 'Campo'})
                    valores = soup_corredor.find_all('td',
                                                     attrs={'class': 'Valor'})
                    sheet1.write(row, 0, empresa)
                    for i, campo in enumerate(campos):
                        valor = re.sub(' +', ' ', valores[i].text)
                        if campo.text == 'Teléfonos':
                            sheet1.write(row, 1, valor)
                        elif campo.text == 'Email':
                            sheet1.write(row, 2, valor)
                        elif campo.text == 'Contacto':
                            sheet1.write(row, 3, valor)
                        elif campo.text == 'Dirección':
                            sheet1.write(row, 4, valor)
    except:
        pass

wb.save('corredores.xls')
示例#49
0
        tokens = nltk.word_tokenize(raw)
        # print(tokens)

        # text = nltk.Text(tokens)
        # print(text)

        tagged = nltk.pos_tag(tokens)
        # print(tagged)

        for i in range(0, len(tagged)):
            label = str(tagged[i][1])
            if (label in nounList):
                numNouns += 1
            elif (label in verbList):
                numVerbs += 1
            elif (label in modList):
                numMods += 1
            else:
                others += 1
        total = numNouns + numVerbs + numMods

        # write to Excel
        sheet1.write(row, 0, emot)
        sheet1.write(row, 1, str(numNouns / total))
        sheet1.write(row, 2, str(numVerbs / total))
        sheet1.write(row, 3, str(numMods / total))
        row += 1
        print(row)

    statsWB.save(title + '.xls')
示例#50
0
worddic = word_dic(tierslist, tiernames)
#print(worddic, '\n')

#writes worddic to sheet
wb = Workbook()
sheet1 = wb.add_sheet('Textgrid')
numtiers = len(tiernames)

#for every interval tier
for wordi, elem in enumerate(worddic):
    #write interval tier name, elem, starttime,endtime
    sheet1.write(numtiers * wordi * 2, 0, tiernames[0])
    j = 1
    for tup in elem:
        sheet1.write(numtiers * wordi * 2, j, tup)
        j += 1

    #for each point tier, write landmark and time row
    for namei, name in enumerate(tiernames):
        if namei != 0:
            sheet1.write(numtiers * wordi * 2 + 2 * namei - 1, 0, name)
            sheet1.write(numtiers * wordi * 2 + 2 * namei, 0, 'Time')
            j = 1
            for lmtime in worddic[elem][name]:
                sheet1.write(numtiers * wordi * 2 + 2 * namei - 1, j,
                             lmtime[1])
                sheet1.write(numtiers * wordi * 2 + 2 * namei, j, lmtime[0])
                j += 1

wb.save(savexlsas)
示例#51
0
from tempfile import TemporaryFile
from xlwt import Workbook

book = Workbook()
sheet1 = book.add_sheet('Wangzi')
sheet1.write(0, 0, '0,0')
sheet1.write(0, 1, '0,1')

book.save('D:/Desktop/simple.xls')
#book.save(TemporaryFile())
示例#52
0
def render_excel(filename, title_list, data_list, file_extension='.xls'):
    if file_extension == '.csv':
        response = HttpResponse(mimetype='text/csv')
        response['Content-Disposition'] = 'attachment; filename=' + filename
        csv_writer = csv.writer(response)

        csv_writer.writerow(title_list)

        for row_item_list in data_list:
            for i in xrange(0, len(row_item_list)):
                if row_item_list[i]:
                    if isinstance(row_item_list[i], datetime.datetime):
                        row_item_list[i] = row_item_list[i].strftime(
                            '%Y-%m-%d %H:%M:%S')
                    elif isinstance(row_item_list[i], datetime.date):
                        row_item_list[i] = row_item_list[i].strftime(
                            '%Y-%m-%d')
                    elif isinstance(row_item_list[i], datetime.time):
                        row_item_list[i] = row_item_list[i].strftime(
                            '%H:%M:%S')
                if isinstance(row_item_list[i], basestring):
                    row_item_list[i] = row_item_list[i].encode("utf-8")

            csv_writer.writerow(row_item_list)
    else:
        import StringIO
        output = StringIO.StringIO()
        export_wb = Workbook()
        export_sheet = export_wb.add_sheet('Sheet1')
        col_idx = 0
        for col_title in title_list:
            export_sheet.write(0, col_idx, "%s" % col_title)
            col_idx += 1
        row_idx = 1
        for row_item_list in data_list:
            col_idx = 0
            for cell_value in row_item_list:
                if cell_value:
                    cell_value_is_date = False
                    if isinstance(cell_value, datetime.datetime):
                        cell_value = xlrd.xldate.xldate_from_datetime_tuple(
                            (cell_value.year, cell_value.month, cell_value.day,
                             cell_value.hour, cell_value.minute,
                             cell_value.second), 0)
                        cell_value_is_date = True
                    elif isinstance(cell_value, datetime.date):
                        cell_value = xlrd.xldate.xldate_from_date_tuple(
                            (cell_value.year, cell_value.month,
                             cell_value.day), 0)
                        cell_value_is_date = True
                    elif isinstance(cell_value, datetime.time):
                        cell_value = xlrd.xldate.xldate_from_time_tuple(
                            (cell_value.hour, cell_value.minute,
                             cell_value.second))
                        cell_value_is_date = True
                    elif isinstance(cell_value, models.Model):
                        cell_value = str(cell_value)
                    if cell_value_is_date:
                        s = XFStyle()
                        s.num_format_str = 'M/D/YY'
                        export_sheet.write(row_idx, col_idx, cell_value, s)
                    else:
                        export_sheet.write(row_idx, col_idx, cell_value)
                col_idx += 1
            row_idx += 1
        export_wb.save(output)
        output.seek(0)
        str_out = output.getvalue()
        response = HttpResponse(str_out)
        response['Content-Type'] = 'application/vnd.ms-excel'
        response['Content-Disposition'] = 'attachment; filename=' + filename

    return response
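For context, a minimal sketch of how a Django view might call render_excel above; the view name and row data are made-up placeholders, since the surrounding project wiring is not shown:

import datetime

def export_demo(request):
    title_list = ['Created', 'Name', 'Total']
    data_list = [
        [datetime.date(2020, 1, 1), 'Alice', 10.5],
        [datetime.datetime(2020, 1, 2, 9, 30), 'Bob', 7.25],
    ]
    # '.xls' takes the xlwt branch above; '.csv' would exercise the csv.writer branch
    return render_excel('demo.xls', title_list, data_list, file_extension='.xls')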
示例#53
0
def write_excel(data):
    file_w = Workbook()
    sheet1 = file_w.add_sheet(u'Data', cell_overwrite_ok=True)  # create the sheet
    write_data(data, sheet1)
    file_w.save('data.xls')
    return 0
示例#54
0
def main():
    usdinr = []
    usdinr_change = []
    dates = []
    diffrates = []
    ratesD = []
    ratesF = []

    # expiry in 12 months
    T = 12

    wb = excel.open_workbook(filename='India_data.xlsx')
    wb1 = Workbook()
    ws1 = wb1.add_sheet('Prices')

    # code outputs tons of data to an excel spreadsheet including prices using
    # 1. Vanilla monte carlo
    # 2. Antithetic monte carlo
    # 3. Black Scholes Model
    # and also outputs the standard error of these price estimations

    # We simulate 4 different paths with strike prices a few std's away from the actual prevailing price on the date.
    # The labels below aren't accurate. We settled on different sigma values in the final simulation.

    ws1.write(0, 0, 'Date')
    ws1.write(0, 1, 'USDINR')
    ws1.write(0, 2, 'Vanilla K=-1sigma')
    ws1.write(0, 3, 'Vanilla K=-0.5sigma')
    ws1.write(0, 4, 'Vanilla K=+0.5sigma')
    ws1.write(0, 5, 'Vanilla K=+1sigma')
    ws1.write(0, 6, 'Antithetic K=-1sigma')
    ws1.write(0, 7, 'Antithetic K=-0.5sigma')
    ws1.write(0, 8, 'Antithetic K=+0.5sigma')
    ws1.write(0, 9, 'Antithetic K=+1sigma')
    ws1.write(0, 10, 'BS K=-1sigma')
    ws1.write(0, 11, 'BS K=-0.5sigma')
    ws1.write(0, 12, 'BS K=+0.5sigma')
    ws1.write(0, 13, 'BS K=+1sigma')
    ws1.write(0, 14, 'Strike Price K=-1sigma')
    ws1.write(0, 15, 'Strike Price K=-0.5sigma')
    ws1.write(0, 16, 'Strike Price K=+0.5sigma')
    ws1.write(0, 17, 'Strike Price K=+1sigma')
    ws1.write(0, 18, 'Std Error Vanilla K=-1sigma')
    ws1.write(0, 19, 'Std Error Vanilla K=-0.5sigma')
    ws1.write(0, 20, 'Std Error Vanilla K=+0.5sigma')
    ws1.write(0, 21, 'Std Error Vanilla K=+1sigma')
    ws1.write(0, 22, 'Std Error Antithetic K=-1sigma')
    ws1.write(0, 23, 'Std Error Antithetic K=-0.5sigma')
    ws1.write(0, 24, 'Std Error Antithetic K=+0.5sigma')
    ws1.write(0, 25, 'Std Error Antithetic K=+1sigma')

    # cell co-ordinates work like array indexes within the spreadsheet
    for i in range(2, 121):
        single_usd_inr = wb.sheet_by_name("Sheet1").cell_value(i, 3)
        usdinr.append(single_usd_inr)
        usdinr_change.append(
            math.log(1 + wb.sheet_by_name("Sheet1").cell_value(i, 4)))
        single_date = wb.sheet_by_name("Sheet1").cell_value(i, 0)
        ws1.write(i, 0, single_date)
        ws1.write(i, 1, single_usd_inr)
        dates.append(
            datetime.fromordinal(
                datetime(1900, 1, 1).toordinal() + int(single_date) - 2))
        ratesD.append(
            math.log(1 + wb.sheet_by_name("Sheet1").cell_value(i, 1) / 100) /
            12)
        ratesF.append(
            math.log(1 + wb.sheet_by_name("Sheet1").cell_value(i, 9) / 100) /
            12)
        diffrates.append(ratesD[i - 2] - ratesF[i - 2])

    optionPricesVanilla = []
    optionPricesAntithetic = []
    optionPricesBS = []
    for i in range(12, len(dates) - T):
        sigma1 = np.std(usdinr_change[i - 12:i - 1])
        sigma = np.std(usdinr[i - 12:i - 1])
        mean = np.average(usdinr_change[i - 12:i - 1])

        # Strike prices are decided as +/- [0.75, 0.25] sigma from spot price.
        # Since this is a simulation, we calculate the option price through simulated paths and compare it against the
        # actual strike price during the selected expiry date to decide if the option is exercised or not.
        K = [
            usdinr[i] - 0.75 * sigma * math.sqrt(T),
            usdinr[i] - 0.25 * sigma * math.sqrt(T),
            usdinr[i] + 0.25 * sigma * math.sqrt(T),
            usdinr[i] + 0.75 * sigma * math.sqrt(T)
        ]
        ws1.write(i, 14, K[0])
        ws1.write(i, 15, K[1])
        ws1.write(i, 16, K[2])
        ws1.write(i, 17, K[3])

        optionPrice = []
        stderr = []
        for k in K:
            optionPrice_single, stderr_single = price_asian_option_vanilla(
                usdinr[i], ratesD[i], ratesF[i], diffrates[i], sigma1, T, 100,
                120, k)
            optionPrice.append(optionPrice_single)
            stderr.append(stderr_single)
        optionPricesVanilla.append(optionPrice)
        ws1.write(i, 2, optionPrice[0])
        ws1.write(i, 3, optionPrice[1])
        ws1.write(i, 4, optionPrice[2])
        ws1.write(i, 5, optionPrice[3])
        ws1.write(i, 18, stderr[0])
        ws1.write(i, 19, stderr[1])
        ws1.write(i, 20, stderr[2])
        ws1.write(i, 21, stderr[3])

        optionPrice = []
        stderr = []
        for k in K:
            optionPrice_single, stderr_single = price_asian_option_antithetic(
                usdinr[i], ratesD[i], ratesF[i], diffrates[i], sigma1, T, 100,
                120, k)
            optionPrice.append(optionPrice_single)
            stderr.append(stderr_single)
        optionPricesAntithetic.append(optionPrice)
        ws1.write(i, 6, optionPrice[0])
        ws1.write(i, 7, optionPrice[1])
        ws1.write(i, 8, optionPrice[2])
        ws1.write(i, 9, optionPrice[3])
        ws1.write(i, 22, stderr[0])
        ws1.write(i, 23, stderr[1])
        ws1.write(i, 24, stderr[2])
        ws1.write(i, 25, stderr[3])

        optionPrice = [
            price_option_BS(usdinr[i], ratesD[i], ratesF[i], diffrates[i],
                            sigma1, T, 100, k) for k in K
        ]
        optionPricesBS.append(optionPrice)
        ws1.write(i, 10, optionPrice[0])
        ws1.write(i, 11, optionPrice[1])
        ws1.write(i, 12, optionPrice[2])
        ws1.write(i, 13, optionPrice[3])

    optionPricesVanilla = np.array(optionPricesVanilla)
    optionPricesAntithetic = np.array(optionPricesAntithetic)
    optionPricesBS = np.array(optionPricesBS)

    wb1.save('Option Prices.xls')

    # finally, we plot all the different option prices from our simulations.
    axis1 = plotter.subplot(2, 2, 1)
    axis1.plot_date(dates[12:len(dates) - T],
                    optionPricesVanilla[:, 0],
                    label="-2sigma",
                    linestyle='solid',
                    marker=',')
    axis1.plot_date(dates[12:len(dates) - T],
                    optionPricesVanilla[:, 1],
                    label="-1sigma",
                    linestyle='solid',
                    marker=',')
    axis1.plot_date(dates[12:len(dates) - T],
                    optionPricesVanilla[:, 2],
                    label="+1sigma",
                    linestyle='solid',
                    marker=',')
    axis1.plot_date(dates[12:len(dates) - T],
                    optionPricesVanilla[:, 3],
                    label="+2sigma",
                    linestyle='solid',
                    marker=',')
    axis1.set_ylabel('Option Prices')
    axis1.legend()
    axis2 = axis1.twinx()
    axis2.plot_date(dates,
                    usdinr,
                    label='USDINR',
                    linestyle='solid',
                    marker=',')
    axis2.set_ylabel('USD / INR Prices')
    axis2.legend()
    plotter.title('Vanilla Monte Carlo Simulation')

    axis1 = plotter.subplot(2, 2, 2)
    axis1.plot_date(dates[12:len(dates) - T],
                    optionPricesBS[:, 0],
                    label="-2sigma",
                    linestyle='solid',
                    marker=',')
    axis1.plot_date(dates[12:len(dates) - T],
                    optionPricesBS[:, 1],
                    label="-1sigma",
                    linestyle='solid',
                    marker=',')
    axis1.plot_date(dates[12:len(dates) - T],
                    optionPricesBS[:, 2],
                    label="+1sigma",
                    linestyle='solid',
                    marker=',')
    axis1.plot_date(dates[12:len(dates) - T],
                    optionPricesBS[:, 3],
                    label="+2sigma",
                    linestyle='solid',
                    marker=',')
    axis1.set_ylabel('Option Prices')
    axis1.legend()
    # axis2 = axis1.twinx()
    # axis2.plot_date(dates, usdinr, label='USDINR', linestyle='solid', marker=',')  # , secondary_y=True)
    # axis2.set_ylabel('USD / INR Prices')
    # axis2.legend()
    plotter.title('Black Scholes Calculation')

    plotter.subplot(2, 2, 3)
    plotter.plot_date(dates[12:len(dates) - T],
                      optionPricesAntithetic[:, 0],
                      label="-2sigma",
                      linestyle='solid',
                      marker=',')
    plotter.plot_date(dates[12:len(dates) - T],
                      optionPricesAntithetic[:, 1],
                      label="-1sigma",
                      linestyle='solid',
                      marker=',')
    plotter.plot_date(dates[12:len(dates) - T],
                      optionPricesAntithetic[:, 2],
                      label="+1sigma",
                      linestyle='solid',
                      marker=',')
    plotter.plot_date(dates[12:len(dates) - T],
                      optionPricesAntithetic[:, 3],
                      label="+2sigma",
                      linestyle='solid',
                      marker=',')
    plotter.legend()
    plotter.title('Antithetic Monte Carlo Simulation')
    plotter.ylabel('Option Prices')

    plotter.show()
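The comments above describe pricing with vanilla Monte Carlo, antithetic Monte Carlo and Black-Scholes, but price_asian_option_vanilla / price_asian_option_antithetic / price_option_BS themselves are not shown. Below is a minimal, self-contained sketch of the antithetic-variates idea for an arithmetic-average Asian call under geometric Brownian motion; it is a generic illustration, not the module's implementation, and every parameter value is made up.

import numpy as np

def asian_call_antithetic(S0, r, sigma, T, strike, n_steps=120, n_paths=5000, seed=0):
    """Price an arithmetic-average Asian call with antithetic variates under GBM."""
    rng = np.random.default_rng(seed)
    dt = T / n_steps
    z = rng.standard_normal((n_paths, n_steps))
    payoffs = []
    for draws in (z, -z):  # each normal draw is reused with its sign flipped
        log_increments = (r - 0.5 * sigma ** 2) * dt + sigma * np.sqrt(dt) * draws
        paths = S0 * np.exp(np.cumsum(log_increments, axis=1))
        payoffs.append(np.maximum(paths.mean(axis=1) - strike, 0.0))
    paired = 0.5 * (payoffs[0] + payoffs[1])      # pairing the mirrored paths is what cuts the variance
    discounted = np.exp(-r * T) * paired
    return discounted.mean(), discounted.std(ddof=1) / np.sqrt(n_paths)

price, stderr = asian_call_antithetic(S0=70.0, r=0.06, sigma=0.10, T=1.0, strike=72.0)
print(price, stderr)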
示例#55
0

if __name__ == "__main__":
    w = Workbook()
    today = datetime.date.today()
    yesterday = datetime.date.today() - datetime.timedelta(days=1)
    all_city = gansu + xinjiang + qinghai + ningxia + shanxi
    dict_all = {}
    for city in all_city:
        dict_all[city] = 0
    get_all_wenshu()
    dict_b = OrderedDict()
    dict_b["甘肃"] = gansu
    dict_b["新疆"] = xinjiang
    dict_b["青海"] = qinghai
    dict_b["宁夏"] = ningxia
    dict_b["陕西"] = shanxi
    mysql_db = get_conn()
    mysql_cur = mysql_db.cursor()
    for a, b in dict_b.items():
        logger.info(a)
        ktgg_ = ktgg(a)
        wenshu_ = wenshu(b)
        main(a, wenshu_, ktgg_)
    w.save("5province_increase-{}.xls".format(yesterday))
    my_email.hz_send(
        "兰州项目西北五省涉诉站点统计-{}".format(yesterday),
        "5province_increase-{}.xls".decode("utf8").format(yesterday),
        "*****@*****.**")
    mysql_db.close()
示例#56
0
                valid = validators.url(webs)
                if valid:
                    print(webs)
                    sheet1.write(count1, 1, webs)
                else:
                    print('no website')
                    sheet1.write(count1, 1, 'NONE')
                element.click()
            else:
                driver.back()
                time.sleep(10)
        else:
            count2 = count2 + 1
            name1 = code1.surgeon(1, dau)
            sheet2.write(count2, 0, name1)
            ph = code1.surgeon(2, dau)
            sheet2.write(count2, 1, ph)
            email = code1.surgeon(3, dau)
            sheet2.write(count2, 2, email)

        driver.execute_script("window.scrollBy(0,110)", "")

    driver.close()


url = "TYPE IN REQUIRED EMAIL"
count = 0
begin(url)
wb.save('doctors.xls')
wb1.save('surgeons.xls')
示例#57
0
from datetime import date
from xlwt import Workbook, XFStyle, Borders, Pattern, Font

fnt = Font()
fnt.name = 'Arial'

borders = Borders()
borders.left = Borders.THICK
borders.right = Borders.THICK
borders.top = Borders.THICK
borders.bottom = Borders.THICK

pattern = Pattern()
pattern.pattern = Pattern.SOLID_PATTERN
pattern.pattern_fore_colour = 0x0A

style = XFStyle()
style.num_format_str = 'YYYY-MM-DD'
style.font = fnt
style.borders = borders
style.pattern = pattern

book = Workbook()
sheet = book.add_sheet('A Date')
sheet.write(1, 1, date(2009, 3, 18), style)

book.save('date.xls')
示例#58
0
                    sheet.cell_value(vallist.index(response), numCol))
                averageVal = respCounter / counter

        robject = word, averageVal
        resultList.append(robject)

    # Writing value to another excel sheet
    openWB(KeyDeiverName, resultList)


# Creating the lookup table
analyzeInitSurvey("Employee Development", 5, 6)
analyzeInitSurvey("Culture", 11, 12)
analyzeInitSurvey("Work-Life Balance", 17, 18)
analyzeInitSurvey("Leadership", 23, 24)
wb.save(saveFileName)


def getLookupTable(sheet):
    wb = xlrd.open_workbook(saveFileName)
    qSheet = wb.sheet_by_name(sheet)
    # print(qSheet.cell_value(0, 0))

    qlist = []
    for i in range(qSheet.nrows):
        qlist.append(qSheet.row_values(i))

    return qlist


def isImportant(comment):
示例#59
0
class MakeExcel(object):
    def __init__(self,excelinfo=None):
        self.STARTLINE=1
        engine=Engine()
        timemanager=TimeManager()
        self.workbook = Workbook()
        self.sheet = self.workbook.add_sheet(u'公示信息',cell_overwrite_ok=False)
        self.inforsheet=self.workbook.add_sheet(u'文档信息',cell_overwrite_ok=False)

        _tableTitle=[u"一卡通",u"学号",u"姓名",u"明细",u"说明",u"得分",u"总分"]

        # set column widths (fixed)
        self.sheet.col(0).width=4000
        self.sheet.col(1).width=4000
        self.sheet.col(2).width=3000
        self.sheet.col(3).width=20000
        self.sheet.col(4).width=20000
        self.sheet.col(5).width=2000
        self.sheet.col(6).width=2000


        # define fonts/styles for the info sheet
        # (<element>:(<attribute> <value>,)+;)+
        xls_title =easyxf(
            'font: name Arial,height 400,colour black;'
            'pattern: pattern solid, fore_colour pale_blue;'
            'alignment: horizontal center,vertical center;'
            )
        xls_info=easyxf(
            'font: name Arial,height 250,colour black;'
            'pattern: pattern solid, fore_colour white;'
            'alignment: horizontal center,vertical center;'
            'borders:top medium,bottom medium,left medium,right medium;'
            )
        self.xls_detail=easyxf(
            'font: name Arial,height 250,colour black;'
            'pattern: pattern solid, fore_colour white;'
            'alignment: horizontal center,vertical center;'
            'borders:top medium,bottom medium,left medium,right medium;'
            )

        self.sumary=easyxf(
            'font: name Arial,height 250,colour black;'
            'pattern: pattern solid, fore_colour white;'
            'alignment: horizontal center,vertical center;'
            'borders:top medium,bottom medium,left medium,right medium;'
            )
        self.details=easyxf(
            'font: name Arial,height 250,colour black;'
            'pattern: pattern solid, fore_colour yellow;'
            'alignment: horizontal center,vertical center;'
            'borders:top medium,bottom medium,left medium,right medium,bottom_colour violet;'

            )



        self.inforsheet.write_merge(0,1,0,6,excelinfo["filename"], xls_title)
        self.inforsheet.write_merge(2,3,2,6,excelinfo["admin"],xls_info)
        self.inforsheet.write_merge(2,3,0,1,u"创建者:",xls_info)
        self.inforsheet.write_merge(4,5,2,6,excelinfo["grade"],xls_info)
        self.inforsheet.write_merge(4,5,0,1,u"公示年级:",xls_info)
        #self.inforsheet.write_merge(6,7,2,6,timemanager.strTime(excelinfo["maketime"]),xls_info)
        self.inforsheet.write_merge(6,7,0,1,u"创建时间:",xls_info)
        self.inforsheet.write_merge(8,9,2,6,excelinfo["start"]+u"至"+excelinfo["end"],xls_info)
        self.inforsheet.write_merge(8,9,0,1,u"统计区间:",xls_info)
        self.inforsheet.write_merge(10,11,2,6,excelinfo["note"],self.xls_detail)
        self.inforsheet.write_merge(10,11,0,1,u"备注:",xls_info)


        for i in range(len(_tableTitle)):#Make table title
            self.sheet.write(0,i,_tableTitle[i],self.xls_detail)

    def _writeuser(self,rowNo,infobuf,lines):
        """
        写用户总体信息
        """
        _info=[infobuf["campID"],infobuf["studentID"],infobuf["name"],u"无",u"无",u"0",float(infobuf["sum"])]
        if lines==0:  # no scored items for this user
            for i in range(len(_info)):
                self.sheet.write_merge(rowNo,rowNo+lines,i,i,_info[i],self.sumary)
            return lines+1
        else:
            for i in range(len(_info)):
                if i!=3 and i!=4 and i!=5:  # leave the detail, note and score columns empty
                    self.sheet.write_merge(rowNo,rowNo+lines-1,i,i,_info[i],self.sumary)
            return lines

    def _writedetail(self,rowNo,items,lines):
        """
        写具体得分细则
        """
        i=0
        for item in items:
            _info=[item["item_name"],item["note"],item["add"]]
            for s in range(len(_info)):
                self.sheet.write(rowNo+i,3+s,_info[s],self.details)
            i+=1

    def saveAs(self,filename):
        self.workbook.save(filename)

    def run(self,userlist,starttime,endtime):
        i=self.STARTLINE
        _count=0
        for user in userlist:  # write each user's block
            engine=Engine()
            result=engine.getUserDetail(user,start_time=starttime,end_time=endtime,is_jsonify=False)
            if result is None:  # check before indexing into result
                continue
            _count+=1  # one more record
            lines=len(result["items"])

            self._writedetail(i,result["items"],lines)
            i+=self._writeuser(i,result,lines)

        if _count>0:
            return True  # at least one entry was written
        else:
            return False  # no entries at all
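The comment in __init__ documents easyxf's style-string grammar ('<element>: <attribute> <value>, ...;'). A minimal, runnable illustration of that syntax together with write_merge, independent of the class above; the sheet and file names are made up:

from xlwt import Workbook, easyxf

wb = Workbook()
ws = wb.add_sheet('demo')
banner = easyxf(
    'font: name Arial, height 400, colour black;'
    'pattern: pattern solid, fore_colour pale_blue;'
    'alignment: horizontal center, vertical center;'
)
# write_merge(first_row, last_row, first_col, last_col, value, style)
ws.write_merge(0, 1, 0, 6, u'Demo banner', banner)
wb.save('easyxf_demo.xls')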
示例#60
0
import xml.etree.ElementTree as ET

from xlwt import Workbook

# The original snippet used `wb`/`sheet1` without creating them; this setup is an assumption.
wb = Workbook()
sheet1 = wb.add_sheet('Sheet 1')

mytree = ET.parse('data.xml.xml')
myroot = mytree.getroot()
#print(myroot.tag) [for head node of data]

z = 1
ct = 0
for x in myroot[1].findall('Stroke'):
    ct = ct + 1

#print no. of strokes
print(ct)

sheet1.write(0, 0, "X_value")
sheet1.write(0, 1, "Y_value")
sheet1.write(0, 2, "Time")
# accessing each child
for x in myroot[1]:
    for y in x:
        sheet1.write(z, 0, y.get('x'))
        sheet1.write(z, 1, y.get('y'))
        sheet1.write(z, 2, y.get('time'))
        z = z + 1
    sheet1.write(z, 0, '\n')
    sheet1.write(z, 1, '\n')
    sheet1.write(z, 2, '\n')
    z = z + 1

#saving data from sheet to xls file
wb.save('datafile.xls')