from xlwt import Workbook, easyxf

# Pre-built border styles, one per edge/corner of a thick-bordered box.
style_top_left = easyxf('border: left thick, top thick')
style_top = easyxf('border: top thick')
style_top_right = easyxf('border: right thick, top thick')
style_right = easyxf('border: right thick')
style_bottom_right = easyxf('border: right thick, bottom thick')
style_bottom = easyxf('border: bottom thick')
style_bottom_left = easyxf('border: left thick, bottom thick')
style_left = easyxf('border: left thick')

book = Workbook()
sheet = book.add_sheet('Border')

# Walk the perimeter of the 3x3 cell block clockwise from the top-left
# corner, writing an empty cell with the matching border style.
for row, col, cell_style in (
    (1, 1, style_top_left),
    (1, 2, style_top),
    (1, 3, style_top_right),
    (2, 3, style_right),
    (3, 3, style_bottom_right),
    (3, 2, style_bottom),
    (3, 1, style_bottom_left),
    (2, 1, style_left),
):
    sheet.write(row, col, style=cell_style)

book.save('borders.xls')
def Q_Learning(Pr_des, eps_unc, N_EPISODES, SHOW_EVERY, LEARN_RATE, DISCOUNT, EPS_DECAY, epsilon, i_s, pa, energy_pa, pa2ts, pa_s, pa_t, act_num, possible_acts, possible_next_states, pick_up, delivery, pick_ups, deliveries, test_n):
    """Run epsilon-greedy Q-learning over several product automata (one per
    pick-up/delivery task) and log per-task statistics into an .xls sheet.

    Parameters
    ----------
    Pr_des : desired minimum probability of finishing the mission; successors
        whose worst-case success probability drops below it are pruned.
    eps_unc : probability that the executed action differs from the chosen one.
    N_EPISODES, SHOW_EVERY : total episodes and the reporting period.
    LEARN_RATE, DISCOUNT, EPS_DECAY, epsilon : Q-learning hyper-parameters;
        epsilon decays multiplicatively each episode.
    i_s : initial product-automaton (PA) state per task.
    pa : product automata; pa[i].g is a graph with (region, level) nodes.
    energy_pa : per-task energy of each PA state; 0 marks mission completion.
    pa2ts : map from PA state index to transition-system (grid) state.
    pa_s, pa_t, act_num : PA edge sources/targets and action ids, consumed by
        action_uncertainity().
    possible_acts, possible_next_states : admissible actions / successors per state.
    pick_up, delivery, pick_ups, deliveries : pick-up / delivery region ids.
    test_n : test index, used only to name the output sheet.

    Returns
    -------
    opt_pol : trajectory of the last episode as (row, col, 0) grid tuples.

    NOTE(review): relies on module-level names defined elsewhere in the file
    (np, timeit, copy, math, ep_len, rewards_pa, n, action_uncertainity,
    Workbook).  Row indices like 1 + N_EPISODES/SHOW_EVERY assume integer
    division (Python 2); under Python 3 they become floats — confirm.
    """
    wb = Workbook()
    sheet_name = 'Simulation' + str(test_n+1)
    s1 = wb.add_sheet(sheet_name)
    # Sheet headers; each task's result rows start N_EPISODES/SHOW_EVERY apart.
    s1.write(1,0,'Task-1')
    s1.write(1+N_EPISODES/SHOW_EVERY,0,'Task-2')
    s1.write(1+2*N_EPISODES/SHOW_EVERY,0,'Task-3')
    s1.write(1+3*N_EPISODES/SHOW_EVERY,0,'Task-4')
    s1.write(0,1,'# of Hit')
    s1.write(0,2,' Avg. Reward')
    s1.write(0,3,' Discounted Avg. Reward')
    s1.write(0,11,' Discounted Episode Reward - Task 1')
    s1.write(0,12,' Discounted Episode Reward - Task 2')
    s1.write(0,13,' Discounted Episode Reward - Task 3')
    s1.write(0,14,' Discounted Episode Reward - Task 4')
    s1.write(0,6,'Total Run Time')
    s1.write(0,7,'Total Avg. Reward')
    inx = 0
    QL_start_time = timeit.default_timer()
    EVERY_PATH = []
    episode_rewards = []
    # Initialize the Q - table (Between -0.01 and 0)
    pa_size = []
    q_table = []
    agent_s = []
    hit_count = []
    mission_tracker = []
    ep_per_task = []
    disc_ep_per_task = []
    old_q_tables = []
    for i in range(len(energy_pa)):
        pa_size.append(len(pa[i].g.nodes()))
        # One Q-table per task: one row per (state, remaining-time) pair, 9 actions.
        q_table.append(np.random.rand(pa_size[i] * ep_len,9) * 0.001 - 0.001) # of states x # of actions
        agent_s.append(i_s[i]) # Initialize the agent's location
        hit_count.append(0)
        mission_tracker.append(0)
        ep_per_task.append([])
        disc_ep_per_task.append([])
        old_q_tables.append(q_table[i])
    ep_rewards = []
    ep_trajectories_pa = []
    agent_upt_i = []
    agent_upt = []
    # agent_upt[j]: for PA states at level 0 (or at the pick-up region) hold the
    # grid state, else []; used to re-seed agent_s after a mission completes.
    # NOTE(review): agent_upt_i is never reset inside the j-loop, so every
    # agent_upt[j] aliases the same accumulated list — confirm intended.
    for j in range(len(energy_pa)):
        for i in range(len(pa[j].g.nodes())):
            if pa[j].g.nodes()[i][1] == 0 or str(pa[j].g.nodes()[i][0]) == 'r'+str(pick_up[j]) : #or str(pa[j].g.nodes()[i][0]) == 'r'+str(delivery[j]): # If the mission changes check here
                agent_upt_i.append(pa2ts[j][i])
            else:
                agent_upt_i.append([])
        agent_upt.append(agent_upt_i)
    for episode in range(N_EPISODES):
        #if episode > 8000:
        #   epsilon = 0
        which_pd = np.random.randint(len(energy_pa)) # randomly chosing the pick_up delivery states
        old_q_tables[which_pd] = q_table[which_pd]
        mission_tracker[which_pd] = mission_tracker[which_pd] + 1
        hit = []
        ep_rew = []
        for i in range(len(energy_pa)):
            hit.append(0)
        ep_traj_pa = [agent_s[which_pd]] # Initialize the episode trajectory
        ep_rew = 0 # Initialize the total episode reward
        disc_ep_rew = 0
        # Deep copies so per-episode pruning never mutates the caller's inputs.
        possible_next_states_copy = copy.deepcopy(possible_next_states[which_pd])
        possible_acts_copy = copy.deepcopy(possible_acts[which_pd])
        for t_ep in range(ep_len):
            k_ep = ep_len - t_ep # Remaning episode time
            if hit[which_pd] == 0:
                #print(which_pd)
                #print(agent_s[which_pd])
                if energy_pa[which_pd][agent_s[which_pd]] == 0: # Raise the 'hit flag' if the mission is achieved
                    hit[which_pd] = 1
                    # re-initialize the agent_s to prevent stuck
                    agent_s[which_pd] = agent_upt[which_pd].index(pa2ts[which_pd][agent_s[which_pd]]) # Reinitiliaze the pa(region, 0)
                    hit_count[which_pd] = hit_count[which_pd] + 1
                    #break
                en_list = [energy_pa[which_pd][i] for i in possible_next_states_copy[agent_s[which_pd]]] # Energies of the next possible states
                not_possible_index = []
                ind_minholder = en_list.index(min(en_list)) #np.argmin(np.array(en_list))
                # Keep the minimum-energy successor aside as a fallback.
                possible_next_states_minholder = possible_next_states_copy[agent_s[which_pd]][ind_minholder]
                possible_acts_minholder = possible_acts_copy[agent_s[which_pd]][ind_minholder]
                # Prune successors whose chance of finishing within the remaining
                # time falls below the desired success probability Pr_des.
                for j in range(len(possible_next_states_copy[agent_s[which_pd]])):
                    d_max = en_list[j] + 1
                    i_max = int(math.floor((k_ep - 1 - d_max) / 2))
                    thr_fun = 0
                    for i in range(i_max+1):
                        # Binomial tail: probability of at most i action perturbations in k_ep steps.
                        thr_fun = thr_fun + np.math.factorial(k_ep) / (np.math.factorial(k_ep-i) * np.math.factorial(i)) * eps_unc**i * (1-eps_unc)**(k_ep-i)
                    if thr_fun > Pr_des or i_max < 0: #energy_pa[possible_next_states_copy[agent_s][j]] > k_ep-2: #
                        not_possible_index.append(j)
                #print("r1pos = " + str(len(possible_next_states_copy[agent_s[which_pd]])))
                # Delete from the back so earlier indices stay valid.
                for ind in sorted(not_possible_index, reverse=True):
                    del possible_next_states_copy[agent_s[which_pd]][ind]
                    del possible_acts_copy[agent_s[which_pd]][ind]
                #print("r2pos = " + str(len(possible_next_states_copy[agent_s[which_pd]])))
                if len(possible_next_states_copy[agent_s[which_pd]]) == 0: # not possible_next_states_copy[agent_s]: #
                    # Everything was pruned: restore the minimum-energy fallback.
                    possible_next_states_copy[agent_s[which_pd]].append(possible_next_states_minholder)
                    possible_acts_copy[agent_s[which_pd]].append(possible_acts_minholder)
            if np.random.uniform() > epsilon: # Exploit
                # NOTE(review): the Q-table is indexed by raw state id here but by
                # state*ep_len + time-slot in the update below — confirm intended.
                possible_qs = q_table[which_pd][agent_s[which_pd], possible_acts_copy[agent_s[which_pd]]] # Possible Q values for each action
                next_ind = np.argmax(possible_qs) # Pick the action with max Q value
            else: # Explore
                next_ind = np.random.randint(len(possible_acts_copy[agent_s[which_pd]])) # Picking a random action
            # Taking the action
            real_agent_s = agent_s[which_pd]
            real_action = possible_acts_copy[real_agent_s][next_ind]
            if np.random.uniform() < eps_unc:
                # Action uncertainty: the executed action may differ from the chosen one.
                [chosen_act, next_state] = action_uncertainity(possible_acts_copy[agent_s[which_pd]][next_ind], pa_s[which_pd], pa_t[which_pd], act_num[which_pd], agent_s[which_pd])
                action = chosen_act
                s_a = (agent_s[which_pd], action) # State & Action pair
                #current_q = q_table[which_pd][agent_s[which_pd], action] # (save the current q for the q_table update later on)
                agent_s[which_pd] = next_state # possible_next_states[agent_upt.index(pa2ts[agent_s])][next_ind] # moving to next state (s,a)
            else:
                action = possible_acts_copy[agent_s[which_pd]][next_ind]
                s_a = (agent_s[which_pd], action) # State & Action pair
                #current_q = q_table[which_pd][agent_s[which_pd], action] # (save the current q for the q_table update later on)
                agent_s[which_pd] = possible_next_states_copy[agent_s[which_pd]][next_ind] # moving to next state (s,a)
            ep_traj_pa.append(agent_s[which_pd])
            # Time-augmented Q update: row = state * ep_len + remaining-time slot.
            current_q = q_table[which_pd][real_agent_s * ep_len + (ep_len-t_ep-1), real_action]
            max_future_q = np.amax(q_table[which_pd][agent_s[which_pd] * ep_len + (ep_len-t_ep-2), :]) # Find the max future q
            rew_obs = rewards_pa[which_pd][agent_s[which_pd]] #np.random.binomial(1, 1-rew_uncertainity) * # Observe the rewards of the next state
            new_q = (1 - LEARN_RATE) * current_q + LEARN_RATE * (rew_obs + DISCOUNT * max_future_q)
            q_table[which_pd][real_agent_s * ep_len + (ep_len-t_ep-1), real_action] = new_q
            #print("episode = " + str(episode))
            # (A commented-out experimental block that propagated the same update
            # to the other tasks' Q-tables was removed here for readability.)
            disc_ep_rew += rew_obs * (DISCOUNT ** t_ep)
            ep_rew += rew_obs
        q_table_diff_perc = (old_q_tables[which_pd] - q_table[which_pd]) / old_q_tables[which_pd] * 100
        agent_s[which_pd] = agent_upt[which_pd].index(pa2ts[which_pd][agent_s[which_pd]])
        # for i in range(len(energy_pa)):
        #   agent_s[i] = agent_upt[i].index(pa2ts[i][agent_s[i]]) # Reinitiliaze the pa(region, 0)
        ep_rewards.append(ep_rew)
        ep_trajectories_pa.append(ep_traj_pa)
        epsilon = epsilon * EPS_DECAY # decay exploration after every episode
        disc_ep_per_task[which_pd].append(disc_ep_rew)
        ep_per_task[which_pd].append(ep_rew)
        if (episode+1) % SHOW_EVERY == 0:
            inx = inx + 1
            # Periodic per-task progress report + spreadsheet row.
            for ind in range(len(energy_pa)):
                avg_per_task = np.mean(ep_per_task[ind])
                disc_avg_per_task = np.mean(disc_ep_per_task[ind])
                print('Episode # ' + str(episode+1) + ' : Task-' + str(ind) + ' # of Hit=' + str(len(ep_per_task[ind])) + ' Avg.=' + str(avg_per_task))
                s1.write(ind*N_EPISODES/SHOW_EVERY+inx,1,len(ep_per_task[ind]))
                s1.write(ind*N_EPISODES/SHOW_EVERY+inx,2,avg_per_task)
                s1.write(ind*N_EPISODES/SHOW_EVERY+inx,3,disc_avg_per_task)
        if (episode+1) % SHOW_EVERY == 0:
            avg_rewards = np.mean(ep_rewards[episode-SHOW_EVERY +1: episode])
            print('Episode # ' + str(episode+1) + ' : Epsilon=' + str(round(epsilon, 4)) + ' Avg. reward in the last ' + str(SHOW_EVERY) + ' episodes=' + str(round(avg_rewards,2)))
    best_episode_index = ep_rewards.index(max(ep_rewards))
    optimal_policy_pa = ep_trajectories_pa[N_EPISODES-1] #ep_trajectories_pa[best_episode_index] # Optimal policy in pa
    optimal_policy_ts = [] # optimal policy in ts
    opt_pol = [] # optimal policy in (m, n, h) format for visualization
    for ind, val in enumerate(optimal_policy_pa):
        optimal_policy_ts.append(pa2ts[which_pd][val])
        opt_pol.append((math.floor(optimal_policy_ts[ind]/n), optimal_policy_ts[ind]%n, 0))
    print('\n Tajectory at the last episode : ' + str(optimal_policy_ts))
    QL_timecost = timeit.default_timer() - QL_start_time
    success_ratio = []
    for i in range(len(energy_pa)):
        success_ratio.append(100*hit_count[i]/mission_tracker[i])
        print("Successful Mission Ratio[%] = " + str(success_ratio[i]))
        print("Successful Missions = " + str(hit_count[i]) + " out of " + str(mission_tracker[i]))
    d_maxs = []
    for i in range(len(energy_pa)):
        d_maxs.append(max(energy_pa[i]))
    max_energy = max(d_maxs)
    np.save("Converged_Q_TABLE.npy",q_table[0])
    print('\n Total time for Q-Learning : ' + str(QL_timecost) + ' seconds' + "\n")
    print('Action uncertainity[%] = ' + str(eps_unc*100))
    print("Desired Minimum Success Ratio[%] = " + str(100*Pr_des))
    print("Episode Length = " + str(ep_len) + " and Max. Energy of the System = " + str(max_energy) + "\n")
    print('\n Reward at last episode = ',ep_rewards[-1])
    # for task in range(len(energy_pa)):
    #   for ind in range(len(disc_ep_per_task[task])):
    #       s1.write(1+ind,11+task,disc_ep_per_task[task][ind])
    s1.write(1,6,QL_timecost)
    s1.write(1,7,np.mean(ep_rewards))
    #filename = 'testing'+str(test_n+1) +'.xls'
    filename = 'time_included9.xls'
    wb.save(filename)
    return opt_pol
#!/usr/bin/env python3 import glob import os import sys from datetime import date from xlrd import open_workbook, xldate_as_tuple from xlwt import Workbook input_folder = sys.argv[1] output_file = sys.argv[2] output_workbook = Workbook() output_worksheet = output_workbook.add_sheet('sums_and_averages') all_data = [] sales_column_index = 3 header = [ 'workbook', 'worksheet', 'worksheet_total', 'worksheet_average', 'workbook_total', 'workbook_average' ] all_data.append(header) for input_file in glob.glob(os.path.join(input_folder, '*.xls*')): with open_workbook(input_file) as workbook: list_of_totals = [] list_of_numbers = [] workbook_output = [] for worksheet in workbook.sheets(): total_sales = 0
def exelColData(GF):
    """Build a one-sheet workbook ('Col_Data') of column data for device *GF*.

    Column 0 holds row labels, column 1 the values (real-time readings,
    calibration/recovery dates, today's date, optional status), and column 2
    a colour flag ("red"/"orange"/"black") for the value rows that have
    alarm thresholds.

    Returns the populated xlwt Workbook (caller is responsible for saving).

    NOTE(review): depends on module-level names defined elsewhere in the file
    (Workbook, date, datetime, getRealTData, colTimeData, valeur, getDateCali,
    dateCali, getDateRec, dateRec, getStatus, Status).  The threshold slicing
    below assumes valCol[0..2] are strings with unit suffixes — confirm.
    """
    # Creation of the new file
    book2 = Workbook()
    sheet1 = book2.add_sheet('Col_Data')
    valCol = []  # cell values, one per row
    nomCol = []  # row labels, parallel to valCol
    couCol = []  # colour flags for the thresholded rows
    today = str(date.today())
    if getRealTData:
        for k in (colTimeData(valeur, GF)):
            valCol.append(k)
        for l in valeur:
            nomCol.append(l)
    if getDateCali:
        valCol.append(dateCali(GF))
        nomCol.append("Last calibration")
    if getDateRec:
        valCol.append(dateRec(GF))
        nomCol.append("Last recovery")
    var1 = len(valCol)  # number of rows that receive a colour flag
    nomCol.append('Date')
    valCol.append(today)
    if getStatus:
        valCol.append(Status(GF))
        nomCol.append("Statut")
    lenVal = len(valCol)
    # Colour thresholds for the measured values ("couleur pour les limites de valeurs").
    # Values are strings with unit suffixes, so the numeric part is sliced out.
    if float(valCol[0][0:len(valCol[0]) - 4]) < 26:
        couCol.append("red")
    else:
        couCol.append("black")
    # BUGFIX: the upper-bound test previously sliced valCol[1] using
    # len(valCol[0]); both slices must use valCol[1]'s own length.
    if float(valCol[1][0:len(valCol[1]) - 3]) < -30 or float(
            valCol[1][0:len(valCol[1]) - 3]) > 50:
        couCol.append("red")
    else:
        couCol.append("black")
    if float(valCol[2][0:len(valCol[2]) - 4]) < 30:
        couCol.append("red")
    else:
        couCol.append("black")
    # Days since last calibration: red > 14 days, orange 8..14, black otherwise.
    aujd = datetime.today()
    dc = datetime.strptime(dateCali(GF), '%Y-%m-%d')
    delta = aujd - dc
    lastCali = int(str(delta.days)[0:2])
    if lastCali > 14:
        couCol.append("red")
    elif lastCali > 7 and lastCali < 15:
        couCol.append("orange")
    else:
        couCol.append("black")
    # Days since last recovery: red > 30 days, orange 22..30, black otherwise.
    dr = datetime.strptime(dateRec(GF), '%Y-%m-%d')
    lastReco = int(str(aujd - dr)[0:2])
    if lastReco > 30:
        couCol.append("red")
    # BUGFIX: the orange band tested lastCali instead of lastReco.
    elif lastReco > 21 and lastReco < 31:
        couCol.append("orange")
    else:
        couCol.append("black")
    # Dump labels, values and colour flags into the sheet.
    for i in range(0, lenVal):
        sheet1.write(i, 0, nomCol[i])
        sheet1.write(i, 1, valCol[i])
    for k in range(0, var1):
        sheet1.write(k, 2, couCol[k])
    return book2
import sys
from xlrd import open_workbook  # xlrd: library for reading .xls workbooks
from xlwt import Workbook  # xlwt: library for writing .xls workbooks

# Usage: script.py <input.xls> <output.xls>
source_path = sys.argv[1]
destination_path = sys.argv[2]

# Instantiate the destination workbook and give it a single output sheet.
result_book = Workbook()
result_sheet = result_book.add_sheet('jan_2013_output')

# Copy every cell of the source's 'january_2013' sheet verbatim into the
# output sheet, preserving row/column positions.
with open_workbook(source_path) as source_book:
    source_sheet = source_book.sheet_by_name('january_2013')
    for row_idx in range(source_sheet.nrows):
        for col_idx in range(source_sheet.ncols):
            result_sheet.write(row_idx, col_idx,
                               source_sheet.cell_value(row_idx, col_idx))

result_book.save(destination_path)
def print_report_excel(self, cr, uid, ids, context=None):
    """Build the 'Asset Register' .xls report for the wizard's selection and
    return an act_window whose context carries the base64-encoded file.

    Groups open assets by category (optionally filtered by company, fiscal
    year, period range or date range) and writes, per asset: identification,
    acquisition data, B/F accumulated depreciation, the period's depreciation,
    total accumulated depreciation and net book value, plus category totals.

    NOTE(review): Python 2 / OpenERP-style code; relies on module-level
    imports defined elsewhere in the file (Font, Alignment, Borders, Pattern,
    XFStyle, Workbook, xlwt, time, datetime, cStringIO, base64).
    """
    if context is None:
        context = {}
    asset_obj = self.pool.get('account.asset.asset')
    asset_categ_obj = self.pool.get('account.asset.category')
    company_obj = self.pool.get('res.company')
    year_obj = self.pool.get('account.fiscalyear')
    period_obj = self.pool.get('account.period')
    asset_depreciation_line_obj = self.pool.get(
        'account.asset.depreciation.line')
    fnt = Font()
    fnt.name = 'Arial'
    fnt.height = 220
    fnt1 = Font()
    fnt1.name = 'Arial'
    fnt1.height = 220
    fnt1.bold = 'on'
    # Define the font attributes for header
    content_fnt = Font()
    content_fnt.name = 'Arial'
    content_fnt.height = 220
    align_content = Alignment()
    align_content.horz = Alignment.HORZ_LEFT
    borders = Borders()
    borders.left = 0x02
    borders.right = 0x02
    borders.top = 0x02
    borders.bottom = 0x02
    # The text should be centrally aligned
    align = Alignment()
    align.horz = Alignment.HORZ_LEFT
    align.vert = Alignment.VERT_TOP
    align.wrap = Alignment.WRAP_AT_RIGHT
    # The text should be right aligned
    align1 = Alignment()
    align1.horz = Alignment.HORZ_RIGHT
    align1.vert = Alignment.VERT_TOP
    align1.wrap = Alignment.WRAP_AT_RIGHT
    # The content should be left aligned
    align2 = Alignment()
    align2.horz = Alignment.HORZ_LEFT
    align2.vert = Alignment.VERT_TOP
    align2.wrap = Alignment.WRAP_AT_RIGHT
    # The content should be right aligned
    align3 = Alignment()
    align3.horz = Alignment.HORZ_RIGHT
    align3.vert = Alignment.VERT_TOP
    align3.wrap = Alignment.WRAP_AT_RIGHT
    # We set the backgroundcolour here
    pattern = Pattern()
    pattern.pattern = Pattern.SOLID_PATTERN
    pattern.pattern_fore_colour = 0x1F
    # We set the backgroundcolour here
    pattern1 = Pattern()
    pattern1.pattern = Pattern.SOLID_PATTERN
    pattern1.pattern_fore_colour = 0x17
    # We set the backgroundcolour here
    pattern2 = Pattern()
    pattern2.pattern = Pattern.SOLID_PATTERN
    pattern2.pattern_fore_colour = 0xFF
    # We set the backgroundcolour here
    pattern3 = Pattern()
    pattern3.pattern = Pattern.SOLID_PATTERN
    pattern3.pattern_fore_colour = 0xFF
    # apply the above settings to the row(0) header
    style_header = XFStyle()
    style_header.font = fnt1
    style_header.pattern = pattern
    style_header.borders = borders
    style_header.alignment = align
    style_header_right = XFStyle()
    style_header_right.font = fnt1
    style_header_right.pattern = pattern
    style_header_right.borders = borders
    style_header_right.alignment = align3
    # apply the above settings to the row(1) header
    style_header1 = XFStyle()
    style_header1.font = fnt
    style_header1.pattern = pattern1
    style_header1.borders = borders
    style_header1.alignment = align1
    # apply the above settings to the content
    style_content_left = XFStyle()
    style_content_left.font = fnt
    style_content_left.pattern = pattern2
    style_content_left.borders = borders
    style_content_left.alignment = align2
    style_content_right = XFStyle()
    style_content_right.font = fnt
    style_content_right.pattern = pattern3
    style_content_right.borders = borders
    style_content_right.alignment = align3
    style_content = XFStyle()
    style_content.alignment = align_content
    style_content.font = content_fnt
    wb = Workbook()
    ws = wb.add_sheet('Sheet 1')
    ws.row(0).height = 500
    # Uniform column widths for all 14 report columns.
    ws.col(0).width = 6500
    ws.col(1).width = 6500
    ws.col(2).width = 6500
    ws.col(3).width = 6500
    ws.col(4).width = 6500
    ws.col(5).width = 6500
    ws.col(6).width = 6500
    ws.col(7).width = 6500
    ws.col(8).width = 6500
    ws.col(9).width = 6500
    ws.col(10).width = 6500
    ws.col(11).width = 6500
    ws.col(12).width = 6500
    ws.col(13).width = 6500
    style = xlwt.easyxf('font: bold on,height 240,color_index 0X36;'
                        'align: horiz center;')
    ws.write(0, 2, 'Asset Report', style)
    # Wizard form values (company, fiscal year, filter mode, ranges).
    data = self.read(cr, uid, ids, [], context=context)[0]
    company = company_obj.browse(cr, uid, data['company_id'][0])
    if data['fiscalyear_id']:
        year = year_obj.browse(cr, uid, data['fiscalyear_id'][0]).name
    else:
        year = ''
    filter = ''
    if data['filter'] == 'filter_date':
        filter = 'Dates'
    elif data['filter'] == 'filter_period':
        filter = 'Periods'
    else:
        filter = 'No Filters'
    from_date = False
    to_date = False
    # Report header block: company, run timestamp, fiscal year, filter mode.
    ws.row(2).height = 500
    ws.write(2, 0, 'Company Name', style_header)
    ws.write(2, 1, company.name, style_header)
    ws.row(3).height = 500
    ws.write(3, 0, 'Report Run', style_header)
    ws.write(3, 1, time.strftime('%Y-%m-%d %H:%M:%S'), style_header)
    ws.row(4).height = 500
    ws.write(4, 0, 'Fiscal Year', style_header)
    ws.write(4, 1, year, style_header)
    ws.row(5).height = 500
    ws.write(5, 0, 'Filters', style_header)
    ws.write(5, 1, filter, style_header)
    ws.row(6).height = 500
    if data['filter'] == 'filter_period':
        from_period_id = data['period_from'][0]
        to_period_id = data['period_to'][0]
        # NOTE(review): both branches test the same condition, so the elif is
        # unreachable — kept as-is.
        if from_period_id and to_period_id:
            from_date = period_obj.browse(cr, uid, from_period_id).date_start
            to_date = period_obj.browse(cr, uid, to_period_id).date_stop
        elif from_period_id and to_period_id:
            from_date = period_obj.browse(cr, uid, from_period_id).date_start
            to_date = period_obj.browse(cr, uid, from_period_id).date_stop
        ws.write(6, 0, 'Start Period', style_header)
        ws.write(6, 1, data['period_from'][1], style_header)
        ws.write(7, 0, 'End Period', style_header)
        ws.write(7, 1, data['period_to'][1], style_header)
    elif data['filter'] == 'filter_date':
        from_date = data['date_from']
        to_date = data['date_to']
        ws.write(6, 0, 'Start Date', style_header)
        ws.write(
            6, 1,
            datetime.strptime(data['date_from'],
                              '%Y-%m-%d').strftime('%m/%d/%Y'), style_header)
        ws.write(7, 0, 'End Date', style_header)
        ws.write(
            7, 1,
            datetime.strptime(data['date_to'],
                              '%Y-%m-%d').strftime('%m/%d/%Y'), style_header)
    row = 10
    # Resolve the effective date window again for the asset queries below.
    from_date = False
    to_date = False
    if data['filter'] == 'filter_period':
        from_period_id = data['period_from'][0]
        to_period_id = data['period_to'][0]
        if from_period_id and to_period_id:
            from_date = period_obj.browse(cr, uid, from_period_id).date_start
            to_date = period_obj.browse(cr, uid, to_period_id).date_stop
        elif from_period_id and to_period_id:
            from_date = period_obj.browse(cr, uid, from_period_id).date_start
            to_date = period_obj.browse(cr, uid, from_period_id).date_stop
    elif data['filter'] == 'filter_date':
        from_date = data['date_from']
        to_date = data['date_to']
    else:
        # No explicit filter: fall back to the fiscal year's bounds if given.
        if data['fiscalyear_id']:
            from_date = year_obj.browse(
                cr, uid, data['fiscalyear_id'][0]).date_start
            to_date = year_obj.browse(cr, uid,
                                      data['fiscalyear_id'][0]).date_stop
        else:
            pass
    if data['asset_categ_ids']:
        categories_ids = data['asset_categ_ids']
    else:
        categories_ids = asset_categ_obj.search(cr, uid, [])
    if data['company_id']:
        company_ids = [data['company_id'][0]]
    else:
        company_ids = company_obj.search(cr, uid, [])
    # One section per asset category: header row, one row per open asset,
    # then a totals row.
    for categ_id in categories_ids:
        ws.row(row).height = 500
        domain = []
        if from_date and to_date:
            #domain.append(('purchase_date', '>=', from_date))
            #domain.append(('purchase_date', '<=', to_date))
            pass
        domain.append(('category_id', '=', categ_id))
        domain.append(('company_id', 'in', company_ids))
        domain.append(('state', '=', 'open'))
        asset_ids = asset_obj.search(cr, uid, domain)
        asset_categ = asset_categ_obj.browse(cr, uid, categ_id)
        count = 1
        ws.row(row).height = 800
        ws.write(row, 0, 'Asset Category:', style_header)
        ws.write(row, 1, asset_categ.name, style_header)
        row += 1
        ws.row(row).height = 700
        ws.write(row, 0, 'No', style_header)
        ws.write(row, 1, 'Asset Tag No', style_header)
        ws.write(row, 2, 'Asset Description', style_header)
        ws.write(row, 3, 'Location', style_header)
        ws.write(row, 4, 'Document Reference', style_header)
        ws.write(row, 5, 'Requisition Date', style_header)
        ws.write(row, 6, 'Requisition Value', style_header)
        ws.write(row, 7, 'Salvage Value', style_header)
        ws.write(row, 8, 'Depreciation Method', style_header)
        ws.write(row, 9, 'Number of Usage', style_header)
        ws.write(row, 10, 'B/F Accumulated Depreciation', style_header)
        ws.write(row, 11, 'Depreciation', style_header)
        ws.write(row, 12, 'Accumulated Depreciation', style_header)
        ws.write(row, 13, 'Net Book Value', style_header)
        row += 1
        total_req_val = 0.0
        total_sal_val = 0.0
        total_bf_accum_depr = 0.0
        total_next_depr = 0.0
        total_accum_depr = 0.0
        total_net_book = 0.0
        for asset in asset_obj.browse(cr, uid, asset_ids):
            bf_accum_depr = 0.0
            next_amount_depr = 0.0
            accum_depr_val = 0.0
            net_book_val = 0.0
            method = ''
            if asset.method == 'linear':
                method = 'Linear'
            elif asset.method == 'degressive':
                method = 'Degressive'
            # location = ''
            #
            # if asset.move_id and asset.move_id.location_dest_id:
            #     location = asset.move_id.location_dest_id.name
            ws.row(row).height = 500
            ws.write(row, 0, count, style_content_left)
            ws.write(row, 1, asset.code or '', style_content_left)
            ws.write(row, 2, asset.product_desc or '', style_content_left)
            ws.write(row, 3, asset.asset_location or '', style_content_left)
            ws.write(row, 4, asset.picking_id.name or '', style_content_left)
            row_date = datetime.strptime(
                asset.purchase_date, '%Y-%m-%d').strftime('%m/%d/%Y') or ''
            ws.write(row, 5, row_date, style_content_left)
            ws.write(row, 6, asset.purchase_value or 0.0, style_content_right)
            ws.write(row, 7, asset.salvage_value or 0.0, style_content_right)
            ws.write(row, 8, method or '', style_content_left)
            ws.write(row, 9, asset.method_number or '', style_content_left)
            # Brought-forward accumulated depreciation: the depreciation line
            # at the window's starting period, or the latest posted line.
            if from_date and to_date:
                period_id = period_obj.find(cr, uid, dt=from_date)
                bf_line_ids = asset_depreciation_line_obj.search(
                    cr, uid, [('asset_id', '=', asset.id),
                              ('effective_period_id', '=', period_id[0])])
                if bf_line_ids:
                    asset_bf_line = asset_depreciation_line_obj.browse(
                        cr, uid, bf_line_ids[0])
                    bf_accum_depr = asset_bf_line.depreciated_value
            else:
                bf_line_ids = asset_depreciation_line_obj.search(
                    cr, uid, [('asset_id', '=', asset.id),
                              ('move_check', '=', True)],
                    order='effective_date desc', limit=1)
                if bf_line_ids:
                    asset_bf_line = asset_depreciation_line_obj.browse(
                        cr, uid, bf_line_ids[0])
                    bf_accum_depr = asset_bf_line.depreciated_value
            #To find current depreciation value: Need to search depreciation line on depreciation board for respected "Amount already depreciated".
            # Here we will match "Amount already depreciated" (bf_accum_depr) in depreciation line and matched depreciation line's current depreciation will be taken.
            # Depreciation inside the window: sum of the posted lines whose
            # effective date falls in [from_date, to_date] (or all posted lines).
            if from_date and to_date:
                period_id = period_obj.find(cr, uid, dt=from_date)
                bf_line_ids = asset_depreciation_line_obj.search(
                    cr, uid, [('asset_id', '=', asset.id),
                              ('effective_date', '>=', from_date),
                              ('effective_date', '<=', to_date),
                              ('move_check', '=', True)])
                if bf_line_ids:
                    for asset_bf_line in asset_depreciation_line_obj.browse(
                            cr, uid, bf_line_ids):
                        next_amount_depr += asset_bf_line.amount
            else:
                period_id = period_obj.find(cr, uid, dt=from_date)
                bf_line_ids = asset_depreciation_line_obj.search(
                    cr, uid, [('asset_id', '=', asset.id),
                              ('move_check', '=', True)])
                if bf_line_ids:
                    for asset_bf_line in asset_depreciation_line_obj.browse(
                            cr, uid, bf_line_ids):
                        next_amount_depr += asset_bf_line.amount
            ws.write(row, 10, bf_accum_depr, style_content_right)
            ws.write(row, 11, next_amount_depr, style_content_right)
            accum_depr_val = (bf_accum_depr + next_amount_depr)
            ws.write(row, 12, accum_depr_val, style_content_right)
            net_book_val = (asset.purchase_value - accum_depr_val)
            ws.write(row, 13, net_book_val, style_content_right)
            # Accumulate the category totals.
            total_req_val += asset.purchase_value
            total_sal_val += asset.salvage_value
            total_bf_accum_depr += bf_accum_depr
            total_next_depr += next_amount_depr
            total_accum_depr += accum_depr_val
            total_net_book += net_book_val
            row += 1
            count += 1
        # Category totals row.
        ws.row(row).height = 500
        ws.write(row, 0, '', style_header_right)
        ws.write(row, 1, '', style_header_right)
        ws.write(row, 2, 'Total', style_header_right)
        ws.write(row, 3, '', style_header_right)
        ws.write(row, 4, '', style_header_right)
        ws.write(row, 5, '', style_header_right)
        ws.write(row, 6, total_req_val, style_header_right)
        ws.write(row, 7, total_sal_val, style_header_right)
        ws.write(row, 8, '', style_header_right)
        ws.write(row, 9, '', style_header_right)
        ws.write(row, 10, total_bf_accum_depr, style_header_right)
        ws.write(row, 11, total_next_depr, style_header_right)
        ws.write(row, 12, total_accum_depr, style_header_right)
        ws.write(row, 13, total_net_book, style_header_right)
        row += 3
    # Serialize the workbook in memory and hand it to the download wizard.
    f = cStringIO.StringIO()
    wb.save(f)
    out = base64.encodestring(f.getvalue())
    return {
        'name': 'Assets Register Reports',
        'res_model': 'xls.report.wizard',
        'type': 'ir.actions.act_window',
        'view_type': 'form',
        'view_mode': 'form',
        'target': 'new',
        'nodestroy': True,
        'context': {
            'data': out,
            'name': 'Asset Register Report.xls'
        }
    }
def getPanCard():
    """OCR every image in the PAN CARD folder and dump the extracted fields
    (PAN number, date of birth, name, father's name, source file) into
    'PAN CARD DATA.xls', one row per image.

    NOTE(review): relies on module-level names defined elsewhere in the file
    (listdir, isfile, join, Workbook, Image, ImageFile, cv2, os, pytesseract,
    ftfy, re, prePrcoessing) and on hard-coded Windows paths — confirm the
    environment before running.
    """
    # from PIL import Image, ImageFile
    x_index = 1
    mypath = 'D:\Personal\Machine Learning\PAN CARD'
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    files_length = len(onlyfiles)
    print(files_length)
    # Workbook is created
    wb = Workbook()
    # add_sheet is used to create sheet.
    sheet1 = wb.add_sheet('Sheet 1', cell_overwrite_ok=True)
    # Write Headers to sheet
    sheet1.write(0, 0, 'S.No')
    sheet1.write(0, 1, 'PAN Number')
    sheet1.write(0, 2, 'Date of Birth')
    sheet1.write(0, 3, 'Name')
    sheet1.write(0, 4, 'Fathers Name')
    sheet1.write(0, 5, 'File Name')
    ixsheet = 0  # index of the image currently being processed
    # print(files_lenth)
    while ixsheet < files_length:
        ImageFile.LOAD_TRUNCATED_IMAGES = True
        # Strip the guard quotes to recover the raw folder path.
        x = "'D:\Personal\Machine Learning\PAN CARD\'"
        dir_path = x.replace("'", "")
        file_path = onlyfiles[ixsheet]
        join_path = join(dir_path, file_path)
        print(join_path)
        im = Image.open(join_path)
        # load the example image and convert it to grayscale
        image = cv2.imread(join_path)
        gray = prePrcoessing(image)
        # write the grayscale image to disk as a temporary file so we can
        # apply OCR to it
        filename = "{}.png".format(os.getpid())
        cv2.imwrite(filename, gray)
        ''' A blurring method may be applied. We apply a median blur when the --preprocess flag is set to blur. Applying a median blur can help reduce salt and pepper noise, again making it easier for Tesseract to correctly OCR the image. After pre-processing the image, we use os.getpid to derive a temporary image filename based on the process ID of our Python script. The final step before using pytesseract for OCR is to write the pre-processed image, gray, to disk saving it with the filename from above '''
        ##############################################################################################################
        ######################################## Section 3: Running PyTesseract ######################################
        ##############################################################################################################
        # load the image as a PIL/Pillow image, apply OCR, and then delete
        # the temporary file
        pytesseract.pytesseract.tesseract_cmd = 'D:\\Tesseract-OCR\\tesseract.exe'
        text = pytesseract.image_to_string(Image.open(filename), lang='eng')
        # add +hin after eng within the same argument to extract hindi specific text - change encoding to utf-8 while writing
        os.remove(filename)
        # print(text)
        # show the output images
        # cv2.imshow("Image", image)
        # cv2.imshow("Output", gray)
        # cv2.waitKey(0)
        # writing extracted data into a text file
        text_output = open('outputbase.txt', 'w', encoding='utf-8')
        text_output.write(text)
        text_output.close()
        file = open('outputbase.txt', 'r', encoding='utf-8')
        text = file.read()
        # print(text)
        # Cleaning all the gibberish text
        text = ftfy.fix_text(text)
        text = ftfy.fix_encoding(text)
        # Strip punctuation that confuses the downstream regex matching.
        bad_chars = [
            '~', '`', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '{',
            '}', "'", '[', ']', '|', ':', ';', ',', '<', '>', '.', '?', '+',
            '=', '_'
        ]
        for i in bad_chars:
            text = text.replace(i, '')
        '''for god_damn in text: if nonsense(god_damn): text.remove(god_damn) else: print(text)'''
        # print(text)
        ############################################################################################################
        ###################################### Section 4: Extract relevant information #############################
        ############################################################################################################
        # Initializing data variable
        name = None
        fname = None
        dob = None
        pan = None
        nameline = []
        dobline = []
        panline = []
        text0 = []
        text1 = []
        text2 = []
        # Searching for PAN
        lines = text.split('\n')
        for lin in lines:
            s = lin.strip()
            s = lin.replace('\n', '')
            s = s.rstrip()
            s = s.lstrip()
            text1.append(s)
        text1 = list(filter(None, text1))
        # print(text1)
        # to remove any text read from the image file which lies before the line 'Income Tax Department'
        lineno = 0  # to start from the first line of the text file.
        for wordline in text1:
            xx = wordline.split('\n')
            if ([
                    w for w in xx if re.search(
                        '(INCOMETAXDEPARWENT @|mcommx|INCOME|TAX|GOW|GOVT|GOVERNMENT|OVERNMENT|VERNMENT|DEPARTMENT|EPARTMENT|PARTMENT|ARTMENT|INDIA|NDIA)$',
                        w)
            ]):
                text1 = list(text1)
                lineno = text1.index(wordline)
                break
        # text1 = list(text1)
        text0 = text1[lineno + 1:]
        # print(text0)
        # Contains all the relevant extracted text in form of a list - uncomment to check

        def findword(textlist, wordstring):
            # Return the sub-list that follows the first line matching
            # *wordstring*; the whole list if nothing matches.
            lineno = -1
            for wordline in textlist:
                xx = wordline.split()
                if ([w for w in xx if re.search(wordstring, w)]):
                    lineno = textlist.index(wordline)
                    textlist = textlist[lineno + 1:]
                    return textlist
            return textlist
        ###############################################################################################################
        ######################################### Section 5: Dishwasher part #########################################
        ###############################################################################################################
        try:
            # Cleaning first names, better accuracy
            name = text0[0]
            name = name.rstrip()
            name = name.lstrip()
            name = name.replace("8", "B")
            name = name.replace("0", "D")
            name = name.replace("6", "G")
            name = name.replace("1", "I")
            name = re.sub('[^a-zA-Z] +', ' ', name)
            # Cleaning Father's name
            fname = text0[1]
            fname = fname.rstrip()
            fname = fname.lstrip()
            fname = fname.replace("8", "S")
            fname = fname.replace("0", "O")
            fname = fname.replace("6", "G")
            fname = fname.replace("1", "I")
            fname = fname.replace("\"", "A")
            fname = re.sub('[^a-zA-Z] +', ' ', fname)
            # Cleaning DOB
            dob = re.findall(r'\d{2}[-/|-]\d{2}[-/|-]\d{4}', text)
            # Cleaning PAN Card details
            text0 = findword(
                text1,
                '(Pormanam|Number|umber|Account|ccount|count|Permanent|ermanent|manent|wumm)$'
            )
            pan = re.findall(
                r'\w{2}[a-zA-Z]\w{0}[P,C,H,A,B,G,J,L,F,T]\w{0}[A-Z]\w{3}[0-9]\w{0}[A-Z]',
                text)
            if pan == []:
                # Fallback: any 8-char token ending in an uppercase letter.
                pan = re.findall(r'\w{7}[A-Z]', text)
            finlen = len(pan)
            pan = pan[finlen - 1]
            print(pan)
        except:
            # NOTE(review): bare except silently skips any parsing failure for
            # this image — the row is still written with whatever was extracted.
            pass
        # Making tuples of data
        data = {}
        data['Name'] = name
        data['Father Name'] = fname
        data['Date of Birth'] = dob
        data['PAN'] = pan
        sheet1.write(ixsheet + 1, 0, ixsheet + 1)
        sheet1.write(ixsheet + 1, 1, data['PAN'])
        sheet1.write(ixsheet + 1, 3, data['Name'])
        sheet1.write(ixsheet + 1, 4, data['Father Name'])
        if dob:
            sheet1.write(ixsheet + 1, 2, data['Date of Birth'])
        sheet1.write(ixsheet + 1, 5, join_path)
        ixsheet = ixsheet + 1
    wb.save('PAN CARD DATA.xls')
def classify(filenames, directory):
    """Score and rank curve files in `directory` by peak quality, then plot them.

    For each Excel file: smooths column 1 with a Savitzky-Golay filter, finds
    peaks, reduces them via findTruePeaks/findBestPeaks, writes per-file metrics
    to 'Correlation3.xls', and combines the metrics into a single score. Files
    are then re-plotted in descending score order on a shared subplot grid.

    Relies on module-level helpers: dimensions, removeData, findTruePeaks,
    findBestPeaks, peaksMin.

    Returns:
        (filenames, allScores): both sorted by descending score.
    """
    def mean(l):
        # Unused local helper (kept as-is).
        return sum(l) / len(l)

    # Normalize directory so filenames can be appended directly.
    if directory[-1] != '/':
        directory += '/'

    wb = Workbook()
    sheet = wb.add_sheet('Sheet 1')
    sheet.write(0, 0, 'Filename')
    sheet.write(0, 1, 'Ratio of Peaks Found')
    sheet.write(0, 2, 'Ratio of Peaks to Ideal')
    sheet.write(0, 3, 'Ratio of Range')
    sheet.write(0, 4, 'Inverse Standard Deviation')
    sheet.write(0, 5, 'Smoothing Error')
    sheet.write(0, 6, 'Curve Score')

    # Subplot grid sized by helper `dimensions`; one axis per file.
    n, m = dimensions(len(filenames))
    fig, axs = plt.subplots(n, m)
    axs = axs.flatten()
    fig.tight_layout(pad=1.5)

    allScores = []
    #allPeaks = []
    allNewPeaks = []
    for i, filename in zip(range(len(filenames)), filenames):
        sheet.write(i + 1, 0, filename)
        df = pd.read_excel(directory + filename)
        #print(df)
        df = df.to_numpy()
        df = removeData(df, 20)  # presumably drops the first 20 rows — TODO confirm against helper
        #df = df[0:198, :]
        yhat = savgol_filter(df[:, 1], 31, 6)

        # Mean-absolute smoothing residual (inverted later as a quality metric).
        smoothError = 0
        for j in range(len(yhat)):
            smoothError += abs(yhat[j] - df[j, 1])
        #print(1/(smoothError/len(yhat)))
        #yhat = df[:, 1]
        #yhat = savgol_filter(df[:, 1], 21, 6)
        #yhat = savgol_filter(yhat, 21, 6)

        peaks, _ = find_peaks(yhat)
        peaks += 20  # shift indices back to the un-trimmed coordinate system
        diff = []
        # NOTE(review): after `peaks += 20` no element can equal 1 — this
        # branch looks dead; probably predates the offset. Left unchanged.
        if peaks[0] == 1:
            peaks = peaks[1:]
        #allPeaks.append(peaks)

        # Re-pad the smoothed curve so the shifted peak indices line up.
        y = np.concatenate([np.zeros(20), yhat])
        newPeaks = findTruePeaks(peaks, y)
        #newPeaks = newPeaks[0:6]
        if len(newPeaks) > 6:
            newPeaks = findBestPeaks(newPeaks)
        allNewPeaks.append(newPeaks)
        diff = [newPeaks[i] - newPeaks[i - 1] for i in range(1, len(newPeaks))]
        #print(i)

        # Per-file metric columns.
        sheet.write(i + 1, 1, len(newPeaks) / len(peaks))
        sheet.write(i + 1, 2, peaksMin(len(newPeaks)))
        sheet.write(i + 1, 3, ((newPeaks[-1] - newPeaks[0]) / 180))
        sheet.write(i + 1, 4, (1 / np.std(diff)))
        sheet.write(i + 1, 5, 1 / (smoothError / len(yhat)))

        # Empirically tuned exponents; `t` is currently unused in the score.
        p, q, r, s, t = 909 / 945, 945 / 945, 416 / 945, 889 / 945, 500 / 997
        score = (len(newPeaks) / len(peaks))**p * (peaksMin(
            len(newPeaks))**q) * ((
                (newPeaks[-1] - newPeaks[0]) / 180)**r) * (1 / np.std(diff))**s
        score = round(score, 4)
        if score == np.inf:
            score = 0
        sheet.write(i + 1, 6, score)
        # NOTE(review): saving inside the loop rewrites the file every
        # iteration — appears intentional (incremental progress).
        wb.save('Correlation3.xls')
        allScores.append(score)

    # Sort files by score. Indices are sorted as strings (lexicographic
    # tiebreak), then used to reorder the parallel lists.
    order = [str(i) for i in range(len(filenames))]
    allScores, order = (list(t) for t in zip(*sorted(zip(allScores, order))))
    filenames = [filenames[int(i)] for i in order]
    allNewPeaks = [allNewPeaks[int(i)] for i in order]
    # Reverse so the best score comes first.
    filenames = filenames[::-1]
    allScores = allScores[::-1]
    allNewPeaks = allNewPeaks[::-1]

    # Re-read and plot each file on its subplot, best first.
    for filename, row, score, newPeaks in zip(filenames, axs, allScores,
                                              allNewPeaks):
        df = pd.read_excel(directory + filename)
        df = df.to_numpy()
        df = removeData(df, 20)
        df = df[0:180, :]
        yhat = savgol_filter(df[:, 1], 31, 6)
        #yhat = savgol_filter(yhat, 21, 6)
        y = np.concatenate([np.zeros(20), yhat])
        row.plot(df[:, 0], df[:, 1], c='b')   # raw data
        row.plot(df[:, 0], yhat, c='r')       # smoothed curve
        row.plot(newPeaks, [y[i] for i in newPeaks], 'x', c='g')  # chosen peaks
        row.set_title(str(score))  #returnTitle(filename)+ ': ' + str(score))
        row.set_xticks([])
        row.set_yticks([])
    plt.show()
    return filenames, allScores
def createNewFile(filename, sheetname="sheet1"):
    """Create a fresh .xls workbook holding one empty sheet and save it.

    Args:
        filename: Destination path for the workbook.
        sheetname: Name given to the single sheet (default "sheet1").
    """
    workbook = Workbook()
    workbook.add_sheet(sheetname)
    workbook.save(filename)
def computeAverage(inpath, projectDict, excelName, flag=1):
    """Average per-training-run BP results into one 'average...' workbook per project.

    Python 2 code (`xrange`, `unicode`, `except E, e` syntax).

    For each project, reads `excelName` from every training-run folder under
    `inpath`, accumulates precision/recall/f1 per top-k column group, and
    writes the averages to a new 'BPResult' sheet.

    Args:
        inpath: Root directory of the experiment folders.
        projectDict: Maps project name -> number of training runs.
        excelName: Result workbook filename to read in each run folder.
        flag: Column layout selector — truthy: groups start at column 0
              (special-cased for '100_Corpus_BPresult.xls'); falsy: groups
              start at column 1.
    """
    for projectName in projectDict:
        projectFile = projectName + '项目实验/'
        wb = Workbook()
        resSheet = wb.add_sheet('BPResult', cell_overwrite_ok=True)
        # Header rows: one 3-column group (precision/recall/f1) per top-k.
        colIndex = 0
        for tmpi in [1, 2, 3, 4, 5, 10]:
            resSheet.write(0, colIndex, 'top' + str(tmpi))
            resSheet.write(1, colIndex, 'precision')
            resSheet.write(1, colIndex + 1, 'recall')
            resSheet.write(1, colIndex + 2, 'f1')
            colIndex += 3
        # Per-run results read from Excel; top10 is stored under key 6.
        BPprecisionDict = {1: [], 2: [], 3: [], 4: [], 5: [], 6: []}
        BPrecallDict = {1: [], 2: [], 3: [], 4: [], 5: [], 6: []}
        BPF1Dict = {1: [], 2: [], 3: [], 4: [], 5: [], 6: []}
        for i in xrange(1, projectDict[projectName] + 1):
            trainNumberFile = '第' + str(i) + '次训练/'
            try:
                rb = xlrd.open_workbook(
                    unicode(
                        inpath + projectFile + trainNumberFile + '推荐结果/' +
                        excelName, 'utf-8'))
            except IOError as e:
                # NOTE(review): only logs the missing path; `rb` then keeps
                # its previous value (or is undefined on the first run).
                print(unicode(
                    inpath + projectFile + trainNumberFile + '推荐结果/' +
                    excelName, 'utf-8'))
            sheet1 = rb.sheets()[0]
            if flag:
                # Column groups start at 0; one workbook variant carries an
                # extra trailing column that is skipped.
                if excelName == '100_Corpus_BPresult.xls':
                    cols = sheet1.ncols
                else:
                    cols = sheet1.ncols - 1
                for j in range(0, cols):
                    # j % 3 picks precision/recall/f1 within each group;
                    # Python 2 integer division maps the column to its top-k key.
                    if j % 3 == 0:
                        BPprecisionDict[j / 3 + 1].append(
                            sheet1.cell(2, j).value)
                    elif (j - 1) % 3 == 0:
                        BPrecallDict[(j - 1) / 3 + 1].append(
                            sheet1.cell(2, j).value)
                    elif (j - 2) % 3 == 0:
                        BPF1Dict[(j - 2) / 3 + 1].append(
                            sheet1.cell(2, j).value)
            else:
                # Column groups start at 1.
                cols = sheet1.ncols
                for j in range(1, cols):
                    if (j - 1) % 3 == 0:
                        BPprecisionDict[(j - 1) / 3 + 1].append(
                            sheet1.cell(2, j).value)
                    elif (j - 2) % 3 == 0:
                        BPrecallDict[(j - 2) / 3 + 1].append(
                            sheet1.cell(2, j).value)
                    elif (j - 3) % 3 == 0:
                        BPF1Dict[(j - 3) / 3 + 1].append(
                            sheet1.cell(2, j).value)
        # Write the per-top-k averages on row 2.
        colIndex = 0
        for i in [1, 2, 3, 4, 5, 6]:
            BPprecisionValue = sum(BPprecisionDict[i]) / len(BPprecisionDict[i])
            BPrecallValue = sum(BPrecallDict[i]) / len(BPrecallDict[i])
            try:
                BPF1Value = sum(BPF1Dict[i]) / len(BPF1Dict[i])
            except ZeroDivisionError, e:
                # NOTE(review): after this, BPF1Value keeps its previous value
                # (or is undefined on the first group) yet is still written below.
                print(inpath + projectFile + trainNumberFile + excelName)
            resSheet.write(2, colIndex,
                           BPprecisionValue)
            resSheet.write(2, colIndex + 1, BPrecallValue)
            resSheet.write(2, colIndex + 2, BPF1Value)
            colIndex += 3
        wb.save(
            unicode(inpath + projectFile + 'average' + projectName + excelName,
                    'utf-8'))
def mergeTagMulResAndBPRes():
    """Merge TagMulRec and BP recommendation results into comparison workbooks.

    Python 2 code (`xrange`, `unicode`). Currently hard-filtered to the
    'tgstation' project only (see the `continue` below).

    Reads each training run's 'tagMulRecRes.xls', accumulates precision /
    recall / f1 per top-k for both approaches, and writes:
      - per-project '<name>_allBRresult.xls' and '<name>_allTagMulRecRes.xls'
      - a combined cross-project workbook at `outpath` (two rows per project:
        TagMulRec then BP).
    """
    outpath = r'/media/mamile/DATA1/tagRecommendation_github/BP_rails/三个项目的实验对比结果2.xls'
    allProjectWb = Workbook()
    allProjectSheet = allProjectWb.add_sheet('allProjectResult',
                                             cell_overwrite_ok=True)
    # Cross-project header: columns 0-1 hold project/approach labels, so the
    # top-k groups start at column 2.
    index = 0
    for tmpi in [1, 2, 3, 4, 5, 10]:
        allProjectSheet.write(0, index + 2, 'top' + str(tmpi))
        allProjectSheet.write(1, index + 2, 'precision')
        allProjectSheet.write(1, index + 3, 'recall')
        allProjectSheet.write(1, index + 4, 'f1')
        index += 3
    allProjectSheet_line = 2
    basicFilepath = r'/media/mamile/DATA1/tagRecommendation_github/BP_rails/10个项目的BP神经网络实验/'
    projectDict = smallCorpusTrain.computePRofTestCorpusNumber()
    print(projectDict)
    for projectName in projectDict:
        # Per-run results read from Excel; top10 is stored under key 6.
        BPprecisionDict = {1: [], 2: [], 3: [], 4: [], 5: [], 6: []}
        BPrecallDict = {1: [], 2: [], 3: [], 4: [], 5: [], 6: []}
        BPF1Dict = {1: [], 2: [], 3: [], 4: [], 5: [], 6: []}
        TagMulprecisionDict = {1: [], 2: [], 3: [], 4: [], 5: [], 6: []}
        TagMulrecallDict = {1: [], 2: [], 3: [], 4: [], 5: [], 6: []}
        TagMulF1Dict = {1: [], 2: [], 3: [], 4: [], 5: [], 6: []}
        # if projectName not in ['ceph','symfony','owncloud']:
        if projectName != 'tgstation':
            continue
        projectFile = projectName + '项目实验/'
        wb = Workbook()
        wb1 = Workbook()
        allBPRes_sheet = wb.add_sheet('result', cell_overwrite_ok=True)
        allTagMulRecRes_sheet = wb1.add_sheet('result', cell_overwrite_ok=True)
        lineNumber = 2
        lineNumber1 = 2
        # Identical top-k headers for both per-project sheets.
        index = 0
        for tmpi in [1, 2, 3, 4, 5, 10]:
            allBPRes_sheet.write(0, index, 'top' + str(tmpi))
            allBPRes_sheet.write(1, index, 'precision')
            allBPRes_sheet.write(1, index + 1, 'recall')
            allBPRes_sheet.write(1, index + 2, 'f1')
            allTagMulRecRes_sheet.write(0, index, 'top' + str(tmpi))
            allTagMulRecRes_sheet.write(1, index, 'precision')
            allTagMulRecRes_sheet.write(1, index + 1, 'recall')
            allTagMulRecRes_sheet.write(1, index + 2, 'f1')
            index += 3
        for i in xrange(1, projectDict[projectName] + 1):
            # for i in xrange(1, 2):
            trainNumberFile = '第' + str(i) + '次训练/'
            trainBasicFile = basicFilepath + projectFile + trainNumberFile + 'trainCorpus/'
            testBasicFile = basicFilepath + projectFile + trainNumberFile + 'testCorpus/'
            resultExcel = unicode(
                basicFilepath + projectFile + trainNumberFile +
                'tagMulRecRes.xls', 'utf-8')
            TagMulRec_rd = xlrd.open_workbook(resultExcel)
            TagMulRec_sheet = TagMulRec_rd.sheets()[0]
            TagMulRec_ncols = TagMulRec_sheet.ncols
            print(TagMulRec_ncols)
            try:
                for j in range(1, TagMulRec_ncols):
                    # (j-1) % 3 selects precision/recall/f1 within a group;
                    # Python 2 integer division maps the column to its top-k key.
                    if projectName == 'ceph':
                        # ceph was measured several times, so its BP row sits
                        # on row 4 instead of row 3.
                        if (j - 1) % 3 == 0:
                            TagMulprecisionDict[(j - 1) / 3 + 1].append(
                                TagMulRec_sheet.cell(2, j).value)
                            BPprecisionDict[(j - 1) / 3 + 1].append(
                                TagMulRec_sheet.cell(4, j).value)
                        elif (j - 2) % 3 == 0:
                            TagMulrecallDict[(j - 2) / 3 + 1].append(
                                TagMulRec_sheet.cell(2, j).value)
                            BPrecallDict[(j - 2) / 3 + 1].append(
                                TagMulRec_sheet.cell(4, j).value)
                        elif (j - 3) % 3 == 0:
                            TagMulF1Dict[(j - 3) / 3 + 1].append(
                                TagMulRec_sheet.cell(2, j).value)
                            BPF1Dict[(j - 3) / 3 + 1].append(
                                TagMulRec_sheet.cell(4, j).value)
                    else:
                        # Normal layout: TagMulRec on row 2, BP on row 3.
                        if (j - 1) % 3 == 0:
                            TagMulprecisionDict[(j - 1) / 3 + 1].append(
                                TagMulRec_sheet.cell(2, j).value)
                            BPprecisionDict[(j - 1) / 3 + 1].append(
                                TagMulRec_sheet.cell(3, j).value)
                        elif (j - 2) % 3 == 0:
                            TagMulrecallDict[(j - 2) / 3 + 1].append(
                                TagMulRec_sheet.cell(2, j).value)
                            BPrecallDict[(j - 2) / 3 + 1].append(
                                TagMulRec_sheet.cell(3, j).value)
                        elif (j - 3) % 3 == 0:
                            TagMulF1Dict[(j - 3) / 3 + 1].append(
                                TagMulRec_sheet.cell(2, j).value)
                            BPF1Dict[(j - 3) / 3 + 1].append(
                                TagMulRec_sheet.cell(3, j).value)
            except IndexError as e:
                print("====" + projectName)
                # NOTE(review): concatenating int `j` to a str raises
                # TypeError in the handler itself. Left unchanged.
                print("====" + j)
        # Row labels for the cross-project sheet: TagMulRec row then BP row.
        colIndex = 0
        allProjectSheet.write(allProjectSheet_line, colIndex, projectName)
        allProjectSheet.write(allProjectSheet_line, colIndex + 1, "TagMulRec")
        allProjectSheet.write(allProjectSheet_line + 1, colIndex + 1, "BP")
        for i in [1, 2, 3, 4, 5, 6]:
            # Plain means over all runs for each metric.
            BPprecisionValue = sum(BPprecisionDict[i]) / len(BPprecisionDict[i])
            BPrecallValue = sum(BPrecallDict[i]) / len(BPrecallDict[i])
            BPF1Value = sum(BPF1Dict[i]) / len(BPF1Dict[i])
            TagMulprecisionValue = sum(TagMulprecisionDict[i]) / \
                len(TagMulprecisionDict[i])
            TagMulrecallValue = sum(TagMulrecallDict[i]) / len(TagMulrecallDict[i])
            TagMulF1Value = sum(TagMulF1Dict[i]) / len(TagMulF1Dict[i])
            allTagMulRecRes_sheet.write(lineNumber, colIndex, TagMulprecisionValue)
            allTagMulRecRes_sheet.write(lineNumber, colIndex + 1, TagMulrecallValue)
            allTagMulRecRes_sheet.write(lineNumber, colIndex + 2, TagMulF1Value)
            allBPRes_sheet.write(lineNumber1, colIndex, BPprecisionValue)
            allBPRes_sheet.write(lineNumber1, colIndex + 1, BPrecallValue)
            allBPRes_sheet.write(lineNumber1, colIndex + 2, BPF1Value)
            allProjectSheet.write(allProjectSheet_line, colIndex + 2, TagMulprecisionValue)
            allProjectSheet.write(allProjectSheet_line, colIndex + 3, TagMulrecallValue)
            allProjectSheet.write(allProjectSheet_line, colIndex + 4, TagMulF1Value)
            allProjectSheet.write(allProjectSheet_line + 1, colIndex + 2, BPprecisionValue)
            allProjectSheet.write(allProjectSheet_line + 1, colIndex + 3, BPrecallValue)
            allProjectSheet.write(allProjectSheet_line + 1, colIndex + 4, BPF1Value)
            colIndex += 3
        lineNumber += 1
        lineNumber1 += 1
        # Leave room for the two data rows plus spacing per project.
        allProjectSheet_line += 4
        wb.save(unicode(basicFilepath + projectFile + projectName + '_allBRresult.xls', 'utf-8'))
        wb1.save(unicode(basicFilepath + projectFile + projectName + '_allTagMulRecRes.xls', 'utf-8'))
    allProjectWb.save(unicode(outpath, 'utf-8'))
from xlwt import Workbook
import xlwt
import os

# Build a workbook with a two-row merged header for traffic counts:
# 编号 (ID) | 方向 (direction) | 小汽车 (car) | 大车 (large vehicle), the last
# split into 公交 (bus) / 货车 (truck) / 其他 (other) on the second row.
book = Workbook(encoding="utf-8")
sheet1 = book.add_sheet("Sheet 0")
style = xlwt.XFStyle()
align = xlwt.Alignment()
align.horz = xlwt.Alignment.HORZ_CENTER
align.vert = xlwt.Alignment.VERT_CENTER
style.alignment = align
sheet1.write_merge(0, 1, 0, 0, "编号", style=style)
sheet1.write_merge(0, 1, 1, 1, "方向", style=style)
sheet1.write_merge(0, 1, 2, 2, "小汽车", style=style)
sheet1.write_merge(0, 0, 3, 5, "大车", style=style)
sheet1.write_merge(1, 1, 3, 3, "公交", style=style)
sheet1.write_merge(1, 1, 4, 4, "货车", style=style)
sheet1.write_merge(1, 1, 5, 5, "其他", style=style)

# All .txt files under ./data. Naming convention inferred from the slicing
# below: first 6 chars are a location ID, the rest (minus ".txt") a direction.
files = [file for file in os.listdir("./data") if "txt" in file]
currentID = ""
currentRow = 2
for file in files:
    if file[:6] != currentID:
        # New location ID: record it once in column 0.
        currentID = file[:6]
        sheet1.write(currentRow, 0, currentID, style=style)
    sheet1.write(currentRow, 1, file[6: -4], style=style)
    # Per-class counters keyed by vehicle-type code. NOTE(review): this
    # snippet appears truncated — `stat` is never filled or written here and
    # `currentRow` is never advanced in the visible code.
    stat = {
        "1": 0,
        "2": 0,
        "3": 0,
        "4": 0
    }
import requests import json import random import requests.utils import math from urllib import request from bs4 import BeautifulSoup import xlwt from xlwt import Workbook excel_name = 'Taobao_Comments.xls' sheet_name = 'Taobao_Comments' Taobao_Comments_excel = Workbook(excel_name) Taobao_Comments_excel = Workbook(encoding='utf-8') Taobao_Comments_sheet = Taobao_Comments_excel.add_sheet(sheet_name, cell_overwrite_ok=True) Taobao_Comments_sheet.write(0, 0, u'NO.') Taobao_Comments_sheet.write(0, 1, u'USER') Taobao_Comments_sheet.write(0, 2, u'COMMENTS') Taobao_Comments_excel.save(excel_name) #爬虫头 Cookie = " " #real cookie header = { "Connection": "keep-alive", "Upgrade-Insecure-Requests": "1", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134", "Cookie": Cookie, "Accept":
def createExcel(espece, gene, meilleur, CDS):
    """Write the two best sgRNA candidates for (species, gene) to an .xls file.

    Args:
        espece: Species name (written to column 0 and used in the filename).
        gene: Gene name (written to column 1 and used in the filename).
        meilleur: Sequence of at least two dicts with keys 'sequence',
                  'start', 'hit_20mer', 'hit_12mer', 'tm'.
        CDS: CDS value written once for the first candidate.
    """
    # Path choice (unused)
    #path = r"C:\chemin\vers\fichier.xls"
    # Create the workbook
    book = Workbook()
    #
    # Create sheet 1
    feuil1 = book.add_sheet('feuille 1')
    # Add headers (row 0) and fixed column widths.
    feuil1.write(0, 0, 'Espèce')  # row 0, col 0
    feuil1.col(0).width = 5000
    feuil1.write(0, 1, 'Gène')  # row 0, col 1
    feuil1.col(1).width = 5000
    feuil1.write(0, 2, 'sgRNA')  # row 0, col 2
    feuil1.col(2).width = 10000
    feuil1.write(0, 3, 'Nombre d appariement sur 20 bases')  # row 0, col 3
    feuil1.col(3).width = 5000
    feuil1.write(0, 4, 'Nombre d appariement sur 12 bases')  # row 0, col 4
    feuil1.col(4).width = 5000
    feuil1.write(0, 5, 'TM')  # row 0, col 5
    feuil1.col(5).width = 5000
    feuil1.write(0, 6, 'CDS')  # row 0, col 6
    feuil1.col(6).width = 5000
    # First candidate's metrics on row 1.
    ligne = feuil1.row(1)
    ligne.write(0, espece)
    ligne.write(1, gene)
    ligne.write(3, meilleur[0]['hit_20mer'])
    ligne.write(4, meilleur[0]['hit_12mer'])
    ligne.write(5, meilleur[0]['tm'])
    ligne.write(6, CDS)
    # Second candidate's metrics on row 4.
    ligne = feuil1.row(4)
    ligne.write(3, meilleur[1]['hit_20mer'])
    ligne.write(4, meilleur[1]['hit_12mer'])
    ligne.write(5, meilleur[1]['tm'])
    # Sequence + start pairs for the two candidates, written down column 2
    # on successive rows driven by `cpt`.
    liste = [[meilleur[0]['sequence'], [meilleur[0]["start"]]],
             [meilleur[1]['sequence'], [meilleur[1]["start"]]]]
    cpt = 0
    for i in liste:  # i: one candidate's [sequence, [start]] pair
        cpt = cpt + 1
        for j in i:  # j: sequence string, then the [start] list
            # NOTE(review): this string assignment is immediately overwritten
            # by the row lookup below — it has no effect.
            ligne = "ligne" + str(cpt)
            ligne = feuil1.row(cpt)
            ligne.write(2, j)  # write into the sgRNA column
            cpt = cpt + 1
    # Save the resulting file.
    # Guard against the file being open in Excel (Windows lock).
    try:
        book.save(espece + gene + '.xls')
    except PermissionError:
        print("permission non accordée - pensez à fermer le fichier excel")
#hit_20mer,tm,hit_12mer,meilleur = 'NOPE' #createExcel(espece, gene, liste, hit_20mer, tm, hit_12mer,test)
# Read the source workbook at `loc` (defined elsewhere in this file) and
# prepare an output workbook with SKU/Description/SRP headers.
wb = xlrd.open_workbook(loc)
sheet = wb.sheet_by_index(0)
# To collect input
# reqcol=input("Enter column name wanted")
# To get number of rows and columns
rowsn = sheet.nrows
colsn = sheet.ncols
# print(rowsn)
# print(colsn)
# Output workbook is created.
# NOTE(review): `global` at module level is a no-op; kept for the original
# author's intent of sharing wb2/sheet1 with functions elsewhere.
global wb2
wb2 = Workbook()
# add_sheet is used to create sheet.
global sheet1
sheet1 = wb2.add_sheet('Sheet 1')
# One flag per source column, used later to detect blank columns.
yn_list = [0] * colsn
counter = 0
second_counter = 0
# Output header row.
sheet1.write(0, 0, "SKU")
sheet1.write(0, 1, "Description")
sheet1.write(0, 2, "SRP")
def writeGlobalResult(self):
    """Write one .xls report per non-attendance poll in self.usedPolls.

    Each report lists every student (rows) against the poll's questions
    (columns): 1/0 per answer, plus question count, a "Total/Correct"
    summary and a success percentage. Saved as "<pollName>.xls".
    """
    # Executes for each poll.
    for printPoll in self.usedPolls:
        # Executes if it is not attendance Quiz
        if "Attendance" not in printPoll.pollName:
            wb = Workbook()
            sheet1 = wb.add_sheet('Sheet 1')
            row = 0
            # Writes question to first row (column 0 is reserved for names).
            for column in range(len(printPoll.questions)):
                sheet1.write(row, column + 1,
                             printPoll.questions[column].text)
            # Writes all students first name and last name
            for student in self.students:
                row = row + 1
                sheet1.write(row, 0,
                             student.FirstName + " " + student.LastName)
                # Checks all student's poll.
                for check in range(len(student.polls)):
                    # Summary column headers are written once, while on the
                    # first student's first poll.
                    if row == 1 and check == 0:
                        sheet1.write(
                            row - 1,
                            len(student.polls[check].questions) + 1,
                            "Number of Questions")
                        sheet1.write(
                            row - 1,
                            len(student.polls[check].questions) + 2,
                            "Success Rate")
                        sheet1.write(
                            row - 1,
                            len(student.polls[check].questions) + 3,
                            "Success Percentage")
                    # Only the student's copy of the poll being reported.
                    if printPoll.pollName == student.polls[check].pollName:
                        for column in range(
                                len(student.polls[check].questions)):
                            # If answer is correct it prints 1 otherwise 0.
                            if student.polls[check].questions[
                                    column].correct == True:
                                sheet1.write(row, column + 1, '1')
                            else:
                                sheet1.write(row, column + 1, '0')
                        # Prints total question, true answers and success rate.
                        sheet1.write(
                            row,
                            len(student.polls[check].questions) + 1,
                            len(student.polls[check].questions))
                        sheet1.write(
                            row,
                            len(student.polls[check].questions) + 2,
                            "Total: " +
                            str(len(student.polls[check].questions)) +
                            " Correct: " +
                            str(student.polls[check].calculateCorrect()))
                        # NOTE(review): this is a fraction (0..1) prefixed
                        # with '%', not a 0..100 percentage.
                        sheet1.write(
                            row,
                            len(student.polls[check].questions) + 3,
                            "%" + str(student.polls[check].calculateCorrect() /
                                      len(student.polls[check].questions)))
            wb.save(str(printPoll.pollName) + ".xls")
from xlwt import Workbook
from tkinter.filedialog import asksaveasfile

# Build a single-sheet workbook with three names on row 0, then let the
# user pick where to save it via a file dialog.
Wb__obj = Workbook()
my_sheet = Wb__obj.add_sheet('python_class')
for column, student in enumerate(('sparsh', 'prabal', 'dhruv')):
    my_sheet.write(0, column, student)

# asksaveasfile returns None when the dialog is cancelled.
f = asksaveasfile(mode='w', defaultextension='.csv')
if f is not None:
    Wb__obj.save(f.name)
    f.close()
def save_output(x, y, z, u, ud, udn, uks, uma, ubut, dist, dud, dudn, duks,
                duma, dubut, N, k, dt, output_file_name):
    """Dump N sensors' raw/filtered signals plus derived accelerations to .xls.

    Column layout: 21 groups of N columns each (x, y, z, u, ud, udn, uks,
    uma, ubut, then six derivative 'acc*' groups, then dist/dud/.../dubut).
    Row 0 holds headers; data starts at row 1.

    Args:
        x, y, z, u, ud, udn, uks, uma, ubut: Per-sensor sequences, indexed
            [sensor][sample]; `u*` are differently-filtered displacement
            signals — TODO confirm semantics against the producer.
        dist, dud, dudn, duks, duma, dubut: Per-sensor sequences written with
            a one-sample offset ([i+1]) over k-2 rows.
        N: Number of sensors.
        k: Number of samples written per raw column.
        dt: Sample interval; accelerations are np.diff(signal) / dt.
        output_file_name: Path the workbook is saved to.

    Returns:
        None.
    """
    wb = Workbook()
    # add_sheet is used to create sheet.
    sheet1 = wb.add_sheet('Sheet 1')
    # First differences of each u-variant, one array per sensor.
    acc = [None] * N
    accd = [None] * N
    accdn = [None] * N
    accks = [None] * N
    accma = [None] * N
    accbut = [None] * N
    for j in range(N):
        acc[j] = np.diff(u[j]) / dt
        accd[j] = np.diff(ud[j]) / dt
        accdn[j] = np.diff(udn[j]) / dt
        accks[j] = np.diff(uks[j]) / dt
        accma[j] = np.diff(uma[j]) / dt
        accbut[j] = np.diff(ubut[j]) / dt
        # Header row: column group g for sensor j lives at column j + g*N.
        sheet1.write(0, j, f"x{j+1}")
        sheet1.write(0, j + N, f"y{j+1}")
        sheet1.write(0, j + (2 * N), f"z{j+1}")
        sheet1.write(0, j + (3 * N), f"u{j+1}")
        sheet1.write(0, j + (4 * N), f"ud{j+1}")
        sheet1.write(0, j + (5 * N), f"udn{j+1}")
        sheet1.write(0, j + (6 * N), f"uks{j + 1}")
        sheet1.write(0, j + (7 * N), f"uma{j + 1}")
        sheet1.write(0, j + (8 * N), f"ubut{j + 1}")
        sheet1.write(0, j + (9 * N), f"acc{j + 1}")
        sheet1.write(0, j + (10 * N), f"accd{j + 1}")
        sheet1.write(0, j + (11 * N), f"accdn{j + 1}")
        sheet1.write(0, j + (12 * N), f"accks{j + 1}")
        sheet1.write(0, j + (13 * N), f"accma{j + 1}")
        sheet1.write(0, j + (14 * N), f"accbut{j + 1}")
        sheet1.write(0, j + (15 * N), f"dist{j + 1}")
        sheet1.write(0, j + (16 * N), f"dud{j + 1}")
        sheet1.write(0, j + (17 * N), f"dudn{j + 1}")
        sheet1.write(0, j + (18 * N), f"duks{j + 1}")
        sheet1.write(0, j + (19 * N), f"duma{j + 1}")
        sheet1.write(0, j + (20 * N), f"dubut{j + 1}")
        # Raw/filtered signals: k samples each.
        for i in range(k):
            sheet1.write(i + 1, j, x[j][i])
            sheet1.write(i + 1, j + N, y[j][i])
            sheet1.write(i + 1, j + (2 * N), z[j][i])
            sheet1.write(i + 1, j + (3 * N), u[j][i])
            sheet1.write(i + 1, j + (4 * N), ud[j][i])
            sheet1.write(i + 1, j + (5 * N), udn[j][i])
            sheet1.write(i + 1, j + (6 * N), uks[j][i])
            sheet1.write(i + 1, j + (7 * N), uma[j][i])
            sheet1.write(i + 1, j + (8 * N), ubut[j][i])
        # Accelerations: np.diff shortens each series by one sample.
        for i in range(k - 1):
            sheet1.write(i + 1, j + (9 * N), acc[j][i])
            sheet1.write(i + 1, j + (10 * N), accd[j][i])
            sheet1.write(i + 1, j + (11 * N), accdn[j][i])
            sheet1.write(i + 1, j + (12 * N), accks[j][i])
            sheet1.write(i + 1, j + (13 * N), accma[j][i])
            sheet1.write(i + 1, j + (14 * N), accbut[j][i])
        # Distance-like series: k-2 rows, read with a one-sample offset.
        for i in range(k - 2):
            sheet1.write(i + 1, j + (15 * N), dist[j][i + 1])
            sheet1.write(i + 1, j + (16 * N), dud[j][i + 1])
            sheet1.write(i + 1, j + (17 * N), dudn[j][i + 1])
            sheet1.write(i + 1, j + (18 * N), duks[j][i + 1])
            sheet1.write(i + 1, j + (19 * N), duma[j][i + 1])
            sheet1.write(i + 1, j + (20 * N), dubut[j][i + 1])
    # for j in range(N-1):
    #     sheet1.write(0, j + (7 * N), f"ivs{j + 1}")
    #     for i in range(k):
    #         sheet1.write(i + 1, j + (7 * N), ivs[j][i])
    wb.save(output_file_name)
    return None
def getElection():
    """OCR every image in a fixed folder of election cards and tabulate results.

    For each file: preprocesses with OpenCV, OCRs with pytesseract, cleans
    the text, regex-extracts name / father's name / gender / DOB / election
    number, and writes a row to 'Election Card DATA.xls'.

    Relies on module-level helpers/imports: listdir, isfile, join, Image,
    ImageFile, cv2, prePrcoessing, pytesseract, ftfy, re, sheet-writing
    globals created below.
    """
    # from PIL import Image, ImageFile
    x_index = 1
    mypath = 'D:\Personal\Machine Learning\Election Commission'
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    files_length = len(onlyfiles)
    print(files_length)
    # Workbook is created
    wb = Workbook()
    # add_sheet is used to create sheet.
    sheet1 = wb.add_sheet('Sheet 1', cell_overwrite_ok=True)
    # Write Headers to sheet
    sheet1.write(0, 0, 'S.No')
    sheet1.write(0, 1, 'Election Number')
    sheet1.write(0, 2, 'Gender')
    sheet1.write(0, 3, 'Age')
    sheet1.write(0, 4, 'Name')
    sheet1.write(0, 5, 'Fathers Name')
    sheet1.write(0, 6, 'File Name')
    sheet1.write(0, 7, 'Text')
    ixsheet = 0
    # print(files_lenth)
    while ixsheet < files_length:
        ImageFile.LOAD_TRUNCATED_IMAGES = True
        # Strip the quoting characters to recover the directory path.
        x = "'D:\Personal\Machine Learning\Election Commission\'"
        dir_path = x.replace("'", "")
        file_path = onlyfiles[ixsheet]
        join_path = join(dir_path, file_path)
        print(join_path)
        im = Image.open(join_path)
        # load the example image and convert it to grayscale
        image = cv2.imread(join_path)
        gray = prePrcoessing(image)
        # write the grayscale image to disk as a temporary file so we can
        # apply OCR to it
        filename = "{}.png".format(os.getpid())
        cv2.imwrite(filename, gray)
        ######################################################################
        ################ Section 3: Running PyTesseract ######################
        ######################################################################
        # load the image as a PIL/Pillow image, apply OCR, and then delete
        # the temporary file
        pytesseract.pytesseract.tesseract_cmd = 'C:\\Users\\Ashish.Gupta\\AppData\\Local\\Tesseract-OCR\\tesseract.exe'
        # add +hin after eng within the same argument to extract hindi
        # specific text - change encoding to utf-8 while writing
        text = pytesseract.image_to_string(Image.open(filename), lang='eng')
        os.remove(filename)
        # Round-trip the extracted text through a file (writing extracted
        # data into a text file, then reading it back).
        text_output = open('outputbase.txt', 'w', encoding='utf-8')
        text_output.write(text)
        text_output.close()
        file = open('outputbase.txt', 'r', encoding='utf-8')
        text = file.read()
        # Cleaning all the gibberish text
        text = ftfy.fix_text(text)
        text = ftfy.fix_encoding(text)
        # Strip punctuation/symbols that confuse the regexes below.
        bad_chars = [
            '~', '`', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '{',
            '}', "'", '[', ']', '|', ':', ';', ',', '<', '>', '.', '?', '/',
            '+', '=', '_'
        ]
        for i in bad_chars:
            text = text.replace(i, '')
        ######################################################################
        ############ Section 4: Extract relevant information #################
        ######################################################################
        # Initializing data variable
        name = None
        fname = None
        dob = None
        pan = None
        nameline = []
        dobline = []
        panline = []
        text0 = []
        text1 = []
        text2 = []
        # Split into trimmed, non-empty lines.
        lines = text.split('\n')
        for lin in lines:
            s = lin.strip()
            s = lin.replace('\n', '')
            s = s.rstrip()
            s = s.lstrip()
            text1.append(s)
        text1 = list(filter(None, text1))
        # print(text1)
        # Drop any text read from the image that lies before the
        # 'ELECTION COMMISSION' banner line.
        lineno = 0  # to start from the first line of the text file.
        for wordline in text1:
            xx = wordline.split('\n')
            if ([
                    w for w in xx
                    if re.search('(ELECTION|COMMISSION|INDIA|NDIA)$', w)
            ]):
                text1 = list(text1)
                lineno = text1.index(wordline)
                break
        # text1 = list(text1)
        text0 = text1[lineno + 1:]
        #print(text0)  # all relevant extracted text as a list
        ######################################################################
        ################## Section 5: Dishwasher part #######################
        ######################################################################
        # Cleaning first names, better accuracy
        namelist = text.split(' ')
        while ("" in namelist):
            namelist.remove("")
        # Find occurrences of the literal word "Name" (case-mixed).
        name = re.findall(r'\w{0}[N,n]\w{0}[A,a]\w{0}[M,m]\w{0}[E,e]', text)
        # NOTE(review): `indices` keeps only the positions of the LAST match;
        # if `name` is empty it is never bound and the loop below raises
        # NameError. Left unchanged.
        for n in name:
            indices = [i for i, x in enumerate(namelist) if x == n]
        # print(indices)
        list_names = list()
        if name is not None:
            # The two tokens following each "Name" are taken as the value.
            for n in indices:
                list_names.append((namelist[n + 1] + " " + namelist[n + 2]))
        # First extracted name -> person, second -> father's name.
        Person_name = ""
        print(len(list_names))
        if len(list_names) > 0:
            if list_names[0] is not None:
                Person_name = list_names[0]
            else:
                Person_name = ""
        fname = ""
        if len(list_names) == 2:
            if list_names[1] is not None:
                fname = list_names[1]
        else:
                fname = ""
        # Gender: look for "Female" first, fall back to "Male".
        gender = re.findall(
            r'\w{0}[F,f]\w{0}[e,E]\w{0}[M,m]\w{0}[A,a]\w{0}[a-zA-Z]\w{0}[E,e]',
            text)
        if gender == []:
            gender = re.findall(r'\w{0}[M,m]\w{0}[A,a]\w{0}[a-zA-Z]\w{0}[E,e]',
                                text)
        # Cleaning DOB
        dob = re.findall(r'\d{2}[-/|-]\d{2}[-/|-]\d{4}', text)
        # Election number: 3 letters followed by 7 digits (approximate).
        electiono = re.findall(r'\w{2}[a-zA-Z]\w{6}[0-9]', text)
        print(electiono)
        # Making tuples of data
        data = {}
        data['Name'] = Person_name
        data['Father Name'] = fname
        data['Gender'] = gender          # NOTE(review): list, not scalar
        data['Election No'] = electiono  # NOTE(review): list, not scalar
        sheet1.write(ixsheet + 1, 0, ixsheet + 1)
        sheet1.write(ixsheet + 1, 1, data['Election No'])
        sheet1.write(ixsheet + 1, 2, data['Gender'])
        sheet1.write(ixsheet + 1, 4, data['Name'])
        sheet1.write(ixsheet + 1, 5, data['Father Name'])
        sheet1.write(ixsheet + 1, 6, join_path)
        sheet1.write(ixsheet + 1, 7, text)
        ixsheet = ixsheet + 1
        # Saved each iteration so partial progress survives a crash
        # (placement within the loop inferred from layout — TODO confirm).
        wb.save('Election Card DATA.xls')
class vwapScores():
    """Pull historic BTCUSD VWAP data from ChainRider and dump it to .xls.

    NOTE(review): the entire body runs at class-DEFINITION time (network
    calls included) — this is script code wrapped in a class, not methods.
    """
    headers = {
        'Content-Type': 'xxxxxxx',
        'Accept': 'xxxxxxx',
        'Chain-Rider': 'xxxxxxx'
    }
    timeInterval = 11700  # Every 3.25 hours

    # Getting VWAP score for every currency available on Chainrider:
    getInfo = requests.get('https://api.chainrider.io/v1/finance/info/',
                           params={},
                           headers=headers)
    # Converting html code into readable text. Keep response.text — do not
    # replace it with anything.
    response_text = getInfo.text
    currencies = json.loads(response_text)  # Loading json data from API
    listOfCurrencies = currencies["message"]["pairs"]
    #pprint(getInfo.status_code)

    # When a specific timestamp matters:
    currentTime = 1584662400  # START TIME: (2/1/2020 19:00:46)
    pastTime = currentTime - timeInterval  # Time one interval earlier
    interval = 60  # How many data points we will have for each cryptocurrency.
    rowNumber = 0
    colNumber = 0
    # Creating spreadsheet
    wb = Workbook()
    vwapSpreadsheet = wb.add_sheet('VWAP Training Data 1.xls')
    # Getting list of Cryptocurrencies:
    #for i in range(0, len(listOfCurrencies)):
    #if(listOfCurrencies[i] == "BTCUSD" and listOfCurrencies[i] != "BTGUSD" and listOfCurrencies[i] != "XMRUSD" and listOfCurrencies[i] != "TUSDUSD"):
    while (interval != 0):
        # Body for POST request:
        body = {
            "pair": "BTCUSD",  #listOfCurrencies[i]
            "upper_unix": currentTime,
            "lower_unix": pastTime,
            "analytics": True,
            "exchanges": ["Huobi"]  # Exchange we are getting VWAP from
        }
        # Retrieving VWAP score:
        r = requests.post(
            'https://api.chainrider.io/v1/finance/vwap/historic/',
            json=body,
            params={},
            headers=headers)
        #pprint(r.status_code)
        response_text = r.text  # Converting html code into readable text.
        vwaps = json.loads(response_text)  # Loading json data from API
        if (r.status_code == 200):
            vwap = vwaps['message']['vwap']
            #pprint("Currency Name: " + str(vwaps['message']['pair']) + ", VWAP Score: " + str(vwap))
            pprint(currentTime)
            # Writing values into an Excel Spreadsheet
            vwapSpreadsheet.write(rowNumber, colNumber, "BTCUSD")  # crypto name
            colNumber = colNumber + 1  # Incrementing column
            vwapSpreadsheet.write(rowNumber, colNumber, vwap)  # VWAP score
            colNumber = colNumber + 1
            vwapSpreadsheet.write(rowNumber, colNumber, pastTime)  # interval start
            # Resetting column and incrementing row
            rowNumber = rowNumber + 1
            colNumber = 0
        # Decrementing values to get more data points. Placement at loop
        # level (not inside the status check) inferred — a failed request
        # would otherwise loop forever; TODO confirm.
        interval = interval - 1
        currentTime = currentTime - timeInterval
        pastTime = pastTime - timeInterval
    # Resetting interval and times (prepared for a follow-up pass that is
    # not present in this chunk).
    currentTime = 1580586360  # Current Time
    pastTime = 1583091960
    interval = 240  # Getting more data points for each crypto
    # Saving Excel Spreadsheet:
    wb.save('VWAP Bitcoin March 13th-20th.xls')
def getTableau(GF):
    """Scrape GreenFeed per-animal feeding drops and return them as a Workbook.

    Fetches the 7-day feeding table and a 30-day table for unit `GF`, sums
    each animal's 30-day drops, sorts animals by that total (ascending), and
    writes name/tag/daily drops/total into a new sheet.

    Relies on module-level `browser` plus helpers `Recherche` (substring
    extraction) and `permute` (adjacent-element swap across parallel lists —
    inferred from the bubble-sort below; TODO confirm).

    Args:
        GF: GreenFeed unit id, spliced into the query URL.

    Returns:
        xlwt.Workbook with a single "Tableau" sheet (caller saves it).
    """
    # get data for the table: last 7 days and last 30 days windows.
    temps = timedelta(days=7)
    temps2 = timedelta(days=30)
    arrive = datetime.today()
    depart = arrive - temps
    depart2 = arrive - temps2
    departt = str(depart)
    arrivet = str(arrive)
    departt2 = str(depart2)
    # NOTE(review): the trailing '¶m=1' looks like HTML-entity mojibake of
    # '&param=1' introduced by extraction; bytes preserved as found.
    browser.open(
        "https://greenfeed.c-lockinc.com/GreenFeed/tabledata/cowfeeding.php?fids=0,"
        + GF + "&from=" + departt[0:10] + "&to=" + arrivet[0:10] +
        "&cons=0&uncons=0¶m=1")
    tableaubrute = str(browser.parsed)
    # Parse tags, names, dates, and drop counts out of the raw markup.
    listtag = Recherche(tableaubrute, 'tag="', 24)
    for o in range(len(listtag)):
        listtag[o] = listtag[o].lstrip('0')  # strip zero-padding from tags
    listnom = Recherche(tableaubrute, 'name="', None, '" tag')
    listdate = Recherche(tableaubrute, "<date>", 5)
    listdrop = Recherche(tableaubrute, "<v>", None, '</v>')
    # Regroup the flat drop list into one tuple per animal (len(listdate) each).
    listdrop = list(zip(*[iter(listdrop)] * len(listdate)))
    # Second request: the 30-day window, for the per-animal totals.
    browser.open(
        "https://greenfeed.c-lockinc.com/GreenFeed/tabledata/cowfeeding.php?fids=0,"
        + GF + "&from=" + departt2[0:10] + "&to=" + arrivet[0:10] +
        "&cons=0&uncons=0¶m=1")
    tableaubrute2 = str(browser.parsed)
    listdate2 = Recherche(tableaubrute2, "<date>", 5)
    listdrop2 = Recherche(tableaubrute2, "<v>", None, '</v>')
    listdrop2 = list(zip(*[iter(listdrop2)] * len(listdate2)))
    listnom2 = Recherche(tableaubrute2, 'name="', None, '" tag')
    # Map each 7-day animal to its index in the 30-day result.
    ind = []
    for a in listnom:
        ind.append(listnom2.index(a))
    # 30-day drop total per animal in the 30-day list's order...
    somind = []
    for e in range(len(listnom2)):
        som = 0
        for r in range(len(listdate2)):
            som += int(listdrop2[e][r])
        somind.append(som)
    # ...then re-ordered to match the 7-day animal list.
    bonval = []
    for y in ind:
        bonval.append(somind[y])
    # ----------------------------------------
    # sort the table: bubble sort on the totals, swapping all four parallel
    # lists in lockstep via `permute`.
    passage = 0
    while bonval != sorted(bonval):
        passage += 1
        for u in range(len(bonval) - 1):
            if bonval[u] > bonval[u + 1]:
                permute(bonval, u)
                permute(listtag, u)
                permute(listnom, u)
                permute(listdrop, u)
    # ----------------------------------------
    # write in a excel file: names/tags, one column per date, total at the end.
    book3 = Workbook()
    sheet1 = book3.add_sheet("Tableau")
    sheet1.write(0, 0, "Animal Name")
    sheet1.write(0, 1, "Animal Tag")
    sheet1.write(0, len(listdate) + 2, "Total 30j")
    for k in range(1, len(listnom) + 1):
        sheet1.write(k, 1, listtag[k - 1])
        sheet1.write(k, 0, listnom[k - 1])
        sheet1.write(k, len(listdate) + 2, bonval[k - 1])
    for i in range(2, len(listdate) + 2):
        sheet1.write(0, i, listdate[i - 2])
    for l in range(len(listdrop)):
        for z in range(len(listdate)):
            sheet1.write(l + 1, z + 2, listdrop[l][z])
    return book3
def Init(self):
    """Create the UTF-8 report workbook and write its title and header cells.

    Side effects: sets self.report (xlwt Workbook) and self.sheet
    ('测试结束报告' sheet with overwrite enabled).
    """
    workbook = Workbook(encoding='utf-8')
    self.report = workbook
    self.sheet = workbook.add_sheet('测试结束报告', cell_overwrite_ok=True)
    # Title cell at the configured position, then the fixed header row.
    self.sheet.write(self.index_x, self.index_y, '测试结束报告')
    for column, header in enumerate(('测试用例名词', '测试结果')):
        self.sheet.write(2, column, header)
def Q_Learning(Pr_des, eps_unc, N_EPISODES, SHOW_EVERY, LEARN_RATE, DISCOUNT,
               EPS_DECAY, epsilon, i_s, pa, energy_pa, pa2ts, pa_s, pa_t,
               act_num, possible_acts_not_pruned, possible_acts_pruned,
               possible_next_states_not_pruned, possible_next_states_pruned,
               pick_up, delivery, pick_ups, deliveries, test_n, n_samples,
               ts_size):
    """Time-varying tabular Q-learning over product automata, with replay.

    One stack of Q-tables is kept per task (len(energy_pa) tasks): a table of
    shape [#states x 9 actions] for every time step up to ``ep_len``.  Each
    episode picks a random task, runs an epsilon-greedy rollout of length
    ``ep_len`` under action uncertainty ``eps_unc``, updates Q online, and
    additionally replays ``n_samples`` stored transitions per step
    (Dyna-style model-based updates).

    Progress statistics are written to sheet 'Simulation<test_n+1>' of an xls
    workbook, and the final Q-tables are saved under Q_TABLES/.  Returns the
    trajectory of the last episode in (m, n, h) grid format.

    Relies on module-level globals: ``ep_len``, ``rewards_pa``,
    ``rew_uncertainity``, ``n``, ``action_uncertainity``, plus ``np``,
    ``math``, ``os``, ``timeit`` and xlwt's ``Workbook``.

    Fix vs. previous revision: spreadsheet row indices are now computed with
    integer division (``//``).  Under Python 3 the old ``/`` produced floats,
    which xlwt's ``Worksheet.write`` rejects as row indices.
    """
    wb = Workbook()
    sheet_name = 'Simulation' + str(test_n + 1)
    s1 = wb.add_sheet(sheet_name)
    # Header rows: one block of N_EPISODES // SHOW_EVERY report rows per task.
    s1.write(1, 0, 'Task-1')
    s1.write(1 + N_EPISODES // SHOW_EVERY, 0, 'Task-2')
    s1.write(1 + 2 * N_EPISODES // SHOW_EVERY, 0, 'Task-3')
    s1.write(1 + 3 * N_EPISODES // SHOW_EVERY, 0, 'Task-4')
    s1.write(0, 1, '# of Hit')
    s1.write(0, 2, ' Avg. Reward')
    s1.write(0, 3, ' Discounted Avg. Reward')
    s1.write(0, 11, ' Discounted Episode Reward - Task 1')
    s1.write(0, 12, ' Discounted Episode Reward - Task 2')
    s1.write(0, 13, ' Discounted Episode Reward - Task 3')
    s1.write(0, 14, ' Discounted Episode Reward - Task 4')
    s1.write(0, 6, 'Total Run Time')
    s1.write(0, 7, 'Total Avg. Reward')
    inx = 0  # number of SHOW_EVERY report rows written so far
    QL_start_time = timeit.default_timer()

    # Per-task bookkeeping.  Q-tables are initialized between -0.001 and 0.
    pa_size = []           # number of product-automaton states per task
    q_table = []           # q_table[i][t] -> Q values of task i at time step t
    agent_s = []           # current state per task
    hit_count = []         # successful missions per task
    mission_tracker = []   # episodes attempted per task
    ep_per_task = []
    disc_ep_per_task = []
    # NOTE(review): old_q_tables stores *references* to the live Q arrays
    # (here and again inside the episode loop), never copies, so it always
    # aliases q_table.  Kept as-is — confirm intent before "fixing".
    old_q_tables = []
    all_samples = []       # replay memory per task: [s, a, r, s', t] entries
    for i in range(len(energy_pa)):
        pa_size.append(len(pa[i].g.nodes()))
        agent_s.append(i_s[i])  # initialize the agent's location
        hit_count.append(0)
        mission_tracker.append(0)
        ep_per_task.append([])
        disc_ep_per_task.append([])
        all_samples.append([])
        q_table.append([])
        old_q_tables.append([])
        for t in range(ep_len + 1):
            # One (#states x #actions) table per time step.
            q_table[i].append(np.random.rand(pa_size[i], 9) * 0.001 - 0.001)
            old_q_tables[i].append(q_table[i][t])

    ep_rewards = []
    ep_trajectories_pa = []

    # Lookup used to re-initialize the agent onto a (region, 0) state.
    # NOTE(review): agent_upt_i is not reset between tasks, so every
    # agent_upt[j] aliases the same accumulated list — this mirrors the
    # sibling implementation earlier in this file and is left unchanged.
    agent_upt_i = []
    agent_upt = []
    for j in range(len(energy_pa)):
        for i in range(len(pa[j].g.nodes())):
            if pa[j].g.nodes()[i][1] == 0 or str(pa[j].g.nodes()[i][0]) == 'r' + str(pick_up[j]):  #or str(pa[j].g.nodes()[i][0]) == 'r'+str(delivery[j]): # If the mission changes check here
                agent_upt_i.append(pa2ts[j][i])
            else:
                agent_upt_i.append([])
        agent_upt.append(agent_upt_i)

    for episode in range(N_EPISODES):
        if episode > 900000:
            epsilon = 0  # pure exploitation in the tail of training
        which_pd = np.random.randint(
            len(energy_pa))  # randomly chosing the pick_up delivery states
        mission_tracker[which_pd] = mission_tracker[which_pd] + 1
        hit = []
        for i in range(len(energy_pa)):
            hit.append(0)
        ep_traj_pa = [agent_s[which_pd]]  # initialize the episode trajectory
        ep_rew = 0       # total episode reward
        disc_ep_rew = 0  # discounted episode reward
        for t_ep in range(ep_len):
            old_q_tables[which_pd][t_ep] = q_table[which_pd][t_ep]
            possible_acts = possible_acts_not_pruned[which_pd]
            possible_next_states = possible_next_states_not_pruned[which_pd]
            if hit[which_pd] == 0:
                if energy_pa[which_pd][agent_s[which_pd]] == 0:
                    # Mission achieved: raise the hit flag and drop back to
                    # the matching (region, 0) state to prevent getting stuck.
                    hit[which_pd] = 1
                    agent_s[which_pd] = agent_upt[which_pd].index(
                        pa2ts[which_pd][agent_s[which_pd]])
                    hit_count[which_pd] = hit_count[which_pd] + 1
                else:
                    # Mission in progress: use the pruned (energy-safe) sets.
                    possible_acts = possible_acts_pruned[which_pd]
                    possible_next_states = possible_next_states_pruned[which_pd]
            if len(possible_acts[t_ep][agent_s[which_pd]]) == 0:
                # Dead end at this time step: re-initialize to (region, 0).
                agent_s[which_pd] = agent_upt[which_pd].index(
                    pa2ts[which_pd][agent_s[which_pd]])
            if np.random.uniform() > epsilon:  # exploit
                possible_qs = q_table[which_pd][t_ep][
                    agent_s[which_pd], possible_acts[t_ep][agent_s[which_pd]]]
                next_ind = np.argmax(possible_qs)  # action with max Q value
            else:  # explore
                next_ind = np.random.randint(
                    len(possible_acts[t_ep][agent_s[which_pd]]))
            # Take the action; with probability eps_unc a perturbed action
            # (and its successor) is chosen instead of the intended one.
            prev_state = agent_s[which_pd]
            intended_action = possible_acts[t_ep][prev_state][next_ind]
            if np.random.uniform() < eps_unc:
                [chosen_act, next_state] = action_uncertainity(
                    intended_action, pa_s[which_pd], pa_t[which_pd],
                    act_num[which_pd], agent_s[which_pd])
                agent_s[which_pd] = next_state
            else:
                agent_s[which_pd] = possible_next_states[t_ep][
                    agent_s[which_pd]][next_ind]
            ep_traj_pa.append(agent_s[which_pd])
            # One-step Q update.  Note: indexed by the *intended* action even
            # when the uncertain action was executed (as in the original).
            current_q = q_table[which_pd][t_ep][prev_state, intended_action]
            max_future_q = np.amax(
                q_table[which_pd][t_ep + 1][agent_s[which_pd], :])
            rew_obs = rewards_pa[which_pd][
                agent_s[which_pd]] * np.random.binomial(1, 1 - rew_uncertainity)
            new_q = (1 - LEARN_RATE) * current_q + LEARN_RATE * (
                rew_obs + DISCOUNT * max_future_q)
            q_table[which_pd][t_ep][prev_state, intended_action] = new_q
            disc_ep_rew += rew_obs * (DISCOUNT ** t_ep)
            ep_rew += rew_obs
            # Store the transition in the replay memory: S, A, R, S' and time.
            all_samples[which_pd].append(
                [prev_state, intended_action, rew_obs, agent_s[which_pd], t_ep])
            # Replay n_samples uniformly drawn transitions (Dyna-style).
            for i in range(n_samples):
                random_sample_index = np.random.choice(
                    len(all_samples[which_pd]))
                sample_s = all_samples[which_pd][random_sample_index][0]
                sample_action = all_samples[which_pd][random_sample_index][1]
                sample_r = all_samples[which_pd][random_sample_index][2]
                sample_s_prime = all_samples[which_pd][random_sample_index][3]
                sample_t = all_samples[which_pd][random_sample_index][4]
                current_q = q_table[which_pd][sample_t][sample_s, sample_action]
                max_future_q = np.amax(
                    q_table[which_pd][sample_t + 1][sample_s_prime, :])
                new_q = (1 - LEARN_RATE) * current_q + LEARN_RATE * (
                    sample_r + DISCOUNT * max_future_q)
                q_table[which_pd][sample_t][sample_s, sample_action] = new_q
        # Episode over: park the agent back on its (region, 0) state.
        agent_s[which_pd] = agent_upt[which_pd].index(
            pa2ts[which_pd][agent_s[which_pd]])
        ep_rewards.append(ep_rew)
        ep_trajectories_pa.append(ep_traj_pa)
        epsilon = epsilon * EPS_DECAY
        disc_ep_per_task[which_pd].append(disc_ep_rew)
        ep_per_task[which_pd].append(ep_rew)
        if (episode + 1) % SHOW_EVERY == 0:
            inx = inx + 1
            for ind in range(len(energy_pa)):
                avg_per_task = np.mean(ep_per_task[ind])
                disc_avg_per_task = np.mean(disc_ep_per_task[ind])
                print('Episode # ' + str(episode + 1) + ' : Task-' + str(ind) +
                      ' # of Hit=' + str(len(ep_per_task[ind])) + ' Avg.=' +
                      str(avg_per_task))
                # Row index must be an int for xlwt, hence '//'.
                s1.write(ind * N_EPISODES // SHOW_EVERY + inx, 1,
                         len(ep_per_task[ind]))
                s1.write(ind * N_EPISODES // SHOW_EVERY + inx, 2, avg_per_task)
                s1.write(ind * N_EPISODES // SHOW_EVERY + inx, 3,
                         disc_avg_per_task)
        if (episode + 1) % SHOW_EVERY == 0:
            avg_rewards = np.mean(ep_rewards[episode - SHOW_EVERY + 1:episode])
            print('Episode # ' + str(episode + 1) + ' : Epsilon=' +
                  str(round(epsilon, 4)) + ' Avg. reward in the last ' +
                  str(SHOW_EVERY) + ' episodes=' + str(round(avg_rewards, 2)))

    best_episode_index = ep_rewards.index(
        max(ep_rewards))  # kept for reference; the last episode is used below
    optimal_policy_pa = ep_trajectories_pa[
        N_EPISODES - 1]  #ep_trajectories_pa[best_episode_index]
    optimal_policy_ts = []  # optimal policy in ts
    opt_pol = []  # optimal policy in (m, n, h) format for visualization
    for ind, val in enumerate(optimal_policy_pa):
        optimal_policy_ts.append(pa2ts[which_pd][val])
        opt_pol.append(
            (math.floor(optimal_policy_ts[ind] / n), optimal_policy_ts[ind] % n,
             0))
    print('Tajectory at the last episode : ' + str(optimal_policy_ts))
    # Also print a few early trajectories for comparison.
    indices = [0, 1, 2]
    optimal_policy_pas = []
    for i in range(len(indices)):
        optimal_policy_pas.append(ep_trajectories_pa[indices[i]])
        optimal_policy_ts = []
        for ind, val in enumerate(optimal_policy_pas[i]):
            optimal_policy_ts.append(pa2ts[which_pd][val])
        print('Tajectory at the episode ' + str(indices[i]) + ' : ' +
              str(optimal_policy_ts))

    QL_timecost = timeit.default_timer() - QL_start_time
    success_ratio = []
    for i in range(len(energy_pa)):
        success_ratio.append(100 * hit_count[i] / mission_tracker[i])
        print("Successful Mission Ratio[%] = " + str(success_ratio[i]))
        print("Successful Missions = " + str(hit_count[i]) + " out of " +
              str(mission_tracker[i]))
    d_maxs = []
    for i in range(len(energy_pa)):
        d_maxs.append(max(energy_pa[i]))
    max_energy = max(d_maxs)
    # Persist one Q-table per task and time step under Q_TABLES/.
    for i in range(len(energy_pa)):
        for j in range(ep_len):
            name_q = "Env3_Converged_Q_TABLE__8x8" + str(
                n_samples) + '_task' + str(i) + '_t' + str(j) + ".npy"
            np.save(os.path.join('Q_TABLES', name_q), q_table[i][j])
    print('Total time for Q-Learning : ' + str(QL_timecost) + ' seconds')
    print('Action uncertainity[%] = ' + str(eps_unc * 100))
    print('# of Samples = ' + str(n_samples))
    print("Desired Minimum Success Ratio[%] = " + str(100 * Pr_des))
    print("Episode Length = " + str(ep_len) +
          " and Max. Energy of the System = " + str(max_energy))
    print('Reward at last episode = ' + str(ep_rewards[-1]))
    s1.write(1, 6, QL_timecost)
    s1.write(1, 7, np.mean(ep_rewards))
    filename = 'model_based_8x8' + str(n_samples) + '.xls'
    wb.save(filename)
    return opt_pol
def open(self, file_name, encoding='ascii', style_compression=2, **keywords): BookWriter.open(self, file_name, **keywords) self.work_book = Workbook(style_compression=style_compression, encoding=encoding)
#-*- coding: utf-8 -*- import urllib, urllib2 import StringIO, gzip # to require bs4 from bs4 import BeautifulSoup # to read xls file from xlrd import open_workbook xls = open_workbook('test2.xls') sheet0 = xls.sheet_by_index(0) #to write in xls file from tempfile import TemporaryFile from xlwt import Workbook book = Workbook() sheet1 = book.add_sheet('result 1') import re #import time for row_index in range(sheet0.nrows): # time.sleep(0.7) keyword = sheet0.cell(row_index, 0).value params = {'q': keyword, 'hl': 'ko'} enc_params = urllib.urlencode(params) request = urllib2.Request('http://sky-kpkr.sandbox.google.com/' + 'search' + '?' + enc_params) request.add_header(
def action_team_report(self, cr, uid, ids, context):
    """Build a per-sales-team xls report (one sheet per crm.case.section).

    Depending on ``wizard.model``, each sheet shows per customer and per
    month either invoiced and paid amounts side by side
    (model == 'invoice.paid') or the amounts of the selected model
    (sale.order / account.invoice / account.move.line).

    The rendered workbook is base64-encoded into the wizard record's
    'data' field and the wizard is moved to state 'end'.

    NOTE(review): old OpenERP API (self.pool, cr/uid) and Python 2 only —
    ``out.encode("base64")`` does not exist on Python 3 str/bytes.
    """
    wizard = self.browse(cr, uid, ids[0], context)
    year = int(wizard.year)
    currency = self.pool['res.users'].browse(
        cr, uid, uid, context).company_id.currency_id
    partner_obj = self.pool['res.partner']
    file_name = 'Sales_Team_{model}_{year}.xls'.format(model=wizard.model,
                                                       year=year)
    book = Workbook(encoding='utf-8')
    # One sheet per selected sales team.
    for section in self.pool['crm.case.section'].browse(
            cr, uid, context['active_ids'], context):
        # ws = book.add_sheet(name, cell_overwrite_ok=True)
        ws = book.add_sheet(section.name)
        if wizard.model == 'invoice.paid':
            # Whole-year invoiced amount per partner: {partner_id: amount}.
            query = self.get_query(date(year, 1, 1), date(year, 12, 31),
                                   section.id, 'account.invoice')
            cr.execute(query)
            results = cr.fetchall()
            results = dict(results)
            # Collect the receivable account ids of the team's partners —
            # needed to query paid amounts on account.move.line.
            partner_ids = partner_obj.search(
                cr, uid, [('section_id', '=', section.id)], context=context)
            account_ids = []
            for partner in partner_obj.browse(cr, uid, partner_ids,
                                              context=context):
                if partner.property_account_receivable:
                    if not str(partner.property_account_receivable.id
                               ) in account_ids:
                        account_ids.append(
                            str(partner.property_account_receivable.id))
            account_ids = ','.join(account_ids)
            if account_ids:
                # Whole-year paid amount per partner.
                query = self.get_query(date(year, 1, 1), date(year, 12, 31),
                                       section.id, 'account.move.line',
                                       account_ids)
                cr.execute(query)
                results2 = cr.fetchall()
                results2 = dict(results2)
            else:
                results2 = {}
            # Merge the two yearly totals into one dict keyed by partner id.
            report = {}
            for partner in self.pool['res.partner'].browse(
                    cr, uid, results.keys(), context=context):
                report[partner.id] = {
                    'name': partner.name,
                    'total_amount_invoice': results[partner.id]
                }
            for partner in self.pool['res.partner'].browse(
                    cr, uid, results2.keys(), context=context):
                if partner.id in report:
                    report[partner.id].update(
                        {'total_amount_paid': results2[partner.id]})
                else:
                    report[partner.id] = {
                        'name': partner.name,
                        'total_amount_paid': results2[partner.id]
                    }
            # Monthly breakdown: keys '<month>i' (invoiced), '<month>p' (paid).
            # NOTE(review): report[key].update raises KeyError if a partner
            # has monthly rows but no yearly row — presumably the queries
            # guarantee this cannot happen; confirm.
            for month in range(1, 13):
                date_start, date_end = self.get_period(year, month)
                query = self.get_query(date_start, date_end, section.id,
                                       'account.invoice')
                cr.execute(query)
                results = cr.fetchall()
                if account_ids:
                    query = self.get_query(date_start, date_end, section.id,
                                           'account.move.line', account_ids)
                    cr.execute(query)
                    results2 = cr.fetchall()
                else:
                    results2 = {}
                month_i = str(month) + 'i'
                month_p = str(month) + 'p'
                for key, value in results:
                    report[key].update({month_i: value})
                for key, value in results2:
                    report[key].update({month_p: value})
            ws.write(0, 5, 'Fatturato/Incassato Mensile Clienti',
                     Style.bold_header)
            self.write_header_info(ws, currency)
            # Currency-formatted cell styles shared via the Style registry.
            Style.currency = easyxf(
                'align: horiz right;',
                num_format_str=u'{symbol}#,##0.00'.format(
                    symbol=currency.symbol))
            Style.currency_bold = easyxf(
                'font: bold on; align: horiz right',
                num_format_str=u'{symbol}#,##0.00'.format(
                    symbol=currency.symbol))
            Style.currency_border_left = easyxf(
                'align: horiz right; borders: left thin;',
                num_format_str=u'{symbol}#,##0.00'.format(
                    symbol=currency.symbol))
            Style.last_col_currency_border_left = easyxf(
                'align: horiz right; borders: left thin; font: bold on;',
                num_format_str=u'{symbol}#,##0.00'.format(
                    symbol=currency.symbol))
            Style.last_col_currency_border = easyxf(
                'align: horiz right; borders: right thin;',
                num_format_str=u'{symbol}#,##0.00'.format(
                    symbol=currency.symbol))
            Style.last_col_currency_border_bold = easyxf(
                'align: horiz right; borders: right thin; font: bold on;',
                num_format_str=u'{symbol}#,##0.00'.format(
                    symbol=currency.symbol))
            row = 4
            ws, row = self.write_header_invoice_paid(ws, row)
            first_row = row + 1
            if report:
                # One row per partner; commission from the team's sale agent.
                for row, item in enumerate(report.items(), first_row):
                    partner_id, values = item
                    if section.sale_agent_id and section.sale_agent_id.commission:
                        commission_value = section.sale_agent_id.commission.get_commission(
                            partner_id) * 0.01
                    else:
                        commission_value = 0.0
                    self.write_table_invoice_paid(ws, row, values, year,
                                                  commission_value)
            else:
                # Empty team: emit one blank row so the sheet is well-formed.
                row = first_row
                values = {
                    'name': '',
                    'total_amount_invoice': 0,
                    'total_amount_paid': 0
                }
                if section.sale_agent_id and section.sale_agent_id.commission:
                    commission_value = section.sale_agent_id.commission.get_commission(
                    ) * 0.01
                else:
                    commission_value = 0.0
                self.write_table_invoice_paid(ws, row, values, year,
                                              commission_value)
            self.write_total_invoice_paid(ws, row, first_row)
        else:
            # Single-model report (sale.order / account.invoice /
            # account.move.line): yearly totals then a monthly breakdown.
            query = self.get_query(date(year, 1, 1), date(year, 12, 31),
                                   section.id, wizard.model)
            cr.execute(query)
            results = cr.fetchall()
            results = dict(results)
            report = {}
            for partner in self.pool['res.partner'].browse(
                    cr, uid, results.keys(), context=context):
                report[partner.id] = {
                    'name': partner.name,
                    'total_amount': results[partner.id]
                }
            for month in range(1, 13):
                date_start, date_end = self.get_period(year, month)
                query = self.get_query(date_start, date_end, section.id,
                                       wizard.model)
                cr.execute(query)
                results = cr.fetchall()
                for key, value in results:
                    report[key].update({month: value})
            # Sheet title depends on the reported model (Italian labels).
            if wizard.model == 'sale.order':
                ws.write(0, 5, 'Ordinato Mensile Clienti', Style.bold_header)
            elif wizard.model == 'account.invoice':
                ws.write(0, 5, 'Fatturato Mensile Clienti', Style.bold_header)
            elif wizard.model == 'account.move.line':
                ws.write(0, 5, 'Incassato Mensile Clienti', Style.bold_header)
            else:
                ws.write(0, 5, 'Fatturato/Incassato Mensile Clienti',
                         Style.bold_header)
            self.write_header_info(ws, currency)
            Style.currency = easyxf(
                'align: horiz right',
                num_format_str=u'{symbol}#,##0.00'.format(
                    symbol=currency.symbol))
            Style.currency_bold = easyxf(
                'font: bold on; align: horiz right',
                num_format_str=u'{symbol}#,##0.00'.format(
                    symbol=currency.symbol))
            ws = self.write_header(ws, 4)
            first_row = 5
            if report:
                for row, item in enumerate(report.items(), first_row):
                    partner_id, values = item
                    self.write_table(ws, row, values, year)
                self.write_total(ws, row, first_row)
            else:
                row = first_row
                values = {'name': '', 'total_amount': 0}
                self.write_table(ws, row, values, year)
                self.write_total(ws, row, first_row)
    """PARSING DATA AS STRING """
    file_data = StringIO()
    book.save(file_data)
    """STRING ENCODE OF DATA IN WKSHEET"""
    out = file_data.getvalue()
    out = out.encode("base64")
    return self.write(cr, uid, ids, {
        'state': 'end',
        'data': out,
        'name': file_name
    }, context=context)
def solveWithSolver(Arrival_Time, epsilon, NB_Clients, Personnes_Distribution,
                    Tps_Distribution, Lmax):
    """Solve the distribution scheduling problem as a MILP with PuLP/CBC.

    Args (as used below):
        Arrival_Time: earliest arrival time per client group.
        epsilon: minimum spacing between scheduled events.
        NB_Clients: number of clients in each group.
        Personnes_Distribution: number of serving agents at the point.
        Tps_Distribution: service time per client.
        Lmax: latest allowed departure time.

    Minimizes total waiting time (sum of departure - arrival over all
    clients), prints a per-slot arrival histogram, and writes it to
    'DistributionPlanning.xls'.
    """
    indice = []
    NB_Clients_Total = 0
    top_inter = 0  # running offset of the current group's first client index
    # Total client count across groups, and the flat index list 0..N-1.
    for i in NB_Clients:
        NB_Clients_Total += i
    for j in range(NB_Clients_Total):
        indice.append(j)
    # Decision variables and objective function
    dvar_arrivee_clients = pulp.LpVariable.dicts("Heure d'arrivée", (indice),
                                                 0, None, pulp.LpInteger)
    dvar_depart_clients = pulp.LpVariable.dicts("Heure de départ", (indice), 0,
                                                None, pulp.LpInteger)
    prob = pulp.LpProblem("Coopain_Distribution_Planning_Problem",
                          pulp.LpMinimize)
    # Objective: total waiting time over all clients.
    prob += pulp.lpSum([
        dvar_depart_clients[i] - dvar_arrivee_clients[i] for i in indice
    ]), "Waiting_Time_Sum"
    for i in indice:
        # Each client leaves at least Tps_Distribution after arriving ...
        prob += (dvar_arrivee_clients[i] + Tps_Distribution <= dvar_depart_clients[i]),\
            "condition_arrivee_distribution_{0}".format(i)
        # ... and no later than Lmax.
        prob += (dvar_depart_clients[i]<=Lmax),\
            "Lapse_temps_distribution{0}".format(i)
    for j in range(len(NB_Clients)):
        for i in range(1, NB_Clients[j]):
            prob += (
                dvar_arrivee_clients[top_inter + i] >= Arrival_Time[j] +
                epsilon), "condition_heure_arrivee_clins{0}".format(top_inter +
                                                                    i)
            # Service-capacity condition: departures depend on the number of
            # agents (Personnes_Distribution) at the distribution point.
            # NOTE(review): this 'if' compares an LpVariable with '<=', which
            # builds an LpConstraint and tests its *truthiness at model-build
            # time* — it does NOT branch on the solved value.  Left as-is;
            # confirm the intended semantics before changing.
            if (dvar_depart_clients[top_inter] <= Arrival_Time[j]):
                prob += (dvar_depart_clients[top_inter+i] >= Arrival_Time[j] + epsilon + \
                    (math.floor((top_inter+i-1)/Personnes_Distribution)+1)*Tps_Distribution),\
                    "condition_depart_{0}".format(i + top_inter)
            else:
                prob += (dvar_depart_clients[top_inter+i] >= dvar_depart_clients[top_inter] + epsilon +\
                    (math.floor((top_inter+i-1)/Personnes_Distribution)+1)*Tps_Distribution),\
                    "condition_depart_{0}".format(top_inter+ i)
        top_inter += NB_Clients[j]
    # Clients are served in order: arrival and departure times nondecreasing.
    for i in range(top_inter - 1):
        prob += (dvar_arrivee_clients[i + 1] >= dvar_arrivee_clients[i]
                 ), "condition_ordre_arrivee_{0}".format(i)
        prob += (dvar_depart_clients[i + 1] >= dvar_depart_clients[i]
                 ), "condition_ordre_depart_{0}".format(i)
    prob.solve(pulp.PULP_CBC_CMD(fracGap=0))
    # Histogram: arrivals per time slot (60 slots).
    result = 60 * [0]
    # Each significant variable is printed with its resolved optimum value.
    # NOTE(review): 'if v.varValue' also skips slot 0 / zero-valued arrivals.
    for v in prob.variables():
        #print(v.name)
        #print("/")
        if v.varValue:
            if (v.name.find("Heure_d'") != -1):  # arrival-time variables only
                #print(v.varValue)
                #print("*")
                result[int(v.varValue)] += 1
                print("la case{0}".format(int(v.varValue)), " a pris plus un ")
                print(v.name, "=", v.varValue)
    print("tableau du cumul")
    print(result)
    # Workbook is created
    wb = Workbook()
    # add_sheet is used to create sheet.
    sheet1 = wb.add_sheet('Sheet 1')
    style = xlwt.easyxf('font: bold 1')
    sheet1.write(0, 0, 'Heure', style)
    sheet1.write(0, 1, 'nombre', style)
    date_format = xlwt.XFStyle()
    date_format.num_format_str = 'dd-mm-yyyy hh:mm'
    index = 0
    # One row per non-empty slot, timestamped as minute i of 2021-03-19 12:00.
    for i in range(len(result)):
        if result[i] != 0:
            time = pd.Timestamp(2021, 3, 19, 12, i)
            #print(time)
            sheet1.write(index + 1, 0, time, date_format)
            sheet1.write(index + 1, 1, result[i])
            index += 1
    #The optimised objective function value is printed to the screen
    print("Total Cost of Transportation = ", pulp.value(prob.objective))
    wb.save('DistributionPlanning.xls')
def collectsf(fn):
    """Extract sound features from every .wav file under ./wav and append one
    row per file (labelled *fn*) to Dataset.xls.

    Each row holds: mean, variance, min, max, median, kurtosis, skewness and
    the dominant frequency (via the module-level ``frequency`` helper).
    Returns the number of files *attempted* (failed reads are counted too,
    matching the original behaviour).

    Fixes vs. previous revision: the initial ``Workbook()`` / ``add_sheet``
    pair was dead code (immediately overwritten by ``copy(rb)``) and has been
    removed; the bare ``except:`` clauses are narrowed so unrelated errors
    (e.g. KeyboardInterrupt) are no longer swallowed.
    """
    # Open the existing dataset and make a writable copy (xlutils.copy) so
    # new rows can be appended after the current last row.
    rb = xlrd.open_workbook("Dataset.xls", formatting_info=True)
    r_sheet = rb.sheet_by_index(0)
    r = r_sheet.nrows  # next free row index
    wb = copy(rb)
    i = 0  # number of files attempted (incremented before the read succeeds)
    # Move into the recorded-data folder.
    print("setting path")
    oldpath = os.getcwd()
    try:
        os.chdir("wav")
        print("Directory Changed")
    except OSError:
        print("could not change directory")
    path = os.getcwd()
    # Iterate over all wav recordings; failures are reported and skipped
    # (deliberate best-effort — a bad file must not abort the batch).
    for filename in glob.glob(os.path.join(path, '*.wav')):
        try:
            i += 1
            frate, data = wavfile.read(filename)
            # Work on a numpy array so the statistics below apply elementwise.
            data = np.array(data)
            print("try to get frequency")
            freq = frequency(frate, data)
            print("frequency done")
            # Kurtosis/skewness come back as array-ish strings; strip the
            # leading '[' and truncate to 9 characters, as before.
            kurt = str(kurtosis(data)).replace('[', "")[0:9]
            skk = str(skew(data)).replace('[', "")[0:9]
            print("done with kurt and skew")
            # Row layout: [mean, variance, min, max, median, kurtosis,
            # skewness, frequency] (min/max stringified, as before).
            arrofdata = [
                np.mean(data),
                np.var(data),
                str(np.min(data)),
                str(np.max(data)),
                np.median(data),
                kurt,
                skk,
                freq,
            ]
            print("data stored in array")
            # Append the row: label in column 0, features in columns 1..8.
            sheet = wb.get_sheet(0)
            sheet.write(r, 0, fn)
            for col, value in enumerate(arrofdata, start=1):
                sheet.write(r, col, value)
            r = r + 1
        except Exception:
            print("could not read so i passed it")
    # Restore the working directory and save the updated dataset.
    try:
        os.chdir(oldpath)
        print("dir is here: " + os.getcwd())
        print("Directory Changed")
    except OSError:
        print("coudl not change directory")
    print("saved")
    wb.save("Dataset.xls")
    return i
for i in range(instrumentcount): vars()[instruments[0]] = volumes[0] volumetotal = volumetotal + int(volumes[i * 5].replace(',', '')) print(instruments[i] + ' = $' + volumes[i * 5]) print('') print("The Total Volume USD ('000) is $" + format( volumetotal, ',d')) #add a comma to the result.... e.g $61000 > $61,000 # Writing to an excel using the xlwt library # sheet using Python import xlwt from xlwt import Workbook #allows you to create and write to a workbook in excel # Workbook is created - Workbook() creates a new workbook wb = Workbook() # add_sheet is used to create sheet. sheet = wb.add_sheet('Sheet 1') first_col = sheet.col(0) #Also known as Column A first_col.width = 256 * 20 #Increase width to 20 Characters long #Writes to the new sheet that was created #sheet.write(Vertical coordinate, Horizontal coordinate, 'Value to be put in') sheet.write(0, 0, 'Instrument name') #Outputs "Instrument name" in A1 sheet.write(0, 1, "Total Volume USD ('000)") for i in range(instrumentcount): vars()[instruments[0]] = volumes[0] sheet.write(1 + i, 0, instruments[i])
def generate_missing(self, eng_flang_dict): workbook = Workbook(encoding="utf-8") ws = workbook.add_sheet('pt_orphan_templates') self.write_orphan_flang(ws, eng_flang_dict) workbook.save('template_info_pt_03.xls')