Example #1
def dealwithFile(fileName):
    try:
        xls = xlrd.open_workbook(fileName)
        sheetNames = xls.sheet_names()
        # sheet = xls.sheet_by_name("Sheet1")
        for sheetName in sheetNames:
            try:
                sheetName1 = str(sheetName).upper().replace('SELECT ', '')
                print('sheetName: ' + sheetName1)
                if 'SQL' == sheetName1:
                    continue
                workbook = xlrd.open_workbook(BaseDicPath + "/" + sheetName1 + ".xls")
                workbook_t = copy(workbook)
            sheet_t = workbook_t.add_sheet('Example', cell_overwrite_ok=True)
                cur_sheet = xls.sheet_by_name(sheetName)
                for row in range(0, cur_sheet.nrows):
                    for col in range(0, cur_sheet.ncols):
                        sheet_t.write(row, col, cur_sheet.cell_value(row, col), style)

                workbook_t.save(BaseDicPath + "/" + sheetName1 + ".xls")
                print(sheetName1, 'gen success')
            except Exception as e:
                print("Exception:", e)
    except Exception as e:
        print("Exception:", e)
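Example #1 leans on names the excerpt never defines: the xlrd/xlutils imports, a BaseDicPath output directory, and the style passed to sheet_t.write. A minimal sketch of that assumed setup (the path and style are guesses, not from the original project):

import xlrd
import xlwt
from xlutils.copy import copy  # turns an xlrd.Book into a writable xlwt.Workbook

BaseDicPath = './dicts'                  # assumed: directory holding the per-sheet .xls files
style = xlwt.easyxf('font: name Arial')  # assumed: cell style applied by sheet_t.write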
Example #2
    def __init__(self, file=None, meta=None):

        self.meta = meta
        if file is None:
            wb = xlrd.open_workbook('SampleData.xls')
        else:
            wb = xlrd.open_workbook(file)

        sh1 = wb.sheet_by_index(0)
        rows = []
        columns = []

        for rownum in range(sh1.nrows): # sh1.nrows -> number of rows (ncols -> num columns)
            rows.append(sh1.row_values(rownum))

        # print rows

        for column in range(sh1.ncols):
            columns.append(sh1.col_values(column))

        # print columns

        res = Generator(meta=self.meta).main(rows=rows,columns=columns)

        # print res
        #
        # import simplejson
        # res = simplejson.dumps(res)#, indent=4 * ' ')

        # with open('data2.json', 'w') as f:
        #     f.write(res)
        #
        # f.close()

        self.res = res
Example #3
    def __init__(self, io, **kwds):

        import xlrd  # throw an ImportError if we need to

        ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
        if ver < (0, 9):  # pragma: no cover
            raise ImportError("pandas requires xlrd >= 0.9.0 for excel "
                              "support, current version " + xlrd.__VERSION__)

        self.io = io

        engine = kwds.pop('engine', None)

        if engine is not None and engine != 'xlrd':
            raise ValueError("Unknown engine: %s" % engine)

        if isinstance(io, compat.string_types):
            self.book = xlrd.open_workbook(io)
        elif engine == "xlrd" and isinstance(io, xlrd.Book):
            self.book = io
        elif hasattr(io, "read"):
            data = io.read()
            self.book = xlrd.open_workbook(file_contents=data)
        else:
            raise ValueError('Must explicitly set engine if not passing in'
                             ' buffer or path for io.')
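The dispatch on io above reduces to a small pattern: a path, an already-open Book, or a readable buffer. A standalone sketch of the same idea (the function name is mine, not pandas'):

import xlrd

def open_any(io):
    """Open a workbook from a path, an xlrd.Book, or a file-like object."""
    if isinstance(io, str):                               # filesystem path
        return xlrd.open_workbook(io)
    if isinstance(io, xlrd.Book):                         # already opened
        return io
    if hasattr(io, "read"):                               # buffer / stream
        return xlrd.open_workbook(file_contents=io.read())
    raise ValueError("unsupported io type: %r" % type(io))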
Example #4
def calculateWeightOfCompletion(unitId):
    """
    calculate the weight of the completion category; note that
    we classify completion into the 4 and L4 groups, respectively.
    @params:
        unitId: unitId of every institution
    @return:
        prob: weight of completion field of each institution
    """
    database_4 = xlrd.open_workbook('completion_4.xlsx')
    table_4 = database_4.sheet_by_name('Sheet1')
    id_4 = table_4.col_values(0)
    class_4 = table_4.col_values(2)

    database_L4 = xlrd.open_workbook('completion_L4.xlsx')
    table_L4 = database_L4.sheet_by_name('Sheet1')
    id_L4 = table_L4.col_values(0)
    class_L4 = table_L4.col_values(2)

    prob = []
    weight = [0.5,0.3,0.1,0.05,0.05]
    for u in unitId:
        if u in id_4:
            ind2 = id_4.index(u)
            prob.append(weight[int(class_4[ind2])])
        elif u in id_L4:
            ind2 = id_L4.index(u)
            prob.append(weight[int(class_L4[ind2])])
        else:
            prob.append(0)
    return prob
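Since list.index rescans id_4/id_L4 for every unitId, the loop above is quadratic on large sheets. A hedged equivalent that builds one lookup dict from the same columns (assuming ids are unique within each sheet; id_4 takes precedence, mirroring the if/elif order):

weight = [0.5, 0.3, 0.1, 0.05, 0.05]
weight_by_id = {uid: weight[int(cls)] for uid, cls in zip(id_4, class_4)}
for uid, cls in zip(id_L4, class_L4):
    weight_by_id.setdefault(uid, weight[int(cls)])
prob = [weight_by_id.get(u, 0) for u in unitId]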
Example #5
    def _check_xls_export(self):
        xls_export_url = reverse(
            'xls_export', kwargs={'username': self.user.username,
                                  'id_string': self.xform.id_string})
        response = self.client.get(xls_export_url)
        expected_xls = open_workbook(os.path.join(
            self.this_directory, "fixtures", "transportation",
            "transportation_export.xls"))
        content = self._get_response_content(response)
        actual_xls = open_workbook(file_contents=content)
        actual_sheet = actual_xls.sheet_by_index(0)
        expected_sheet = expected_xls.sheet_by_index(0)

        # check headers
        self.assertEqual(actual_sheet.row_values(0),
                         expected_sheet.row_values(0))

        # check cell data
        self.assertEqual(actual_sheet.ncols, expected_sheet.ncols)
        self.assertEqual(actual_sheet.nrows, expected_sheet.nrows)
        for i in range(1, actual_sheet.nrows):
            actual_row = actual_sheet.row_values(i)
            expected_row = expected_sheet.row_values(i)

            # remove _id from result set, varies depending on the database
            del actual_row[22]
            del expected_row[22]
            self.assertEqual(actual_row, expected_row)
Example #6
 def __parse_xsl(self, file_object):
     nocolor = False
     file_contents = file_object.read()
     try:
         book = xlrd.open_workbook(file_contents=file_contents, formatting_info=True)
     except NotImplementedError:
         book = xlrd.open_workbook(file_contents=file_contents)
         nocolor = True
     self.result_dict = {}
     color_dict = {}
     slice_index = 2
     for sheet_index in range(book.nsheets):
         sheet = book.sheet_by_index(sheet_index)
         for row_index in range(sheet.nrows):
             row_dict = {}
             row_color_dict = {}
             for col_index in range(sheet.ncols):
                 if row_index >= 1:
                     cell_value = sheet.cell(row_index, col_index).value
                     if cell_value:
                         row_dict[col_index] = cell_value
                         if not nocolor:
                             xfx = sheet.cell_xf_index(row_index, col_index)
                             xf = book.xf_list[xfx]
                             row_color_dict[col_index] = xf.background.pattern_colour_index
             if row_dict:
                 self.result_dict[slice_index] = row_dict
                 
                 if row_color_dict:
                     color_dict[slice_index] = row_color_dict
                 slice_index += 1
     return (self.result_dict, color_dict)
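For context: xlrd only implements formatting_info=True for the legacy .xls format, which is why the method above falls back when NotImplementedError is raised. The same pattern as a standalone sketch (file name hypothetical):

import xlrd

def open_with_optional_formatting(path):
    """Return (book, has_formatting); degrade gracefully when formatting is unsupported."""
    try:
        return xlrd.open_workbook(path, formatting_info=True), True
    except NotImplementedError:
        return xlrd.open_workbook(path), False

book, has_formatting = open_with_optional_formatting("report.xlsx")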
Example #7
def xl_read_flags(cf,ds,level,VariablesInFile):
    # First data row in Excel worksheets.
    FirstDataRow = int(cf['Files'][level]['xl1stDataRow']) - 1
    # Get the full name of the Excel file from the control file.
    xlFullName = cf['Files'][level]['xlFilePath']+cf['Files'][level]['xlFileName']
    # Get the Excel workbook object.
    if os.path.isfile(xlFullName):
        xlBook = xlrd.open_workbook(xlFullName)
    else:
        log.error(' Excel file '+xlFullName+' not found, choose another')
        xlFullName = get_xlfilename()
        if len(xlFullName)==0:
            return
        xlBook = xlrd.open_workbook(xlFullName)
    ds.globalattributes['xlFullName'] = xlFullName
    
    for ThisOne in VariablesInFile:
        if 'xl' in cf['Variables'][ThisOne].keys():
            log.info(' Getting flags for '+ThisOne+' from spreadsheet')
            ActiveSheet = xlBook.sheet_by_name('Flag')
            LastDataRow = int(ActiveSheet.nrows)
            HeaderRow = ActiveSheet.row_values(int(cf['Files'][level]['xlHeaderRow'])-1)
            if cf['Variables'][ThisOne]['xl']['name'] in HeaderRow:
                xlCol = HeaderRow.index(cf['Variables'][ThisOne]['xl']['name'])
                Values = ActiveSheet.col_values(xlCol)[FirstDataRow:LastDataRow]
                Types = ActiveSheet.col_types(xlCol)[FirstDataRow:LastDataRow]
                ds.series[ThisOne]['Flag'] = numpy.array([-9999]*len(Values),numpy.int32)
                for i in range(len(Values)):
                    if Types[i]==2: #xlType=3 means a date/time value, xlType=2 means a number
                        ds.series[ThisOne]['Flag'][i] = numpy.int32(Values[i])
                    else:
                        log.error('  xl_read_flags: flags for '+ThisOne+' not found in xl file')
    return ds
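The magic numbers in the type test above are xlrd's cell-type codes; the same check reads better with xlrd's named constants:

import xlrd

# xlrd.XL_CELL_NUMBER == 2, xlrd.XL_CELL_DATE == 3
if Types[i] == xlrd.XL_CELL_NUMBER:
    ds.series[ThisOne]['Flag'][i] = numpy.int32(Values[i])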
Example #8
def proceed(filename, runtime, FILE):
    options = runtime.get('options', {})

    basename = os.path.basename(filename)
    root, ext = os.path.splitext(basename)
    ext = ext.lower()

    if ext in ['.xls', '.xlsx', '.xlsm', '.xlsb']:
        # Sheet
        if ext == '.xls':
            book = xlrd.open_workbook(filename, on_demand=True, formatting_info=True)
        else:
            reg_debug(FILE, "Option 'formatting_info=True' is not implemented yet!")
            book = xlrd.open_workbook(filename, on_demand=True)

        sheets = book.sheet_names()
        sheets_filter = options.get('sheets_filter')
        sheets_list = [i for i in sheets if filter_match(i, sheets_filter)]

        brief = [sheets, '---', sheets_list]
        reg_debug(FILE, brief)

        FILE.nsheets = book.nsheets

        for name in sheets_list:
            sh = book.sheet_by_name(name)
            i = sheets.index(name)
            proceed_sheet(sh, runtime, i, FILE)
            book.unload_sheet(name)
Example #9
def readTop50Schools():
    """
    read university names from the Top50 file
    """
    data1 = xlrd.open_workbook('last200Info.xlsx')
    table1 = data1.sheet_by_name('data')
    feature_name = table1.row_values(0)

    data2 = xlrd.open_workbook('candidates.xlsx')
    table2 = data2.sheet_by_name('data')
    headers = table2.row_values(0)

    feature_index=[]
    for feature in feature_name:
        if feature in headers:
            feature_index.append(headers.index(feature))
    feature_data = []
    print(feature_index)
    for i in range(0,2937):
        d=[]
        for j in feature_index:
            d.append(table2.row_values(i)[j])
        feature_data.append(d)
    print(len(feature_data), len(feature_data[0]))
    data = xlwt.Workbook()
    sheet1 = data.add_sheet(u'sheet1', cell_overwrite_ok=True)  # create the sheet
    for i in range(0,len(feature_data)):
        for j in range(0,len(feature_data[0])):
            sheet1.write(i,j,feature_data[i][j])
    data.save('candidatesFeatures.xlsx')  # note: xlwt writes legacy .xls content regardless of extension
    data.save('candidatesFeatures.csv')   # likewise, this does not produce real CSV
Example #10
	def get_books(self):
		''' get xlrd.book objects from the two xls files '''
		self.mapa_book = xlrd.open_workbook(
			"/home/med/Desktop/asomo/canarias/correc/r.mapa.xls")
		self.cmpr_book = xlrd.open_workbook(
			"/home/med/Desktop/asomo/canarias/correc/r.cmpr.xls")
Example #11
    def __init__(self, path_or_buf):
        self.use_xlsx = True
        self.path_or_buf = path_or_buf
        self.tmpfile = None

        if isinstance(path_or_buf, basestring):
            if path_or_buf.endswith('.xls'):
                self.use_xlsx = False
                import xlrd
                self.book = xlrd.open_workbook(path_or_buf)
            else:
                try:
                    from openpyxl.reader.excel import load_workbook
                    self.book = load_workbook(path_or_buf, use_iterators=True)
                except ImportError:  # pragma: no cover
                    raise ImportError(_openpyxl_msg)
        else:
            data = path_or_buf.read()

            try:
                import xlrd
                self.book = xlrd.open_workbook(file_contents=data)
                self.use_xlsx = False
            except Exception:
                from openpyxl.reader.excel import load_workbook
                buf = py3compat.BytesIO(data)
                self.book = load_workbook(buf, use_iterators=True)
Example #12
    def getAndOperateData(self):
        browser=webdriver.Firefox()
        browser.get("http://www.xebest.com:8000")
        object_excel=xlrd.open_workbook(r"E:\data\objectname_locatemethod_locatedata.xls")
        object_sheet=object_excel.sheets()[0]
        object_sheet_rows=object_sheet.nrows
        object_sheet_cols=object_sheet.ncols
        object_name_list = []  # list of the element names the login flow must locate
        for i in range(object_sheet_rows):  # collect the element names for the login flow
            object_name_list.append(object_sheet.cell(i, 0).value)
        object_name_list.pop(0)  # drop the header row of the object-name sheet
        print(object_name_list)

        username_password_list=[]
        senddata_excel=xlrd.open_workbook(r"E:\data\username_password.xls")
        senddata_sheet=senddata_excel.sheets()[0]
        senddata_sheet_rows=senddata_sheet.nrows
        senddata_sheet_cols=senddata_sheet.ncols
        for i in range(1,senddata_sheet_rows):
            username_password_list.append(senddata_sheet.row_values(i))
        print(username_password_list)


        for username,password in username_password_list:
            for m in range(len(object_name_list)):
                self.locateObject(browser,username,password,object_name_list[m])
            browser.switch_to_alert().accept()
Example #13
 def __init__(self, url, localfile=True, hash_comments=True):
     self.book = None
     self.hash_comments = hash_comments
     
     if localfile:
         try:
             self.book = xlrd.open_workbook(on_demand=True,
                                            filename=url)
         except:
             print("Error on %s" % url)
     else:
         try:
             conn = urlopen(url)
             
         except URLError as strerr:
             print("\nURL Error reading url: %s\n %s" % (url, strerr))
         
         except: 
             print("\nGeneral Error reading url: %s\n" % url)
             
         else:
             try:
                 self.book = xlrd.open_workbook(on_demand=True,
                                                file_contents=conn.read())
             except:
                 print("Error on %s" % url)
             finally:
                 conn.close()

     if self.book:
         self.datemode = self.book.datemode
Example #14
def files():
    '''Open the target file, read it, and return the first worksheet.'''
    fileName = 'NQM.xls'
    fileName2 = 'NQM.xlsx'
    if os.path.exists(fileName):
        try:
            content = xlrd.open_workbook(fileName)
            tables = content.sheet_by_index(0)
            return tables
        except:
            print('Please check the Excel file format; it must not contain attachments, links, etc.')
            time.sleep(3)
            sys.exit()
    elif os.path.exists(fileName2):
        try:
            content = xlrd.open_workbook(fileName2)
            tables = content.sheet_by_index(0)
            return tables
        except:
            print('Please check the Excel file format; it must not contain attachments, links, etc.')
            time.sleep(3)
            sys.exit()
    else:
        print('Place an "NQM.xls" or "NQM.xlsx" file in the current directory, then run again!')
        time.sleep(3)
        sys.exit()
Example #15
def is_excel(filename):
    try:
        xlrd.open_workbook(filename)
    except Exception:
        return False
    else:
        return True
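Usage is a plain truthiness test (file name hypothetical):

if is_excel("upload.bin"):
    print("xlrd can parse this file as a workbook")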
Example #16
def read_from_excel_1(file_path, case_id=1):

	book = xlrd.open_workbook(file_path, on_demand=True)
	s_general = book.sheet_by_name('general')
	s_generator = book.sheet_by_name('generator')
	np = int(s_general.cell_value(case_id, 1))
	ng = 11  # normally 11
	data_1 = SizingData(nperiods=np,
						  ngenerators = ng,
						  periodDuration=float(s_general.cell_value(case_id, 2)),
						  powerMax = [float(s_generator.cell_value(i+1,0)) for i in range(ng)],
						  countInit = [float(s_generator.cell_value(i+1,1)) for i in range(ng)],
						  onOffInit = [float(s_generator.cell_value(i+1,2)) for i in range(ng)],
						  fixedCost = [float(s_generator.cell_value(i+1,3)) for i in range(ng)],
						  rampUpLim = [float(s_generator.cell_value(i+1,4)) for i in range(ng)],
						  rampDownLim = [float(s_generator.cell_value(i+1,5)) for i in range(ng)],
						  minUpTime = [float(s_generator.cell_value(i+1,6)) for i in range(ng)],
						  minDownTime = [float(s_generator.cell_value(i+1,7)) for i in range(ng)],
						  powerMin = [float(s_generator.cell_value(i+1,8)) for i in range(ng)],
						  powerInit = [float(s_generator.cell_value(i+1,9)) for i in range(ng)],
						  powerCost = [[float(s_generator.cell_value(i+1,10+j)) for j in range(3)]for i in range(ng) ],
						  rangeCost = [[float(s_generator.cell_value(i+1,13+j)) for j in range(3)] for i in range(ng)],
						  startUpCost = [float(s_generator.cell_value(i+1,16)) for i in range(ng)]
						  )


	return data_1
Example #17
    def handle(self, *args, **options):
        import glob
        import xlrd

        wb = xlrd.open_workbook('./docs/result/100.xls')
        sh = wb.sheet_by_index(0)
        for r in range(5,sh.nrows-2):
            FundGet.objects.filter(index=sh.cell(r,1).value).update(result=sh.cell(r,9).value)
            print(sh.cell(r,1).value)
            print(sh.cell(r,9).value)
            print(r)

        wb = xlrd.open_workbook('./docs/result/101.xls')
        sh = wb.sheet_by_index(0)
        for r in range(5,sh.nrows-2):
            try:
                a = FundGet.objects.get(index=sh.cell(r,1).value)
                a.result = sh.cell(r,10).value
                a.save()
                print(r)
            except Exception as e:
                print(e)



        wb = xlrd.open_workbook('./docs/result/102.xlsx')
        sh = wb.sheet_by_index(0)
        for r in range(5,sh.nrows-2):
            try:
                a = FundGet.objects.get(index=sh.cell(r,1).value)
                a.result = sh.cell(r,10).value
                a.save()
                print(r)
            except Exception as e:
                print(e)
Example #18
 def load_workbook(self, filepath_or_buffer):
     from xlrd import open_workbook
     if hasattr(filepath_or_buffer, "read"):
         data = filepath_or_buffer.read()
         return open_workbook(file_contents=data)
     else:
         return open_workbook(filepath_or_buffer)
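A quick usage sketch; the host class here is hypothetical, the point being that the helper accepts either a path or anything with a .read() method:

import io

reader = ExcelSource()                     # hypothetical class owning load_workbook()
book = reader.load_workbook("input.xls")   # from a path
with open("input.xls", "rb") as fh:
    book = reader.load_workbook(io.BytesIO(fh.read()))  # from a buffer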
Example #19
def combine_excel():
    wb1 = xlrd.open_workbook("/home/vagrant/Downloads/log.xls")
    sheet1 = wb1.sheet_by_index(0)
    nrows = sheet1.nrows
    data1 = {}
    for row_num in range(1, nrows):
        row = sheet1.row_values(row_num)
        data1.update({row[0]: [row[1], row[2]]})

    wb2 = xlrd.open_workbook("/home/vagrant/Downloads/log1.xls")
    sheet2 = wb2.sheet_by_index(0)
    nrows = sheet2.nrows
    data2 = {}
    for row_num in range(1, nrows):
        row = sheet2.row_values(row_num)
        data2.update({row[0]: [row[1], row[2]]})

    data = {}
    keys = list(set(data1.keys()).union(set(data2.keys())))
    for k in keys:
        if k in data1 and k in data2:
            data.update({k: [data1[k][0] + data2[k][0], data1[k][1] + data2[k][1]]})

        if k in data1 and k not in data2:
            data.update({k: data1[k]})

        if k not in data1 and k in data2:
            data.update({k: data2[k]})

    return data
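The three-branch merge over the key union can be collapsed with .get() defaults, assuming the two value columns are numeric; a hedged equivalent of the loop above:

data = {}
for k in set(data1) | set(data2):
    a = data1.get(k, [0, 0])
    b = data2.get(k, [0, 0])
    data[k] = [a[0] + b[0], a[1] + b[1]]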
Example #20
def get_state_crosswalks():
	"""
	Returns a dictionary matching IMPLAN industry identifiers to the sectors used in state and national forecasts.
	
	Only Oregon provides a suitable forecast now; others may be added if they become available.
	Outer keys identify the state or 'US' for national.
	For states, inner keys are IMPLAN industry identifiers (1 to 440).
	For national, second-level keys are 'output' and 'employment' and inner keys are IMPLAN industry identifiers (1 to 440).
	Values are the sector identifiers used in the individual forecasts.
	"""
	state_crosswalks = {'OR' : {}, 'WA' : {}, 'ID' : {}, 'NV' : {}, 'CA' : {}, 'US' : {'output' : {}, 'employment' : {}}}
	wb = open_workbook('IMPLAN_OEA.xls')
	sheet = wb.sheet_by_index(0)
	for i in range(1, 436):
		row = sheet.row_values(i, 0)
		state_crosswalks['OR'][row[0]] = row[2]
	wb = open_workbook('implan_gi_sectors.xls')
	sheet = wb.sheet_by_index(0)
	for i in range(1, 437):
		row = sheet.row_values(i, 0)
		state_crosswalks['US']['output'][row[0]] = row[1]
	sheet = wb.sheet_by_index(1)
	for i in range(1, 437):
		row = sheet.row_values(i, 0)
		state_crosswalks['US']['employment'][row[0]] = row[1]
	return state_crosswalks
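Lookups against the returned structure follow the docstring's nesting; the identifier below is illustrative only:

crosswalks = get_state_crosswalks()
implan_id = 1.0  # xlrd returns numeric cells as floats, so the keys are floats
oea_sector = crosswalks['OR'][implan_id]           # Oregon: IMPLAN industry -> OEA sector
gi_output = crosswalks['US']['output'][implan_id]  # national: IMPLAN industry -> output sector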
Example #21
def get_IMPLAN_crosswalks():
	"""
	Returns two dictionaries matching IMPLAN industries and commodities to AA activities and AA commodities.
	
	The first, implan_ned_activity, matches IMPLAN industry identifiers to AA activity identifiers.
		Outer keys are IMPLAN industry identifiers.
		Second-level keys are AA activity identifiers.
		Inner keys are 'employment_proportion' and 'output_proportion'.
		Values range from zero to one and represent the proportion of activity in the IMPLAN sector 
			that is assigned to that AA activity.
	The second, implan_aa_commodity, matches IMPLAN commodity identifiers to AA commodity identifiers.
		Outer keys are IMPLAN commodity identifiers.
		Inner keys are AA commodity identifiers.
		Values range from zero to one and represent the proportion of the IMPLAN commodity 
			that is assigned to that AA commodity.
	"""
	implan_ned_activity = {}
	wb = open_workbook('implan_ned_activity.xls')
	sheet = wb.sheet_by_index(0)
	for i in range(1, 1014):
		row = sheet.row_values(i, 0)
		implan_ned_activity.setdefault(row[0], {})
		implan_ned_activity[row[0]][row[1]] = {'employment_proportion' : row[3], 'output_proportion' : row[2]}
	implan_aa_commodity = {}
	wb = open_workbook('implan_aa_commodity.xls')
	sheet = wb.sheet_by_index(0)
	for i in range(1, 443):
		row = sheet.row_values(i, 0)
		implan_aa_commodity.setdefault(row[0], {})
		implan_aa_commodity[row[0]][row[1]] = row[2]
	return implan_ned_activity, implan_aa_commodity
Example #22
File: qlang.py Project: jkpr/qlang
def translations_to_questionnaire(filename, prefix, suffix):
    first_part, second_part = os.path.split(filename)
    if not second_part.startswith(prefix):
        m = '"{}" does not start with supplied prefix "{}"'
        m = m.format(second_part, prefix)
        raise QlangError(m)
    orig_filename = os.path.join(first_part,second_part[len(prefix):])
    full_file, ext = os.path.splitext(orig_filename)
    dest_filename = full_file + suffix + ext
    with xlrd.open_workbook(filename) as book:
        with xlrd.open_workbook(orig_filename) as orig:
            trans_ws = book.sheet_by_name(QLANG_WORKSHEET_NAME)
            # Copy over "survey" and "choices" after merging translations
            survey_ws = orig.sheet_by_name(SURVEY)
            new_survey = get_worksheet_w_trans(survey_ws, trans_ws)
            choices_ws = orig.sheet_by_name(CHOICES)
            new_choices = get_worksheet_w_trans(choices_ws, trans_ws)
            wb = xlsxwriter.Workbook(dest_filename)
            survey_out_ws = wb.add_worksheet(SURVEY)
            write_out_worksheet(survey_out_ws, new_survey)
            choices_out_ws = wb.add_worksheet(CHOICES)
            write_out_worksheet(choices_out_ws, new_choices)
            # Copy all other sheets over
            for sheet in orig.sheet_names():
                if sheet not in (SURVEY, CHOICES):
                    rows = get_unicode_ws(orig.sheet_by_name(sheet))
                    this_ws = wb.add_worksheet(sheet)
                    write_out_worksheet(this_ws, rows)
            wb.close()
    m = 'Translations successfully merged: "{}"'.format(dest_filename)
    print(m)
Example #23
def load_spreadsheet(source):
    """Attempt to open the specified file using xlrd.  

    'source' should either be an absolute filename, or an open
    file object (e.g., the result of urllib.urlopen)
    
    Catches and suppresses common exceptions, but outputs a warning.
    """
    # TODO: use real python warnings
    try:
        if hasattr(source,'read'):
            workbook = open_workbook(file_contents=source.read(), 
                                     formatting_info=True,
                                     logfile=sys.stderr)
        else:
            workbook = open_workbook(source, 
                                     formatting_info=True,
                                     logfile=sys.stderr)
    except XLRDError as e:
        if 'Expected BOF' in str(e):
            logging.error("Error reading file (file extension may be wrong):")
            logging.error(e)
        elif 'Workbook is encrypted' in str(e):
            logging.error("Encrypted workbook:")
            logging.error(e)
        elif "Can't find workbook in OLE2" in str(e):
            logging.error("Weird OLE2 doc format:")
            logging.error(e)
        else:
            raise
        return
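Per the docstring, both call styles work; the URL below is a placeholder:

workbook = load_spreadsheet('/tmp/report.xls')            # absolute filename

import urllib2                                            # urllib.request on Python 3
workbook = load_spreadsheet(urllib2.urlopen('http://example.com/report.xls'))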
Example #24
def assemble_samples(layout_path, quant_path):
    A1 = np.array((3, 2))
    layout = xlrd.open_workbook(layout_path).sheet_by_name('Sheet1')
    quant = xlrd.open_workbook(quant_path).sheet_by_name('0')
    # make sure we're actually at A1
    if layout.cell_value(*(A1 + (0,-1))) != 'A' or layout.cell_value(*(A1 + (-1,0))) != 1:
        raise ValueError("A1 seems to be in the wrong place or the input files are swapped.")
    rows = 'ABCDEFGH'
    cols = np.arange(12)+1
    sample_index = {}
    for (i, row) in enumerate(rows):
        for (j, col) in enumerate(cols):
            value = layout.cell_value(*(A1+(i,j)))
            if value: sample_index['%s%02d' % (row, col)] = str(value)

    start_row = 1
    name_col = 1
    cq_col = 6
    cq_index = {}
    for row in range(96):
        name = quant.cell_value(start_row+row, name_col)
        value = quant.cell_value(start_row+row, cq_col) or 'nan'
        cq_index[name] = float(value)

    print('Well\tSample\tCq\tTarget')
    wells = sorted(sample_index.keys())
    for well in wells:
        print('%s\t%s\t%f\t' % (well, sample_index[well], cq_index[well]))
Example #25
def upload_table(file_input, filename):

    if filename == '':
        with open("templates/index_error.template", 'r') as index_error:
            index_err_message = Template(index_error.read())
            return index_err_message.substitute(error_message="Please enter a file!")

    if filename[len(filename)-4:] != ".xls" and filename[len(filename)-5:] != ".xlsx":
        with open("templates/index_error.template", 'r') as index_error:
            index_err_message = Template(index_error.read())
            return index_err_message.substitute(error_message="Wrong file extension!")


    if filename[len(filename)-4:] == ".xls":
        workbook = xlrd.open_workbook(file_contents=file_input.read(), encoding_override='cp1252', formatting_info=True)
    else:
        workbook = xlrd.open_workbook(file_contents=file_input.read(), encoding_override='cp1252')

    list_of_sheets_name = workbook.sheet_names()

    id_tabs_list = []
    for sheet_name in list_of_sheets_name:
        sheet_name = unicodedata.normalize('NFKD', sheet_name).encode('ASCII', 'ignore')
        sheet_name = sheet_name.lower()
        sheet_name = sheet_name.replace(' ', '-')
        id_tabs_list.append(sheet_name)

    with open("templates/index.template", 'r') as index_file, \
            open("templates/table.template", 'r') as table_file, \
            open("templates/table_rows.template", 'r') as table_rows_file, \
            open("templates/table_row_col.template", 'r') as table_row_col_file, \
            open("templates/table_row_col_head.template", 'r') as table_row_col_head_file, \
            open("templates/tabs.template", 'r') as tabs_file:
        index = Template(index_file.read())
        table = Template(table_file.read())
        table_rows = Template(table_rows_file.read())
        table_row_col = Template(table_row_col_file.read())
        table_row_col_head = Template(table_row_col_head_file.read())
        tabs = Template(tabs_file.read())
        num_sheet = 0
        render_table = ""
        render_tab = ""
        active = " in active"
        for sheet in workbook.sheets():
            if num_sheet != 0:
                render_tab += tabs.substitute(tab=list_of_sheets_name[num_sheet],
                                              tab_id=id_tabs_list[num_sheet]) + "\t\t\t\t"
                active = ""
            render_table_rows, render_table_head = process_sheet(sheet, table_rows,
                                                                 table_row_col, table_row_col_head)
            render_table += table.substitute(tab_id=id_tabs_list[num_sheet],
                                             active=active,
                                             table_head=render_table_head,
                                             table_rows=render_table_rows) + "\t\t\t\t"
            num_sheet += 1
        return index.substitute(tab_id=id_tabs_list[0],
                                first_tab=list_of_sheets_name[0],
                                tab=render_tab,
                                table=render_table)
Example #26
def calculateWeightOfStudent(unitId):
    """
    calculate the weight of the student category; note that we
    classify students into the 4 and L4 groups, respectively.
    @params:
        unitId: unitId of every institution
    @return:
        prob: weight of student field of each institution
    """
    database_4 = xlrd.open_workbook('student_L4_class.xlsx')
    table_4 = database_4.sheet_by_name('sheet1')
    id_4 = table_4.col_values(0)
    class_4 = table_4.col_values(1)

    database_L4 = xlrd.open_workbook('student_4_class.xlsx')
    table_L4 = database_L4.sheet_by_name('sheet1')
    id_L4 = table_L4.col_values(0)
    class_L4 = table_L4.col_values(1)

    prob = []
    weight = [0.167,0.167,0.167,0.167,0.167,0.167]
    for u in unitId:
        if u in id_4:
            ind2 = id_4.index(u)
            prob.append(weight[int(class_4[ind2])])
        elif u in id_L4:
            ind2 = id_L4.index(u)
            prob.append(weight[int(class_L4[ind2])])
        else:
            prob.append(0)
    return prob
Example #27
def define_duration():
    """
    get investment duration of each institution to be invested,
    and write it to file.
    """
    data = xlrd.open_workbook('ROI.xlsx')
    table = data.sheet_by_name('data')
    unitId = table.col_values(0)[1:242]
    data_4 = xlrd.open_workbook('student_4_class.xlsx')
    table_4 = data_4.sheet_by_name('sheet1')
    id_4 = table_4.col_values(0)
    data_L4 = xlrd.open_workbook('student_L4_class.xlsx')
    table_L4 = data_L4.sheet_by_name('sheet1')
    id_L4 = table_L4.col_values(0)
    flag = []
    for id in unitId:
        if id in id_4:
            flag.append(1)
        else:
            flag.append(0)
    data = xlwt.Workbook()
    sheet1 = data.add_sheet(u'sheet1',cell_overwrite_ok=True) 
    for i in range(0,len(flag)):
        sheet1.write(i,0,flag[i])
    data.save('duration.xlsx')  # note: xlwt writes legacy .xls content regardless of extension
    data.save('duration.csv')   # likewise, this does not produce real CSV
Example #28
 def __init__(self, spreadsheet, input_encoding='utf-8', sheet=1, control_row=None, force_dates=False, object_type='parent'):
     '''Open file and get data from correct sheet.
     
     First, try opening the file as an excel spreadsheet.
     If that fails, try opening it as a CSV file.
     Exit with error if CSV doesn't work.
     '''
     self.obj_type = object_type
     self._force_dates = force_dates
     self._input_encoding = input_encoding
     self._user_ctrl_row_number = control_row
     try:
         try:
             self.book = xlrd.open_workbook(spreadsheet)
         except TypeError:
             self.book = xlrd.open_workbook(file_contents=spreadsheet.read())
         self.dataset = self.book.sheet_by_index(int(sheet)-1)
         self.data_type = 'xlrd'
     except xlrd.XLRDError as xerr:
         #if it's not excel, try csv
         try:
             with open(spreadsheet, 'rt', encoding=self._input_encoding) as csv_file:
                 self._process_csv_file(csv_file)
         except TypeError:
             #got a file object, which might have been opened in binary format
             spreadsheet.seek(0)
             spreadsheet_bytes = spreadsheet.read()
             spreadsheet_text = spreadsheet_bytes.decode(self._input_encoding)
             spreadsheet_file = io.StringIO(spreadsheet_text)
             self._process_csv_file(spreadsheet_file)
         except RuntimeError:
             raise RuntimeError('Could not recognize file format - must be .xls, .xlsx, or .csv.')
Example #29
def calculateWeightOfCost(unitId):
    """
    calculate the weight of the cost category; note that we
    classify cost into public and private, respectively.
    @params:
        unitId: unitId of every institution
    @return:
        prob: weight of cost field of each institution
    """
    database_pub = xlrd.open_workbook('cost_pub_class.xlsx')
    table_pub = database_pub.sheet_by_name('sheet1')
    id_pub = table_pub.col_values(0)
    class_pub = table_pub.col_values(1)

    database_pri = xlrd.open_workbook('cost_pri_class.xlsx')
    table_pri = database_pri.sheet_by_name('sheet1')
    id_pri = table_pri.col_values(0)
    class_pri = table_pri.col_values(1)
    
    prob = []
    weight = [0.2,0.8]
    weight_class = [0.5,0.5,0.1,0.3,0.5]
    for u in unitId:
        if u in id_pub:
            ind2 = id_pub.index(u)
            prob.append(weight[0] * weight_class[int(class_pub[ind2])])
        elif u in id_pri:
            ind2 = id_pri.index(u)
            prob.append(weight[1] * weight_class[int(class_pri[ind2])])
        else:
            prob.append(0)
    return prob
Example #30
def excel_setup() -> None:
    """opens the necessary files/worksheets from excel documents"""
    global awl, aoa, tasa, subtlex, zeno
    # I tried to make a for loop for this but it never worked...
    try:
        awl = xlrd.open_workbook("AWL.xls")
        awl = awl.sheet_by_index(0)
        print("1/5")
    except:
        print("Failed to load file: AWL.xls")
    try:
        aoa = xlrd.open_workbook("AoA.xlsx")
        aoa = aoa.sheet_by_index(0)
        print("2/5")
    except:
        print("Failed to load file: AoA.xlsx")
    try:
        tasa = xlrd.open_workbook("tasa.xlsx")
        tasa = tasa.sheet_by_index(0)
        print("3/5")
    except:
        print("Failed to load file: tasa.xlsx")
    try:
        subtlex = xlrd.open_workbook("SUBTLEX.xlsx")
        subtlex = subtlex.sheet_by_index(0)
        print("4/5")
    except:
        print("Failed to load file: SUBTLEX.xlsx")

    try:
        zeno = xlrd.open_workbook("Zeno.xlsx")
        zeno = zeno.sheet_by_index(0)
    except:
        print("Failed to load file: Zeno.xlsx")
    return
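The comment above says a for loop "never worked"; the usual culprit is that rebinding a loop variable does not touch the module-level names. A hedged sketch that loads the same five files in a loop and assigns the globals once at the end:

def excel_setup_loop() -> None:
    """Sketch: same five workbooks, loaded in a loop."""
    global awl, aoa, tasa, subtlex, zeno
    names = ["AWL.xls", "AoA.xlsx", "tasa.xlsx", "SUBTLEX.xlsx", "Zeno.xlsx"]
    sheets = {}
    for n, name in enumerate(names, start=1):
        try:
            sheets[name] = xlrd.open_workbook(name).sheet_by_index(0)
            print("%d/%d" % (n, len(names)))
        except Exception:
            print("Failed to load file: " + name)
    awl, aoa, tasa, subtlex, zeno = (sheets.get(n) for n in names)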
Example #31
        arcpy.TableToExcel_conversion(output_dir+ch5id+".dbf", output_hist+basin_name+".xls")
        proceed = 'yes'
    
#        else:
#            if ch5id in check_list:
#                basin_name = ch5id
#                if len(ch5id) <6:
#                    basin_name = ch5id + 'LOC'
#                arcpy.TableToExcel_conversion(output_dir+ch5id+".dbf", output_hist+basin_name+".xls")
#                proceed = 'yes'
        print(basin_name)
    
        # loop through and create data array for histogram/percentile calcs
        if proceed == 'yes':
            hist_array = []
            wb = xlrd.open_workbook(output_hist+basin_name+".xls")
            worksheet = wb.sheet_by_name(basin_name)
            num_rows = worksheet.nrows - 1
            num_cells = worksheet.ncols - 1
            curr_row = -1
            ##### define elevation zone ranges
            if basin_name[:5] in elev_splits: # search for 5 char basin id in split dictionary
                if basin_name[-1:] == 'U' or basin_name[-3:] == 'upr' or basin_name[-3:] == 'UPR':
                    min_elev = elev_splits[basin_name[:5]][-1]/3.28084
                    max_elev = 99999
                if basin_name[-2:] == 'MI' or basin_name[-3:] == 'mid' or basin_name[-3:] == 'MID':
                    min_elev = elev_splits[basin_name[:5]][-2]/3.28084
                    max_elev = elev_splits[basin_name[:5]][-1]/3.28084
                if basin_name[-1:] == 'L' or basin_name[-3:] == 'lwr' or basin_name[-3:] == 'LWR':
                    min_elev = elev_splits[basin_name[:5]][0]/3.28084
                    max_elev = elev_splits[basin_name[:5]][1]/3.28084
Example #32
from sklearn.model_selection import KFold
import tensorflow as tf
from scipy import io
import numpy as np
# import data_gather_sin1_auto
from random import randint
import xlwt
import xlrd
import math
import csv
import random

num_derv = 40
num_input = 52
workbook = xlrd.open_workbook('derv_data1_double.xlsx')
# workbook = xlrd.open_workbook('myfile.xls')
sheet1 = workbook.sheet_by_name('derv_data1')
total_train_row = 4320
training_epochs = 10000
batch = 500
num_batch = total_train_row // batch
traindata = np.zeros(shape=[total_train_row, num_input + num_derv])
# testdata_n = np.zeros(shape=[10000, num_input])
# testdata_p = np.zeros(shape=[sheet1.nrows, 4])
for index1 in range(0, total_train_row):
    for index in range(0, num_input + num_derv):  # bound by the array width to avoid overrunning traindata
        traindata[index1, index] = sheet1.cell_value(index1, index)
        # traindata[index1, 5] = sheet1.cell_value(index1, 5)
mnist = traindata
# mnist1 = traindata
# np.random.shuffle(mnist1)
Example #33
#hide_number.py
import json
from lxml import etree
from xlwt import Workbook
import xlrd, codecs

excel = xlrd.open_workbook("./unicom.xls")
sheet = excel.sheet_by_name("2017年02月语音通信")

excel_editing = Workbook()
sheet_editing = excel_editing.add_sheet('2017年02月语音通信')
'''
Mask part of each phone number: hide digits 2-7
'''
for i in range(sheet.nrows):
    row = sheet_editing.row(i)
    for j in range(sheet.ncols):
        if i > 0 and j == 5:
            st = str(sheet.cell(i, j).value)
            new_st = '1' + '******' + st[7:]
            row.write(j, new_st)
        else:
            row.write(j, str(sheet.cell(i, j).value))

excel_editing.save('unicom.xls')
Example #34
    def main(cmd_args):
        import optparse
        global options, PSYCO
        usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
        oparser = optparse.OptionParser(usage)
        oparser.add_option("-l",
                           "--logfilename",
                           default="",
                           help="contains error messages")
        oparser.add_option(
            "-v",
            "--verbosity",
            type="int",
            default=0,
            help="level of information and diagnostics provided")
        oparser.add_option(
            "-m",
            "--mmap",
            type="int",
            default=-1,
            help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
        oparser.add_option("-e",
                           "--encoding",
                           default="",
                           help="encoding override")
        oparser.add_option(
            "-f",
            "--formatting",
            type="int",
            default=0,
            help="0 (default): no fmt info\n"
            "1: fmt info (all cells)\n",
        )
        oparser.add_option(
            "-g",
            "--gc",
            type="int",
            default=0,
            help=
            "0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc"
        )
        oparser.add_option(
            "-s",
            "--onesheet",
            default="",
            help="restrict output to this sheet (name or index)")
        oparser.add_option("-u",
                           "--unnumbered",
                           action="store_true",
                           default=0,
                           help="omit line numbers or offsets in biff_dump")
        oparser.add_option("-d",
                           "--on-demand",
                           action="store_true",
                           default=0,
                           help="load sheets on demand instead of all at once")
        oparser.add_option("-t",
                           "--suppress-timing",
                           action="store_true",
                           default=0,
                           help="don't print timings (diffs are less messy)")
        oparser.add_option("-r",
                           "--ragged-rows",
                           action="store_true",
                           default=0,
                           help="open_workbook(..., ragged_rows=True)")
        options, args = oparser.parse_args(cmd_args)
        if len(args) == 1 and args[0] in ("version", ):
            pass
        elif len(args) < 2:
            oparser.error("Expected at least 2 args, found %d" % len(args))
        cmd = args[0]
        xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
        if cmd == 'biff_dump':
            xlrd.dump(args[1], unnumbered=options.unnumbered)
            sys.exit(0)
        if cmd == 'biff_count':
            xlrd.count_records(args[1])
            sys.exit(0)
        if cmd == 'version':
            print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
            print("Python:", sys.version)
            sys.exit(0)
        if options.logfilename:
            logfile = LogHandler(open(options.logfilename, 'w'))
        else:
            logfile = sys.stdout
        mmap_opt = options.mmap
        mmap_arg = xlrd.USE_MMAP
        if mmap_opt in (1, 0):
            mmap_arg = mmap_opt
        elif mmap_opt != -1:
            print('Unexpected value (%r) for mmap option -- assuming default' %
                  mmap_opt)
        fmt_opt = options.formatting | (cmd in ('xfc', ))
        gc_mode = options.gc
        if gc_mode:
            gc.disable()
        for pattern in args[1:]:
            for fname in glob.glob(pattern):
                print("\n=== File: %s ===" % fname)
                if logfile != sys.stdout:
                    logfile.setfileheading("\n=== File: %s ===\n" % fname)
                if gc_mode == 1:
                    n_unreachable = gc.collect()
                    if n_unreachable:
                        print("GC before open:", n_unreachable,
                              "unreachable objects")
                if PSYCO:
                    import psyco
                    psyco.full()
                    PSYCO = 0
                try:
                    t0 = time.time()
                    bk = xlrd.open_workbook(
                        fname,
                        verbosity=options.verbosity,
                        logfile=logfile,
                        use_mmap=mmap_arg,
                        encoding_override=options.encoding,
                        formatting_info=fmt_opt,
                        on_demand=options.on_demand,
                        ragged_rows=options.ragged_rows,
                    )
                    t1 = time.time()
                    if not options.suppress_timing:
                        print("Open took %.2f seconds" % (t1 - t0, ))
                except xlrd.XLRDError as e:
                    print("*** Open failed: %s: %s" % (type(e).__name__, e))
                    continue
                except KeyboardInterrupt:
                    print("*** KeyboardInterrupt ***")
                    traceback.print_exc(file=sys.stdout)
                    sys.exit(1)
                except BaseException as e:
                    print("*** Open failed: %s: %s" % (type(e).__name__, e))
                    traceback.print_exc(file=sys.stdout)
                    continue
                t0 = time.time()
                if cmd == 'hdr':
                    bk_header(bk)
                elif cmd == 'ov':  # OverView
                    show(bk, 0)
                elif cmd == 'show':  # all rows
                    show(bk)
                elif cmd == '2rows':  # first row and last row
                    show(bk, 2)
                elif cmd == '3rows':  # first row, 2nd row and last row
                    show(bk, 3)
                elif cmd == 'bench':
                    show(bk, printit=0)
                elif cmd == 'fonts':
                    bk_header(bk)
                    show_fonts(bk)
                elif cmd == 'names':  # named reference list
                    show_names(bk)
                elif cmd == 'name_dump':  # named reference list
                    show_names(bk, dump=1)
                elif cmd == 'labels':
                    show_labels(bk)
                elif cmd == 'xfc':
                    count_xfs(bk)
                else:
                    print("*** Unknown command <%s>" % cmd)
                    sys.exit(1)
                del bk
                if gc_mode == 1:
                    n_unreachable = gc.collect()
                    if n_unreachable:
                        print("GC post cmd:", fname, "->", n_unreachable,
                              "unreachable objects")
                if not options.suppress_timing:
                    t1 = time.time()
                    print("\ncommand took %.2f seconds\n" % (t1 - t0, ))

        return None
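Since cmd_args mirrors sys.argv[1:], the runner can be exercised directly; two hedged invocations using commands from the dispatch above:

main(["ov", "data.xls"])               # overview of every sheet
main(["-f", "1", "show", "data.xls"])  # dump all rows with formatting info loaded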
Example #35
import xlrd
import csv
import os
for file in os.listdir("xlsx/"):
    if file.endswith(".xlsx"):
        print(file)
        print(os.path.join(os.path.splitext(file)[0] + ".csv"))
        with xlrd.open_workbook(os.path.join("xlsx", file)) as wb:
            sh = wb.sheet_by_index(0)  # wb.sheet_by_name('sheet_name')
            with open(os.path.join("dml" + os.path.splitext(file)[0] + ".csv"),
                      'w',
                      encoding="utf-16",
                      newline="") as f:
                col = csv.writer(f)
                for row in range(sh.nrows):
                    col.writerow(sh.row_values(row))
Example #36
    help="Increase verbosity (specify multiple times for more)")    

if __name__ == "__main__":
    args = parser.parse_args()
    log_level = logging.WARNING # default
    if args.verbose == 1:
        log_level = logging.INFO
    elif args.verbose >= 2:
        log_level = logging.DEBUG
        DEBUG=True
    logging.basicConfig(
        level=log_level, 
        format='%(msecs)d:%(module)s:%(lineno)d:%(levelname)s: %(message)s')        

    with open(args.screen_result_file) as input_file:
        wb = xlrd.open_workbook(file_contents=input_file.read())
        print(json_printer(read_workbook(wb)))
        
# def parse_result_row_orig(i,parsed_columns,result_row):    
#     
#     logger.debug('parse result row: %r', result_row)
#     
#     meta_columns = RESULT_VALUE_FIELD_MAP.values()
#     parsed_row = {}
#     excluded_cols = []
#     
#     meta_key = 'plate_number'
#     val = result_row[meta_key]
#     logger.debug('plate value to parse: %r', val)
#     plate_number = parse_val(val, meta_key, 'integer')
#     meta_key = 'well_name'
Example #37
import xlrd
import xlwt
import os

wb = xlrd.open_workbook(
    'C:\\Users\\Autobio-A3517\\Desktop\\test.xlsx')  # path to the Excel workbook
table1 = wb.sheets()[0]  # first worksheet in the workbook
n_rows = table1.nrows  # number of rows in the worksheet
n_cols = table1.ncols  # number of columns in the worksheet


def CreateTxt():
    for i in range(1, n_rows):
        name = table1.cell(i, 1).value  # cell value from the second column of each row
        path = 'C:\\Users\\Autobio-A3517\\Desktop\\test\\'  # directory for the output .txt files
        full_path = path + str(name) + '.txt'  # name the .txt file after the second-column cell
        SeqData = table1.cell(i, 2).value  # gene-sequence data from the third column
        with open(full_path, 'w') as f:  # write the sequence data into the named .txt file
            f.write(SeqData)


if __name__ == "__main__":
    CreateTxt()
Example #38
#-*- coding: utf8 -*-

import xlrd

fname = u"百度热词.xls"
bk = xlrd.open_workbook(fname)
shxrange = range(bk.nsheets)
try:
    sh = bk.sheet_by_name("Sheet1")
except:
    print("no sheet in %s named Sheet1" % fname)
# number of rows
nrows = sh.nrows
print(type(nrows))
# number of columns
ncols = sh.ncols
print("nrows %d, ncols %d" % (nrows, ncols))


for i in range(nrows):
	for j in range(ncols):
		print(sh.cell_value(i, j))
# value of the cell at row 1, column 1 (zero-based)
cell_value = sh.cell_value(1, 1)
print("cell_value:", cell_value)

# row_list = []
# # values of each row
# for i in range(1, nrows):
#     row_data = sh.row_values(i)
#     print(unicode(row_data))
Example #39
    def parse(self, response):
        # file = 'propertylist.xls'
        file = 'records.xls'
        #Getting phone numbers from sheet2 of contacts.xlsx file
        rows = self.read_xls(file)
        print("here in self")
        """
            Modifying the existing xls file
        """
        rb = open_workbook(file)
        wb = copy(rb)
        sheet = wb.get_sheet(0)
        # sheet.write(0,23,'Phone')
        print(sheet)
        # sheet.write(0,42,'Phone(pipl.com)')
        # sheet.write(0,24,'Phone(pipl.com)')

        try:
            options = webdriver.ChromeOptions()
            options.add_argument("--start-maximized")
            options.add_argument("--disable-javascript")
            options.add_argument("--disable-java")
            options.add_argument("--disable-plugins")
            options.add_argument("--disable-popup-blocking")
            options.add_argument("--disable-images")
            driver = webdriver.Chrome('c://chromedriver.exe',
                                      chrome_options=options)

            # start = 90
            # rows = rows[start:]

            for ctr, row in enumerate(rows[1:], start=1):
                print(ctr)
                #row_col = row[5].value
                first_name = row[2].value
                last_name = row[4].value
                middle_name = row[3].value
                full_name = ' '.join([first_name, last_name]).lower()
                # if middle_name:
                #     full_name = ' '.join([first_name,middle_name,last_name])

                city_col = row[6].value
                st = row[7].value

                #name_col = row[11].value
                name_col = full_name

                name = urllib.quote_plus(name_col)
                location = urllib.quote_plus(city_col + ', ' + st)
                driver.get(
                    'https://pipl.com/search/?q={}&l={}&sloc=&in=6'.format(
                        name, location))

                html = driver.page_source
                hxs = fromstring(html)

                with open('profile.html', 'wb') as hf:
                    hf.write(html)
                """
                If No Result Found for then skipping
                """
                no_results = hxs.xpath('//div[class="header no_results"]')
                if no_results:
                    time.sleep(2)
                    continue

                #print "sheet name %s" % full_name
                profiles_container = hxs.xpath(
                    '//div[@id="profile_container"]')
                for profile_container in profiles_container:
                    pipl_name = profile_container.xpath(
                        './div/div[@id="profile_summary"]/div/span[@class="highlight"]/text()'
                    )
                    print("pipl name is:%s" % pipl_name)
                    # pipl_name = profile_container.xpath('normalize-space(./div/div[@id="profile_summary"]/text())')
                    phones = profile_container.xpath(
                        '//ul[@class="phones"]/li/a/text()')
                    pipl_phones = [
                        re.sub('\D*', '', phone) for phone in phones
                    ]
                    #print phones
                    print(pipl_phones)
                    # sheet_phone = re.sub('\D*', '', sheet_phone)
                    """
                    if total digits are greater than 10 strip left digit to get only 10 digit
                    """
                    # if len(sheet_phone) > 10:
                    #     sheet_phone = sheet_phone[1:10]
                    # print sheet_phone
                    # print pipl_name
                    if pipl_name:
                        # pipl_name = re.sub('\s*\(.*\)|\W*\s*[\(\)]','',name)
                        pipl_name = ' '.join(pipl_name)
                        pipl_name = pipl_name.lower()
                        print(ctr)
                        print("pipl name %s" % pipl_name)
                        """
                        checking if sheet name and site name are matched then putting address data to xls
                        """
                        print('Full name:%s' % full_name)

                        if all(x in full_name.split()
                               for x in pipl_name.split()) or all(
                                   x in pipl_name.split()
                                   for x in full_name.split()):
                            print("name matched exactly")
                            sheet.write(ctr, 11, ', '.join(pipl_phones))
                            wb.save('file.xls')

                time.sleep(1)

        except Exception as e:
            print(e)
Example #40
# coding = utf-8

import re
import xlrd
import os
import json
import csv
from openpyxl import load_workbook
from pprint import pprint

kywdFile = xlrd.open_workbook('new_alias.xlsx')
kywdTable = kywdFile.sheets()[0]
kywdRows = kywdTable.nrows
kywdCols = kywdTable.ncols

aliasDict = {}

for rowIndex in range(1, kywdRows):
    rowValue = kywdTable.row_values(rowIndex)

    if rowValue[1] not in aliasDict:
        aliasDict[rowValue[1]] = [rowValue[0]]
    elif rowValue[0] not in aliasDict[rowValue[1]]:
        aliasDict[rowValue[1]].append(rowValue[0])
    else:
        pass

wb = load_workbook('dict_with_alias.xlsx')
ws = wb['Sheet1']
for i in range(2, len(list(ws.rows)) + 1):
    thisNewAlias = []
Example #41
# Run all the tests
print("Run all tests:")
testPreviousOrderSource()
testCleanAndReturnNumberString()

###### PHASE 1: PROCESSING EXCEL ORDERS

for f in files:
    if f.endswith("xls"):
        xlsfiles.append(f)

orders = []  #########order processing############
for xfile in xlsfiles:
    filepath = os.path.join(targetdir, xfile)
    #dowork
    xl_workbook = xlrd.open_workbook(filepath)
    xl_sheet = xl_workbook.sheet_by_index(1)
    print('Sheet name: %s' % xl_sheet.name)
    #print xlrd.xldate_as_datetime(42088,xl_workbook.datemode)

    curr_order = []
    for order in range(xl_sheet.nrows):
        row = xl_sheet.row(order)

        ######## PO_NUMBER , PARSING NAME
        if row[0].value == 'PATIENT NAME / CODE NO.':
            #print '===================================='
            curr_order = {
            }  ################ Creation of the dictionary [why here, why not higher]
            curr_order['wo_num'] = ''
            curr_order['pt_num'] = ''
Example #42
def get_first_sheet(filename):
    # Open the workbook
    xl_workbook = xlrd.open_workbook(filename)

    # Return first sheet
    return xl_workbook.sheet_by_index(0)
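Usage (file name hypothetical):

sheet = get_first_sheet("data.xls")
print(sheet.name, sheet.nrows, sheet.ncols)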
Example #43
        self.y = y
        self.lDcm = cosmo.luminosity_distance(self.z)*u.Mpc.to(u.cm) / u.Mpc
        self.radToKpc = conv.arcsec_per_kpc_proper(self.z)*0.05/u.arcsec*u.kpc

# Grabbing and filling galaxy data
cf = ['coarse','fine']

# label describing image processing
rc = ['final','convolved_image']
# matching file name ending whoooooooot.
# not confident the h in needed in whooooot. but lets roll with it for now. its a nice h
fex = ['*sci.fits','.fits']

galaxies = []

wbo = open_workbook('galaxydata.xlsx')
for sheet in wbo.sheets():
    numr = sheet.nrows
    
    for row in range(1,numr):
        galaxies.append(Galaxy(sheet.cell(row,0).value,sheet.cell(row,1).value,sheet.cell(row,2).value,sheet.cell(row,3).value))



# define the directory that contains the images
dir = '/Users/sgottlie/Desktop/linux-lab/'


# define a function to plot "postage stamp" images
def plot_image():
    std = np.std(stamp[stamp==stamp])
Example #44
        workbook.close()
    return os.path.join('ACC_'+str(i)+'.xlsx')

def sum(a, axes, i, initial, final):
    sum = 0
    for j in range(initial, final + 1):
        sum = sum + a[axes, i, j]
    return sum

for n in range(0,Total_data):
	dirpath = os.getcwd()
	foldername = os.path.basename(dirpath)
	cwd = os.getcwd()
	file_location = foldername + '/Training_Data/ACC_' + str(n) + '.csv'
	excelfile = convert.convert_csv_to_xlsx(file_location,n)
	workbook = xlrd.open_workbook(cwd + '/Training_Data/' + excelfile)
	sheet = workbook.sheet_by_index(0)
	s = 0  # Segment no
	k1 = 0  # Frame No
	k2 = 0
	n1 = 0
	n2 = 0
	axis = 3  # Number of axis
	number_of_features = 5
	gesture_no = 2
	N = 9  # Number of segments of identical length
	kmax = N  # No of Frames
	number_of_training_samples = Total_data
	L = sheet.nrows  # Length of the temporal sequence
	LS = int(math.floor(L / (N + 1)))  # Length of each segment
	total_features = 3*number_of_features*N
Example #45
import scraperwiki
import urllib.request
import xlrd

url = "http://www.whatdotheyknow.com/request/49869/response/128432/attach/3/sw%20climate%20sitelisting.xls"

raw = urllib.request.urlopen(url).read()
book = xlrd.open_workbook(file_contents=raw)

sheet = book.sheets()[0]


def sheet_rows(sheet):
    for i in range(sheet.nrows):
        row = sheet.row(i)
        yield row


def remove_blanks(rows):
    for row in rows:
        if row[11].value:
            yield row


for i, row in enumerate(remove_blanks(sheet_rows(sheet))):
    if row[0].value.lower() == 'region':
        # ignore first row: header line
        continue
    if row[0].value:
        # Size is dubious.  Don't know if anything appears in Column E.
        d = dict(
def main(RegionalScope,ResBldsList,SectorIn):
    
    import xlrd
    import numpy as np
    import matplotlib.pyplot as plt  
    import pylab
    import os
    import RECC_Paths # Import path file   
    
    # FileOrder:
    # 1) None
    # 2) + EoL + FSD + FYI
    # 3) + EoL + FSD + FYI + ReU +LTE
    # 4) + EoL + FSD + FYI + ReU +LTE + MSu
    # 5) + EoL + FSD + FYI + ReU +LTE + MSu + LWE 
    # 6) + EoL + FSD + FYI + ReU +LTE + MSu + LWE + MIU = ALL 
    
    Region      = RegionalScope
    FolderlistB = ResBldsList
    
    # Waterfall plots.
    
    NS = 3 # no of SSP scenarios
    NR = 2 # no of RCP scenarios
    NE = 6 # no of Res. eff. scenarios
    
    CumEmsV        = np.zeros((NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    CumEmsV2060    = np.zeros((NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    AnnEmsV2030    = np.zeros((NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    AnnEmsV2050    = np.zeros((NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    ASummaryV      = np.zeros((12,NE)) # For direct copy-paste to Excel
    AvgDecadalEmsV = np.zeros((NS,NE,4)) # SSP-Scenario x RES scenario, RCP fixed to RCP2.6
    # for materials:
    MatCumEmsV        = np.zeros((NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    MatCumEmsV2060    = np.zeros((NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    MatAnnEmsV2030    = np.zeros((NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    MatAnnEmsV2050    = np.zeros((NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario    
    MatSummaryV       = np.zeros((12,NE)) # For direct copy-paste to Excel
    AvgDecadalMatEmsV = np.zeros((NS,NE,4)) # SSP-Scenario x RES scenario, RCP is fixed: RCP2.6
    # for materials incl. recycling credit:
    MatCumEmsVC       = np.zeros((NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    MatCumEmsVC2060   = np.zeros((NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    MatAnnEmsV2030C   = np.zeros((NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    MatAnnEmsV2050C   = np.zeros((NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario    
    MatSummaryVC      = np.zeros((12,NE)) # For direct copy-paste to Excel
    AvgDecadalMatEmsVC= np.zeros((NS,NE,4)) # SSP-Scenario x RES scenario, RCP is fixed: RCP2.6
    
    for r in range(0,NE): # RE scenario
        Path = os.path.join(RECC_Paths.results_path,FolderlistB[r],'SysVar_TotalGHGFootprint.xls')
        Resultfile = xlrd.open_workbook(Path)
        Resultsheet = Resultfile.sheet_by_name('TotalGHGFootprint')
        for s in range(0,NS): # SSP scenario
            for c in range(0,NR):
                for t in range(0,35): # years through 2050 only: cumulative emissions to 2050
                    CumEmsV[s,c,r] += Resultsheet.cell_value(t +2, 1 + c + NR*s)
                for t in range(0,45): # time until 2060.
                    CumEmsV2060[s,c,r] += Resultsheet.cell_value(t +2, 1 + c + NR*s)                    
                AnnEmsV2030[s,c,r]  = Resultsheet.cell_value(16  , 1 + c + NR*s)
                AnnEmsV2050[s,c,r]  = Resultsheet.cell_value(36  , 1 + c + NR*s)
            AvgDecadalEmsV[s,r,0]   = sum([Resultsheet.cell_value(i, 2*(s+1)) for i in range(7,17)])/10
            AvgDecadalEmsV[s,r,1]   = sum([Resultsheet.cell_value(i, 2*(s+1)) for i in range(17,27)])/10
            AvgDecadalEmsV[s,r,2]   = sum([Resultsheet.cell_value(i, 2*(s+1)) for i in range(27,37)])/10
            AvgDecadalEmsV[s,r,3]   = sum([Resultsheet.cell_value(i, 2*(s+1)) for i in range(37,47)])/10                    
                
    ASummaryV[0:3,:] = AnnEmsV2030[:,1,:].copy()
    ASummaryV[3:6,:] = AnnEmsV2050[:,1,:].copy()
    ASummaryV[6:9,:] = CumEmsV[:,1,:].copy()
    ASummaryV[9::,:] = CumEmsV2060[:,1,:].copy()
                        
    # Waterfall plot            
    MyColorCycle = pylab.cm.Set1(np.arange(0,1,0.14)) # select colors from the 'Set1' color map.
    
    Sector = SectorIn
    Title  = ['Cum_GHG_2016_2050','Cum_GHG_2040_2050','Annual_GHG_2050']
    Scens  = ['LED','SSP1','SSP2']
    LWE    = ['No RE','higher yields', 're-use/longer use','material subst.','light design','more intense use','All RE strategies']
    
    for nn in range(0,3):
        for m in range(0,NS): # SSP
            if nn == 0:
                Data = np.einsum('SE->ES',CumEmsV[:,1,:])
            if nn == 1:
                Data = np.einsum('SE->ES',10*AvgDecadalEmsV[:,:,2])
            if nn == 2:
                Data = np.einsum('SE->ES',AnnEmsV2050[:,1,:])
                
            inc = -100 * (Data[0,m] - Data[5,m])/Data[0,m]
        
            Left  = Data[0,m]
            Right = Data[5,m]
            # plot results
            bw = 0.5
            ga = 0.3
        
            fig  = plt.figure(figsize=(5,8))
            ax1  = plt.axes([0.08,0.08,0.85,0.9])
        
            ProxyHandlesList = []   # For legend     
            # plot bars
            ax1.fill_between([0,0+bw], [0,0],[Left,Left],linestyle = '--', facecolor =MyColorCycle[0,:], linewidth = 0.0)
            ax1.fill_between([1,1+bw], [Data[1,m],Data[1,m]],[Left,Left],linestyle = '--', facecolor =MyColorCycle[1,:], linewidth = 0.0)
            ax1.fill_between([2,2+bw], [Data[2,m],Data[2,m]],[Data[1,m],Data[1,m]],linestyle = '--', facecolor =MyColorCycle[2,:], linewidth = 0.0)
            ax1.fill_between([3,3+bw], [Data[3,m],Data[3,m]],[Data[2,m],Data[2,m]],linestyle = '--', facecolor =MyColorCycle[3,:], linewidth = 0.0)
            ax1.fill_between([4,4+bw], [Data[4,m],Data[4,m]],[Data[3,m],Data[3,m]],linestyle = '--', facecolor =MyColorCycle[4,:], linewidth = 0.0)
            ax1.fill_between([5,5+bw], [Data[5,m],Data[5,m]],[Data[4,m],Data[4,m]],linestyle = '--', facecolor =MyColorCycle[5,:], linewidth = 0.0)
            ax1.fill_between([6,6+bw], [0,0],[Data[5,m],Data[5,m]],linestyle = '--', facecolor =MyColorCycle[6,:], linewidth = 0.0)
            
            ProxyHandlesList.append(plt.Rectangle((0, 0), 1, 1, fc=MyColorCycle[0,:])) # create proxy artist for legend
            ProxyHandlesList.append(plt.Rectangle((0, 0), 1, 1, fc=MyColorCycle[1,:])) # create proxy artist for legend
            ProxyHandlesList.append(plt.Rectangle((0, 0), 1, 1, fc=MyColorCycle[2,:])) # create proxy artist for legend
            ProxyHandlesList.append(plt.Rectangle((0, 0), 1, 1, fc=MyColorCycle[3,:])) # create proxy artist for legend
            ProxyHandlesList.append(plt.Rectangle((0, 0), 1, 1, fc=MyColorCycle[4,:])) # create proxy artist for legend
            ProxyHandlesList.append(plt.Rectangle((0, 0), 1, 1, fc=MyColorCycle[5,:])) # create proxy artist for legend
            ProxyHandlesList.append(plt.Rectangle((0, 0), 1, 1, fc=MyColorCycle[6,:])) # create proxy artist for legend
            
            # plot lines:
            plt.plot([0,7.5],[Left,Left],linestyle = '-', linewidth = 0.5, color = 'k')
            plt.plot([1,2.5],[Data[1,m],Data[1,m]],linestyle = '-', linewidth = 0.5, color = 'k')
            plt.plot([2,3.5],[Data[2,m],Data[2,m]],linestyle = '-', linewidth = 0.5, color = 'k')
            plt.plot([3,4.5],[Data[3,m],Data[3,m]],linestyle = '-', linewidth = 0.5, color = 'k')
            plt.plot([4,5.5],[Data[4,m],Data[4,m]],linestyle = '-', linewidth = 0.5, color = 'k')
            plt.plot([5,6.5],[Data[5,m],Data[5,m]],linestyle = '-', linewidth = 0.5, color = 'k')
    
            plt.arrow(6.25, Data[5,m],0, Data[0,m]-Data[5,m], lw = 0.8, ls = '-', shape = 'full',
                  length_includes_head = True, head_width =0.1, head_length =0.01*Left, ec = 'k', fc = 'k')
            plt.arrow(6.25,Data[0,m],0,Data[5,m]-Data[0,m], lw = 0.8, ls = '-', shape = 'full',
                  length_includes_head = True, head_width =0.1, head_length =0.01*Left, ec = 'k', fc = 'k')
    
            # plot text and labels
            plt.text(5.00, 0.94 *Left, ("%3.0f" % inc) + ' %',fontsize=18,fontweight='bold')          
            plt.text(2.8, 0.94  *Right, Scens[m],fontsize=18,fontweight='bold') 
            plt.title('RE strategies and GHG emissions, ' + Sector[0] + '.', fontsize = 18)
            plt.ylabel(Title[nn] + ', Mt.', fontsize = 18)
            plt.xticks([0.25,1.25,2.25,3.25,4.25,5.25,6.25])
            plt.yticks(fontsize =18)
            ax1.set_xticklabels([], rotation =90, fontsize = 21, fontweight = 'normal')
            plt_lgd  = plt.legend(handles = ProxyHandlesList,labels = LWE,shadow = False, prop={'size':12},ncol=1, loc = 'upper right' ,bbox_to_anchor=(1.91, 1)) 
            #plt.axis([-0.2, 7.7, 0.9*Right, 1.02*Left])
            plt.axis([-0.2, 6.7, 0, 1.02*Left])
        
            plt.show()
            fig_name = Title[nn] + '_' + Region + '_' + Sector[0] + '_' + Scens[m] + '.png'
            fig.savefig(os.path.join(RECC_Paths.results_path,fig_name), dpi = 400, bbox_inches='tight')             
                
    
    ### Area plot RE
            
    NS = 3
    NR = 2
    NE = 6
    Nt = 45
    Nm = 6
    
    AnnEmsV = np.zeros((Nt,NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    #AnnEmsB = np.zeros((Nt,NS,NC,NR)) # SSP-Scenario x RCP scenario x RES scenario
    MatEmsV = np.zeros((Nt,NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    #MatEmsB = np.zeros((Nt,NS,NC,NR)) # SSP-Scenario x RCP scenario x RES scenario
    MatStocks = np.zeros((Nt,Nm,NS,NR,NE))
    
    for r in range(0,NE): # RE scenario
        Path = os.path.join(RECC_Paths.results_path,FolderlistB[r],'SysVar_TotalGHGFootprint.xls')
        Resultfile   = xlrd.open_workbook(Path)
        Resultsheet  = Resultfile.sheet_by_name('TotalGHGFootprint')
        Resultsheet1 = Resultfile.sheet_by_name('Cover')
        UUID         = Resultsheet1.cell_value(3,2)
        Resultfile2  = xlrd.open_workbook(os.path.join(RECC_Paths.results_path,FolderlistB[r],'ODYM_RECC_ModelResults_' + UUID + '.xlsx'))
        Resultsheet2 = Resultfile2.sheet_by_name('Model_Results')
        # Find the index for the recycling credit and others:
        rci = 1
        while True:
            if Resultsheet2.cell_value(rci, 0) == 'GHG emissions, recycling credits':
                break # that gives us the right index to read the recycling credit from the result table.
            rci += 1
        mci = 1
        while True:
            if Resultsheet2.cell_value(mci, 0) == 'GHG emissions, material cycle industries and their energy supply _3di_9di':
                break # that gives us the right index to read the recycling credit from the result table.
            mci += 1
            
        ms1 = 1
        while True:
            if Resultsheet2.cell_value(ms1, 0) == 'In-use stock, construction grade steel':
                break # that gives us the right index from the result table.
            ms1 += 1            
        ms2 = 1
        while True:
            if Resultsheet2.cell_value(ms2, 0) == 'In-use stock, automotive steel':
                break # that gives us the right index from the result table.
            ms2 += 1 
        ms3 = 1
        while True:
            if Resultsheet2.cell_value(ms3, 0) == 'In-use stock, stainless steel':
                break # that gives us the right index from the result table.
            ms3 += 1
        ms4 = 1
        while True:
            if Resultsheet2.cell_value(ms4, 0) == 'In-use stock, cast iron':
                break # that gives us the right index from the result table.
            ms4 += 1 
        ms5 = 1
        while True:
            if Resultsheet2.cell_value(ms5, 0) == 'In-use stock, wrought Al':
                break # that gives us the right index from the result table.
            ms5 += 1 
        ms6 = 1
        while True:
            if Resultsheet2.cell_value(ms6, 0) == 'In-use stock, cast Al':
                break # that gives us the right index from the result table.
            ms6 += 1 
        ms7 = 1
        while True:
            if Resultsheet2.cell_value(ms7, 0) == 'In-use stock, copper electric grade':
                break # that gives us the right index from the result table.
            ms7 += 1 
        ms8 = 1
        while True:
            if Resultsheet2.cell_value(ms8, 0) == 'In-use stock, plastics':
                break # that gives us the right index from the result table.
            ms8 += 1 
        ms9 = 1
        while True:
            if Resultsheet2.cell_value(ms9, 0) == 'In-use stock, cement':
                break # that gives us the right index from the result table.
            ms9 += 1 
        ms10 = 1
        while True:
            if Resultsheet2.cell_value(ms10, 0) == 'In-use stock, wood and wood products':
                break # that gives us the right index from the result table.
            ms10 += 1             
                    
        for s in range(0,NS): # SSP scenario
            for c in range(0,NR):
                for t in range(0,45): # time
                    AnnEmsV[t,s,c,r] = Resultsheet.cell_value(t +2, 1 + c + NR*s)
                    MatEmsV[t,s,c,r] = Resultsheet2.cell_value(mci+ 2*s +c,t+8)
        # Material results export
        for s in range(0,NS): # SSP scenario
            for c in range(0,NR):
                for t in range(0,35): # years through 2050 only: cumulative emissions to 2050
                    MatCumEmsV[s,c,r] += Resultsheet2.cell_value(mci+ 2*s +c,t+8)
                for t in range(0,45): # time until 2060.
                    MatCumEmsV2060[s,c,r] += Resultsheet2.cell_value(mci+ 2*s +c,t+8)                    
                MatAnnEmsV2030[s,c,r]  = Resultsheet2.cell_value(mci+ 2*s +c,22)
                MatAnnEmsV2050[s,c,r]  = Resultsheet2.cell_value(mci+ 2*s +c,42)
            AvgDecadalMatEmsV[s,r,0]   = sum([Resultsheet2.cell_value(mci+ 2*s +1,t) for t in range(13,23)])/10
            AvgDecadalMatEmsV[s,r,1]   = sum([Resultsheet2.cell_value(mci+ 2*s +1,t) for t in range(23,33)])/10
            AvgDecadalMatEmsV[s,r,2]   = sum([Resultsheet2.cell_value(mci+ 2*s +1,t) for t in range(33,43)])/10
            AvgDecadalMatEmsV[s,r,3]   = sum([Resultsheet2.cell_value(mci+ 2*s +1,t) for t in range(43,53)])/10    
        # Material results export, including recycling credit
        for s in range(0,NS): # SSP scenario
            for c in range(0,NR):
                for t in range(0,35): # years through 2050 only: cumulative emissions to 2050
                    MatCumEmsVC[s,c,r]+= Resultsheet2.cell_value(mci+ 2*s +c,t+8) + Resultsheet2.cell_value(rci+ 2*s +c,t+8)
                for t in range(0,45): # time until 2060.
                    MatCumEmsVC2060[s,c,r]+= Resultsheet2.cell_value(mci+ 2*s +c,t+8) + Resultsheet2.cell_value(rci+ 2*s +c,t+8)
                MatAnnEmsV2030C[s,c,r] = Resultsheet2.cell_value(mci+ 2*s +c,22)  + Resultsheet2.cell_value(rci+ 2*s +c,22)
                MatAnnEmsV2050C[s,c,r] = Resultsheet2.cell_value(mci+ 2*s +c,42)  + Resultsheet2.cell_value(rci+ 2*s +c,42)
            AvgDecadalMatEmsVC[s,r,0]  = sum([Resultsheet2.cell_value(mci+ 2*s +1,t) for t in range(13,23)])/10 + sum([Resultsheet2.cell_value(rci+ 2*s +1,t) for t in range(13,23)])/10
            AvgDecadalMatEmsVC[s,r,1]  = sum([Resultsheet2.cell_value(mci+ 2*s +1,t) for t in range(23,33)])/10 + sum([Resultsheet2.cell_value(rci+ 2*s +1,t) for t in range(23,33)])/10
            AvgDecadalMatEmsVC[s,r,2]  = sum([Resultsheet2.cell_value(mci+ 2*s +1,t) for t in range(33,43)])/10 + sum([Resultsheet2.cell_value(rci+ 2*s +1,t) for t in range(33,43)])/10
            AvgDecadalMatEmsVC[s,r,3]  = sum([Resultsheet2.cell_value(mci+ 2*s +1,t) for t in range(43,53)])/10 + sum([Resultsheet2.cell_value(rci+ 2*s +1,t) for t in range(43,53)])/10                       
    
        # Material stocks export
        for s in range(0,NS): # SSP scenario
            for c in range(0,NR):
                for t in range(0,45): # time until 2060
                    MatStocks[t,0,s,c,r] = Resultsheet2.cell_value(ms1+ 2*s +c,t+8) + Resultsheet2.cell_value(ms2+ 2*s +c,t+8) + Resultsheet2.cell_value(ms3+ 2*s +c,t+8) + Resultsheet2.cell_value(ms4+ 2*s +c,t+8)
                    MatStocks[t,1,s,c,r] = Resultsheet2.cell_value(ms5+ 2*s +c,t+8) + Resultsheet2.cell_value(ms6+ 2*s +c,t+8)
                    MatStocks[t,2,s,c,r] = Resultsheet2.cell_value(ms7+ 2*s +c,t+8)
                    MatStocks[t,3,s,c,r] = Resultsheet2.cell_value(ms9+ 2*s +c,t+8)
                    MatStocks[t,4,s,c,r] = Resultsheet2.cell_value(ms8+ 2*s +c,t+8)
                    MatStocks[t,5,s,c,r] = Resultsheet2.cell_value(ms10+ 2*s +c,t+8)    
    
    MatSummaryV[0:3,:] = MatAnnEmsV2030[:,1,:].copy() # RCP is fixed: RCP2.6
    MatSummaryV[3:6,:] = MatAnnEmsV2050[:,1,:].copy() # RCP is fixed: RCP2.6
    MatSummaryV[6:9,:] = MatCumEmsV[:,1,:].copy()     # RCP is fixed: RCP2.6                    
    MatSummaryV[9::,:] = MatCumEmsV2060[:,1,:].copy() # RCP is fixed: RCP2.6                    
    
    MatSummaryVC[0:3,:]= MatAnnEmsV2030C[:,1,:].copy() # RCP is fixed: RCP2.6
    MatSummaryVC[3:6,:]= MatAnnEmsV2050C[:,1,:].copy() # RCP is fixed: RCP2.6
    MatSummaryVC[6:9,:]= MatCumEmsVC[:,1,:].copy()     # RCP is fixed: RCP2.6
    MatSummaryVC[9::,:]= MatCumEmsVC2060[:,1,:].copy() # RCP is fixed: RCP2.6
    
    # Area plot, stacked, GHG emissions, system
    MyColorCycle = pylab.cm.Set1(np.arange(0,1,0.1)) # select colors from the 'Set1' color map.
    grey0_9      = np.array([0.9,0.9,0.9,1])
    
    Title      = ['GHG_System_RES_stack','GHG_material_cycles_RES_stack']
    Sector     = SectorIn
    Scens      = ['LED','SSP1','SSP2']
    LWE_area   = ['higher yields', 're-use & LTE','material subst.','light design','more intense use']     
    
    for nn in range(0,len(Title)):
        #mS = 1
        #mR = 1
        mRCP = 1 # select RCP2.6, which has full implementation of RE strategies by 2050.
        for mS in range(0,NS): # SSP
            for mR in range(0,1): # Blds
                
                if nn == 0 and mR == 0:
                    Data = AnnEmsV[:,mS,mRCP,:]
                
                if nn == 1 and mR == 0:
                    Data = MatEmsV[:,mS,mRCP,:]                
                
                fig  = plt.figure(figsize=(8,5))
                ax1  = plt.axes([0.08,0.08,0.85,0.9])
                
                ProxyHandlesList = []   # For legend     
                
                # plot area
                ax1.fill_between(np.arange(2016,2061),np.zeros((Nt)), Data[:,-1], linestyle = '-', facecolor = grey0_9, linewidth = 1.0, alpha=0.5)
                ProxyHandlesList.append(plt.Rectangle((0, 0), 1, 1, fc=grey0_9)) # create proxy artist for legend
                for m in range(5,0,-1):
                    ax1.fill_between(np.arange(2016,2061),Data[:,m], Data[:,m-1], linestyle = '-', facecolor = MyColorCycle[m,:], linewidth = 1.0, alpha=0.5)
                    ProxyHandlesList.append(plt.Rectangle((0, 0), 1, 1, fc=MyColorCycle[m,:], alpha=0.75)) # create proxy artist for legend
                    ax1.plot(np.arange(2016,2061),Data[:,m],linestyle = '--', color = MyColorCycle[m,:], linewidth = 1.1,)                
                ax1.plot(np.arange(2016,2061),Data[:,0],linestyle = '--', color = 'k', linewidth = 1.1,)               
                #plt.text(Data[m,:].min()*0.55, 7.8, 'Baseline: ' + ("%3.0f" % Base[m]) + ' Mt/yr.',fontsize=14,fontweight='bold')
                plt.text(2027,Data.max()*1.02, 'Colors may deviate from legend colors due to overlap of RES wedges.',fontsize=8.5,fontweight='bold')
                
                plt.title(Title[nn] + ' \n' + Region + ', ' + Sector[mR] + ', ' + Scens[mS] + '.', fontsize = 18)
                plt.ylabel('Mt of CO2-eq.', fontsize = 18)
                plt.xlabel('Year', fontsize = 18)
                plt.xticks(fontsize=18)
                plt.yticks(fontsize=18)
                if mR == 0: # buildings, upper right
                    plt_lgd  = plt.legend(handles = reversed(ProxyHandlesList),labels = LWE_area, shadow = False, prop={'size':12},ncol=1, loc = 'upper right')# ,bbox_to_anchor=(1.91, 1)) 
                ax1.set_xlim([2015, 2061])
                
                plt.show()
                fig_name = Title[nn] + '_' + Region + '_' + Sector[mR] + '_' + Scens[mS] + '.png'
                fig.savefig(os.path.join(RECC_Paths.results_path,fig_name), dpi = 400, bbox_inches='tight')             
           
    ##### line Plot overview of primary steel and steel recycling
    
    # Select scenario list: same as for bar chart above
    # E.g. for the USA, run code lines 41 to 59.
    
    MyColorCycle = pylab.cm.Paired(np.arange(0,1,0.2))
    #linewidth = [1.2,2.4,1.2,1.2,1.2]
    linewidth  = [1.2,2,1.2]
    linewidth2 = [1.2,2,1.2]
    
    ColorOrder         = [1,0,3]
            
    NS = 3
    NR = 2
    NE = 6
    Nt = 45
    
    # Primary steel
    AnnEmsV_PrimarySteel   = np.zeros((Nt,NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    AnnEmsV_SecondarySteel = np.zeros((Nt,NS,NR,NE)) # SSP-Scenario x RCP scenario x RES scenario
    
    for r in range(0,NE): # RE scenario
        Path         = os.path.join(RECC_Paths.results_path,FolderlistB[r],'SysVar_TotalGHGFootprint.xls')
        Resultfile1  = xlrd.open_workbook(Path)
        Resultsheet1 = Resultfile1.sheet_by_name('Cover')
        UUID         = Resultsheet1.cell_value(3,2)
        Resultfile2  = xlrd.open_workbook(os.path.join(RECC_Paths.results_path,FolderlistB[r],'ODYM_RECC_ModelResults_' + UUID + '.xlsx'))
        Resultsheet2 = Resultfile2.sheet_by_name('Model_Results')
        # Find the index for materials
        pps = 1
        while True:
            if Resultsheet2.cell_value(pps, 0) == 'Primary steel production':
                break # that gives us the right index to read the recycling credit from the result table.
            pps += 1
        sps = 1
        while True:
            if Resultsheet2.cell_value(sps, 0) == 'Secondary steel':
                break # that gives us the right index to read the recycling credit from the result table.
            sps += 1
            
        for s in range(0,NS): # SSP scenario
            for c in range(0,NR):
                for t in range(0,45): # time
                    AnnEmsV_PrimarySteel[t,s,c,r] = Resultsheet2.cell_value(pps + 2*s +c,t+8)
                    AnnEmsV_SecondarySteel[t,s,c,r] = Resultsheet2.cell_value(sps + 2*s +c,t+8)
                    
    Title      = ['primary_steel','secondary_steel']            
    Sector     = SectorIn
    ScensL     = ['SSP2, no REFs','SSP2, full REF spectrum','SSP1, no REFs','SSP1, full REF spectrum','LED, no REFs','LED, full REF spectrum']
    
    #mS = 1
    #mR = 1
    for nn in range(0,2):
        mRCP = 1 # select RCP2.6, which has full implementation of RE strategies by 2050.
        for mR in range(0,1): # Veh/Buildings
            
            if nn == 0:
                Data = AnnEmsV_PrimarySteel[:,:,mRCP,:]
            if nn == 1:
                Data = AnnEmsV_SecondarySteel[:,:,mRCP,:]
        
        
            fig  = plt.figure(figsize=(8,5))
            ax1  = plt.axes([0.08,0.08,0.85,0.9])
            
            ProxyHandlesList = []   # For legend     
            
            for mS in range(NS-1,-1,-1):
                ax1.plot(np.arange(2016,2061), Data[:,mS,0],  linewidth = linewidth[mS],  linestyle = '-',  color = MyColorCycle[ColorOrder[mS],:])
                #ProxyHandlesList.append(plt.line((0, 0), 1, 1, fc=MyColorCycle[m,:]))
                ax1.plot(np.arange(2016,2061), Data[:,mS,-1], linewidth = linewidth2[mS], linestyle = '--', color = MyColorCycle[ColorOrder[mS],:])
                #ProxyHandlesList.append(plt.Rectangle((0, 0), 1, 1, fc=MyColorCycle[m,:]))     
            plt_lgd  = plt.legend(ScensL,shadow = False, prop={'size':12}, loc = 'upper left',bbox_to_anchor=(1.05, 1))    
            plt.ylabel('Mt/yr.', fontsize = 18) 
            plt.xlabel('year', fontsize = 18)         
            plt.title(Title[nn] + ', by socio-economic scenario, \n' + Region + ', ' + Sector[mR] + '.', fontsize = 18)
            plt.xticks(fontsize=18)
            plt.yticks(fontsize=18)
            ax1.set_xlim([2015, 2061])
            plt.gca().set_ylim(bottom=0)
            
            plt.show()
            fig_name = Title[nn] + '_' + Region + '_' + Sector[mR] + '.png'
            fig.savefig(os.path.join(RECC_Paths.results_path,fig_name), dpi = 400, bbox_inches='tight')             
        
    
    return ASummaryV, AvgDecadalEmsV, MatSummaryV, AvgDecadalMatEmsV, MatSummaryVC, AvgDecadalMatEmsVC, CumEmsV, AnnEmsV2050, MatStocks
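The result-row lookups above repeat the same unbounded while-loop ten times. A small helper captures the pattern; this is a sketch, not part of the original script (find_row is a hypothetical name):

def find_row(sheet, label, col=0, start=1):
    # scan a column until a cell matches the label, as the while-loops above do,
    # but bounded by sheet.nrows so a missing label raises instead of looping forever
    for r in range(start, sheet.nrows):
        if sheet.cell_value(r, col) == label:
            return r
    raise ValueError('label not found: ' + label)

# e.g. mci = find_row(Resultsheet2, 'GHG emissions, material cycle industries and their energy supply _3di_9di')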
Example #47
import xlrd
dict = {}  # note: this shadows the built-in dict
gentype = ''
number = []
beizhu = {}
data = []
ll = 0
ExcelFile = xlrd.open_workbook(r'Report_Haplotype_GT.2.xls', formatting_info=True)
sheet = ExcelFile.sheet_by_name('Sheet1')
num = []
mynum = []
col_index = []
mf_list = []
lie = {}
bianhao = []
margin = []
amend = {}
base_ = []
genum = []
getk = []
tkv = []
name = []
name1 = []
for row in range(0, sheet.nrows):
    rows = sheet.row_values(row)
    if row == 0:
        for col in range(0, sheet.ncols):
            name.append(rows[col])
    elif row == 1:
        for col in range(3, sheet.ncols):
            if rows[col] != '':
        scp.put(source_file,
                remote_path='/vmfs/volumes/datastore1/{0}'.format(vm_name))
        print(" DEBUG: ... Register the virtual machine {}".format(vm_name))
        ssh.exec_command(register_command)

    ssh.close()


print("The script working directory is {}".format(os.path.dirname(__file__)))
script_dir = os.path.dirname(__file__)

vmx_env = Environment(loader=FileSystemLoader(script_dir),
                      trim_blocks=True,
                      lstrip_blocks=True)

workbook = xlrd.open_workbook(os.path.join(script_dir, "vm_inventory.xlsx"))
sheet = workbook.sheet_by_index(0)
print("The number of rows inside the Excel sheet is {}".format(sheet.nrows))
print("The number of columns inside the Excel sheet is {}".format(sheet.ncols))

vmx_data = {}

for row in range(1, sheet.nrows):
    vm_name = sheet.row(row)[0].value
    vm_memory_size = int(sheet.row(row)[1].value)
    vm_cpu = int(sheet.row(row)[2].value)
    cpu_per_socket = int(sheet.row(row)[3].value)
    vm_hdd_size = int(sheet.row(row)[4].value)
    vm_guest_os = sheet.row(row)[5].value
    vm_network1 = sheet.row(row)[6].value
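    # The loop body is cut off here. Presumably each row is rendered through the
    # Jinja2 environment created above; a sketch, assuming a template file named
    # 'vm.vmx.j2' (the template name and render fields are assumptions):
    template = vmx_env.get_template('vm.vmx.j2')
    vmx_data[vm_name] = template.render(vm_name=vm_name,
                                        vm_memory_size=vm_memory_size,
                                        vm_cpu=vm_cpu,
                                        cpu_per_socket=cpu_per_socket,
                                        vm_hdd_size=vm_hdd_size,
                                        vm_guest_os=vm_guest_os,
                                        vm_network1=vm_network1)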
Example #49
import xlrd

def open_excel(file='file.xls'):
    try:
        data = xlrd.open_workbook(file)
        return data
    except Exception as e:
        print(str(e))
Example #50
def main():
    
    # read the configuration file
    parser.read('Configuration.cfg')
    out = parser.get('sortie', 'out')  # output type
    fichier = parser.get('Doc', 'fichier')  # path to the Excel file
    
    
    
    Ma_date = date.today()  # get the system date
    # print(Ma_date)
    Mon_heure = time.strftime("%H%M", time.localtime(time.time()))  # get the system time
    # print(Mon_heure)
    Jour = calendar.day_name[Ma_date.weekday()]  # get the system day of the week
    # print(Jour)
    
    
    
    # open the workbook
    classeur = xlrd.open_workbook(fichier)
    # get the names of all the sheets as a list
    nom_des_feuilles = classeur.sheet_names()
    # get the first sheet
    feuille = classeur.sheet_by_name(nom_des_feuilles[0])
    
  
    if Jour == 'Monday':
        if (feuille.cell_value(1,1) <= Mon_heure) and (feuille.cell_value(2,1) > Mon_heure):
            sortie = True
        else:
            sortie = False
            
    if Jour == 'Tuesday':
        if (feuille.cell_value(1,2) <= Mon_heure) and (feuille.cell_value(2,2) > Mon_heure):
            sortie = True
        else:
            sortie = False
    
    if Jour == 'Wednesday':
        if (feuille.cell_value(1,3) <= Mon_heure) and (feuille.cell_value(2,3) > Mon_heure):
            sortie = True
        else:
            sortie = False
    
    if Jour == 'Thursday':
        if (feuille.cell_value(1,4) <= Mon_heure) and (feuille.cell_value(2,4) > Mon_heure):
            sortie = True
        else:
            sortie = False
    
    if Jour == 'Friday':
        if (feuille.cell_value(1,5) <= Mon_heure) and (feuille.cell_value(2,5) > Mon_heure):
            sortie = True
        else:
            sortie = False
    
    if Jour == 'Saturday':
        if (feuille.cell_value(1,6) <= Mon_heure) and (feuille.cell_value(2,6) > Mon_heure):
            sortie = True
        else:
            sortie = False
    
    if Jour == 'Sunday':
        if (feuille.cell_value(1,7) <= Mon_heure) and (feuille.cell_value(2,7) > Mon_heure):
            sortie = True
        else:
            sortie = False
    
    print(sortie)
    
    if sortie and out == '433':
        print("433")
        sender.sendDecimal(code_on, 24)
    else:
        sender.sendDecimal(code_off, 24)
        
        
    if sortie and out == 'GPIO':
        print("GPIO")
        GPIO.output(25,GPIO.HIGH)
    else:
        GPIO.output(25,GPIO.LOW)
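The seven day-of-week branches above differ only in the column index. Since date.weekday() returns 0 for Monday through 6 for Sunday, the whole block reduces to one lookup; a sketch under that assumption, meant to replace the if-chain inside main():

    col = Ma_date.weekday() + 1  # Monday -> column 1, ..., Sunday -> column 7
    sortie = feuille.cell_value(1, col) <= Mon_heure < feuille.cell_value(2, col)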
Example #51
    def read_rows(self, excel_path, sheet_index=0):
        data = xlrd.open_workbook(excel_path)
        table = data.sheet_by_index(sheet_index)
        row_count = table.nrows
        for i in range(row_count):
            yield table.row_values(i)
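Because read_rows is a generator, rows stream one at a time instead of materializing the whole sheet; a usage sketch, where reader stands for a hypothetical instance of the surrounding class:

for row in reader.read_rows('data.xls'):
    print(row)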
Example #52
# DOCS - https://python-googlesearch.readthedocs.io/en/latest/
# Got it from here - https://www.geeksforgeeks.org/performing-google-search-using-python-code/

import xlrd
import time
import urllib.error

# imports module for google searching 
try: 
	from googlesearch import search
except ImportError: 
	print("No module named 'google' found") 

#to read input workbook
book = xlrd.open_workbook('/Users/tavasyaagarwal/desktop/growthhacking.xlsx')
sheet = book.sheet_by_name('Outlets')

inputdata = []
outputdata = []

for i in range(sheet.nrows):
	inputdata.append(sheet.cell_value(i, 1))

#removing the first two cell values -- they aren't URLs
inputdata.remove('')
inputdata.remove('Outlets we published on')
#print(data)

#print (len(data))

# to search
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import xlrd
import time
import random
option = webdriver.ChromeOptions()
# run in developer mode to avoid automation detection
option.add_experimental_option('excludeSwitches', ['enable-automation'])
option.add_argument(
    r'--user-data-dir=C:\Users\81316\AppData\Local\Google\Chrome\linjj\Default')  # load the Chrome profile path obtained earlier
driver = webdriver.Chrome(options=option)
# driver.maximize_window()
data = xlrd.open_workbook('user.xlsx')
table = data.sheets()[0]
links = table.col_values(int(1))
for url in links[1:]:
    print(url)
    try:
        driver.get(url)
        time.sleep(1)
    except Exception as e:
        print(e)
    wait = WebDriverWait(driver, 3, 0.2)  # set the wait timeout and poll interval
    try:
        driver.find_element_by_xpath('//*[@class="h-action"]/span[@class="h-f-btn h-follow"]').click()
    except Exception as e:
        print(e)
driver.quit()
def get_failed_list(links_Apks, downloadPath):
	for value in list(links_Apks.values()):
		for dirname in os.listdir(downloadPath):
			if dirname == value:
				del links_Apks[list(links_Apks.keys())[list(links_Apks.values()).index(value)]]
	failed_list = list(links_Apks.keys())
	return failed_list


# program entry point
if __name__ == '__main__':

	# open the update-links sheet in the Excel file
	file_updatelink = 'sys_config.xlsx'
	updateExcel = xlrd.open_workbook(filename=file_updatelink)
	updateSheet = updateExcel.sheet_by_index(0)
	# links are in column 2, channel numbers in column 4 (0-based)
	testlink_col = updateSheet.col(2)
	testlink_col_channels = updateSheet.col(4)
	downloadPath = r"/code/update"
	# this path has to be changed for every release; it could be derived automatically from the last-modified time
	publishPath = r'/mnt/webDevice/JellyBlast/publish/android/China/6.9.1'
	# TODO: generate this path automatically later
	Net_downloadPath = r'/mnt/webDevice/JellyBlast/01-QA-Jelly/4-Testing_report/update_prompt_downloaded_6.9.1'
	channelsDict = {
	'162' : 'taptap',
	'109' : 'gamedl.gionee',
	'104' : 'qq_browser',
	'139' : 'app.2345',
	'24' : 'baidu_dk',
        new_row.append(sheet.cell(row_index, col_index).value)
    row_list.append(new_row)


last_sunday = datetime.date.today() - datetime.timedelta(
    datetime.date.today().weekday() + 1)
date_str = last_sunday.strftime("%m-%d-%Y")
url_str = 'http://www.dailyfx.com/files/Calendar-' + date_str + '.xls'
response = requests.get(url_str,
                        stream=True,
                        headers={'User-agent': 'Mozilla/5.0'})
if response.status_code != 200:
    raise RuntimeError("Could not download the file: " + url_str)
with open('Calendar-' + date_str + '.csv', "wb") as f:
    data = response.raw.read()
    book = xlrd.open_workbook(file_contents=data, formatting_info=True)
    sheet = book.sheet_by_index(0)
    keys = [
        sheet.cell(0, col_index).value for col_index in range(sheet.ncols)
    ]
    row_list = []
    has_started = False
    new_date = None
    for row_index in range(0, sheet.nrows):
        if sheet.cell(row_index, 0).value == 'Date':
            has_started = True
            append_row(row_list, sheet, row_index, 'Date')
        elif has_started:
            cur_date = sheet.cell(
                row_index,
                0).value if sheet.cell(row_index, 0).value != '' else None
Example #56
def read_bea(asset_tree):
    # Opening BEA's excel file on depreciable assets by industry:
    bea_book = xlrd.open_workbook(_BEA_ASSET_PATH)
    sht_names = bea_book.sheet_names()
    num_shts = bea_book.nsheets
    # Opening "readme" sheet:
    try:
        bea_readme = bea_book.sheet_by_name("readme")
    except xlrd.XLRDError:
        bea_readme = bea_book.sheet_by_index(0)
    # Finding relevant positions in the readme sheet:
    sht_pos = naics.search_ws(bea_readme, "Industry Title", 25, False)
    if (sht_pos == [-1, -1]):
        sht_pos = naics.search_ws(bea_readme, "bea code", 25, False, [0, 0],
                                  True)
        sht_pos[1] = sht_pos[1] - 1
    if (sht_pos == [-1, -1]):
        print "Error in reading BEA fixed asset \"readme\" sheet."
        return None
    cur_row = sht_pos[0] + 1
    cur_col = sht_pos[1]
    # Finding the number of industries (includes those without bea codes):
    number_of_industries = 0
    while cur_row < bea_readme.nrows:
        if str(bea_readme.cell_value(cur_row, cur_col)) != "":
            number_of_industries += 1
        cur_row += 1
    # Making a list of BEA codes based on the names of the worksheets:
    bea_codes1 = np.zeros(num_shts - 1, dtype=object)
    for index in range(1, num_shts):
        bea_codes1[index - 1] = str(sht_names[index])
    # Making a list of BEA codes based on info in the readme sheet:
    code_index = 0
    cur_row = sht_pos[0] + 1
    cur_col = sht_pos[1]
    bea_codes2 = np.zeros(number_of_industries, dtype=object)
    while cur_row < bea_readme.nrows:
        if str(bea_readme.cell_value(cur_row, cur_col)) != "":
            cur_code = str(bea_readme.cell_value(cur_row, cur_col + 1))
            cur_code = cur_code.replace("\xa0", " ").strip()
            bea_codes2[code_index] = cur_code
            code_index += 1
        cur_row += 1
    # Reading in a list of the assets in the BEA file:
    list_file = os.path.join(_BEA_DIR, "detailnonres_list.csv")
    asset_list = pd.read_csv(list_file)
    for i in range(0, asset_list.shape[0]):
        asset_list.iloc[i, 0] = asset_list.iloc[i, 0].replace("\xa0", " ")
        asset_list.iloc[i, 0] = asset_list.iloc[i, 0].strip()
    # Reading in the corresponding naics codes:
    naics_file = os.path.join(_BEA_DIR, "detailnonres_naics.csv")
    naics_cross = pd.read_csv(naics_file).replace("\xa0", " ")
    naics_inds = naics_cross["Industry"]
    for i in range(0, naics_cross.shape[0]):
        naics_inds[i] = naics_inds[i].replace("\xa0", " ").strip()
    # Creating a chart cross-referencing industry names, BEA and NAICS codes.
    chart_cols = ["Industry", "BEA Code", "NAICS Code"]
    bea_chart = pd.DataFrame(np.zeros(shape=(num_shts - 2, 3), dtype=object),
                             columns=chart_cols)
    bea_inds = bea_chart["Industry"]
    bea_naics = bea_chart["NAICS Code"]
    cur_row = sht_pos[0] + 1
    cur_col = sht_pos[1]
    num_naics = naics_cross.shape[0]
    # Filling chart with naics codes that are in both lists and the crosswalk:
    naics_counter = 0
    for i in range(0, num_shts - 2):
        for cur_row in range(sht_pos[0] + 1, bea_readme.nrows):
            bea_code = str(bea_readme.cell_value(cur_row, cur_col + 1))
            if (str(bea_codes1[i]) == bea_code):
                bea_ind = str(bea_readme.cell_value(cur_row, cur_col))
                bea_ind = bea_ind.replace('\xa0', ' ').strip()
                bea_inds[i] = bea_ind
                bea_chart["BEA Code"][i] = bea_code
                for k in range(0, num_naics):
                    naics_counter = (naics_counter + 1) % num_naics
                    if (naics_inds[naics_counter] == bea_chart["Industry"][i]):
                        bea_naics[i] = naics_cross["NAICS"][naics_counter]
                        break
                break
            # If they match except one has ".0" at the end:
            elif (str(bea_codes1[i]) == str(
                    bea_readme.cell_value(cur_row, cur_col + 1))[:-2]):
                bea_ind = str(bea_readme.cell_value(cur_row, cur_col))
                bea_ind = bea_ind.replace('\xa0', ' ').strip()
                bea_chart["Industry"][i] = bea_ind
                cur_code = str(bea_readme.cell_value(cur_row,
                                                     cur_col + 1))[:-2]
                bea_chart["BEA Code"][i] = cur_code
                for k in range(0, num_naics):
                    naics_counter = (naics_counter + 1) % num_naics
                    if (naics_inds[naics_counter] == bea_inds[i]):
                        bea_naics[i] = naics_cross["NAICS"][naics_counter]
                        break
                break
    # Initializing the table of assets:
    #cur_sht = bea_book.sheet_by_name(bea_chart["BEA Code"][0])
    #sht_pos = naics.search_ws(cur_sht, "asset codes", 25, False)
    bea_table = pd.DataFrame(np.zeros(
        (asset_list.shape[0], bea_chart.shape[0])),
                             columns=bea_chart["BEA Code"])
    # For each industry, calculating
    for i in bea_chart["BEA Code"]:
        cur_sht = bea_book.sheet_by_name(i)
        sht_pos = naics.search_ws(cur_sht, "asset codes", 25, False)
        for j in range(0, len(asset_list)):
            cur_asset = asset_list.iloc[j, 0]
            for k in range(sht_pos[0] + 2, cur_sht.nrows):
                cur_cell = str(cur_sht.cell_value(k, sht_pos[1] + 1))
                cur_cell = cur_cell.replace("\xa0", " ").strip()
                if (cur_asset == cur_cell):
                    bea_table[i][j] = float(
                        cur_sht.cell_value(k, cur_sht.ncols - 1))
        #bea_table[i] = np.array(cur_sht.col_values(cur_sht.ncols-1, sht_pos[0]+2, cur_sht.nrows))
    # The dollar amounts are in millions:
    bea_table = bea_table.apply(pd.to_numeric, errors='coerce').fillna(0)
    bea_table = bea_table * _BEA_IN_FILE_FCTR
    # Initialize tree for assets data:
    fixed_asset_tree = naics.generate_tree()
    for i in range(0, len(fixed_asset_tree.enum_inds)):
        fixed_asset_tree.enum_inds[i].data.append(
            ("All",
             pd.DataFrame(np.zeros((1, asset_list.shape[0])),
                          columns=asset_list.iloc[:, 0])))
        fixed_asset_tree.enum_inds[i].data.append(
            ("Corp",
             pd.DataFrame(np.zeros((1, asset_list.shape[0])),
                          columns=asset_list.iloc[:, 0])))
        fixed_asset_tree.enum_inds[i].data.append(
            ("Non-Corp",
             pd.DataFrame(np.zeros((1, asset_list.shape[0])),
                          columns=asset_list.iloc[:, 0])))
    # Fill in data from BEA's fixed asset table:
    enum_index = len(asset_tree.enum_inds) - 1
    for i in range(0, bea_table.shape[1]):
        cur_codes = str(bea_chart["NAICS Code"][i]).split(".")
        tot_share = 0
        all_proportions = naics.get_proportions(cur_codes, asset_tree,
                                                "FA").iloc[1, :]
        corp_proportions = naics.get_proportions(cur_codes, asset_tree, "FA",
                                                 _CORP_NMS).iloc[1, :]
        non_corp_proportions = naics.get_proportions(cur_codes, asset_tree,
                                                     "FA",
                                                     _NCORP_NMS).iloc[1, :]
        for code_index in range(0, len(cur_codes)):
            for j in range(0, len(fixed_asset_tree.enum_inds)):
                enum_index = (enum_index + 1) % len(fixed_asset_tree.enum_inds)
                out_dfs = asset_tree.enum_inds[enum_index].data.dfs
                if (sum(out_dfs["FA"].iloc[0, :]) == 0):
                    continue
                all_ratio = 1.0
                corp_ratio = 0.0
                non_corp_ratio = 0.0
                for category in _CORP_NMS:
                    corp_ratio += (out_dfs["FA"][category][0] /
                                   sum(out_dfs["FA"].iloc[0, :]))
                for category in _NCORP_NMS:
                    non_corp_ratio += (out_dfs["FA"][category][0] /
                                       sum(out_dfs["FA"].iloc[0, :]))
                cur_data = fixed_asset_tree.enum_inds[enum_index].data
                ind_codes = cur_data.dfs["Codes:"].iloc[:, 0]
                share = naics.compare_codes(cur_codes, ind_codes)
                tot_share += share
                if (share == 0):
                    continue
                num_assets = fixed_asset_tree.enum_inds[0].data.dfs[
                    "All"].shape[1]
                for k in range(0, num_assets):
                    cur_data.dfs["All"].iloc[0,
                                             k] = (bea_table.iloc[k, i] *
                                                   all_ratio *
                                                   all_proportions[code_index])
                    cur_data.dfs["Corp"].iloc[0, k] = (
                        bea_table.iloc[k, i] * corp_ratio *
                        corp_proportions[code_index])
                    cur_data.dfs["Non-Corp"].iloc[0, k] = (
                        bea_table.iloc[k, i] * non_corp_ratio *
                        non_corp_proportions[code_index])
                break
            if (tot_share == 1):
                break
    #
    naics.pop_back(fixed_asset_tree, ["All", "Corp", "Non-Corp"])
    naics.pop_forward(tree=fixed_asset_tree,
                      df_list=["All"],
                      blueprint="FA",
                      blue_tree=asset_tree)
    naics.pop_forward(tree=fixed_asset_tree,
                      df_list=["Corp"],
                      blueprint="FA",
                      blue_tree=asset_tree,
                      sub_print=_CORP_NMS)
    naics.pop_forward(tree=fixed_asset_tree,
                      df_list=["Non-Corp"],
                      blueprint="FA",
                      blue_tree=asset_tree,
                      sub_print=_NCORP_NMS)
    return fixed_asset_tree

def read_from_excel(file, proj_keys, serv_keys):
    print("reading from " + str(file))

    rb = xlrd.open_workbook(file)
    sheet = rb.sheet_by_index(0)

    proj_data = {}
    serv_data = {}

    error = False

    # get data from the request forms
    for search in proj_keys:
        key_row = -1
        key_col = -1

        for r in range(sheet.nrows):
            for c in range(sheet.ncols):
                cell = sheet.cell(r, c)
                if cell.value == search + ":":
                    key_row = r
                    key_col = c
                    break

        # If the request form being searched does not contain the Project keys,
        # print an error message and exit the loop
        if key_row == -1 or key_col == -1:
            print("ERROR: " + file + " does not contain the key " + search)
            error = True
            break

        data = sheet.cell(key_row, key_col + 6).value

        # Make sure the project information in the request form is filled out
        if data is None:
            print("ERROR: " + file + " is missing Project information for " + search)
            error = True
            break
        else:
            proj_data[search] = data

    for search in serv_keys:
        key_row = -1
        key_col = -1

        if error:
            break

        for r in range(sheet.nrows):
            for c in range(sheet.ncols):
                cell = sheet.cell(r, c)
                if str(cell.value).strip() == search:
                    key_row = r
                    key_col = c
                    break

        # If the request form being searched does not contain the Service keys,
        # print an error message and exit the loop
        if key_row == -1 or key_col == -1:
            print("ERROR: " + file + " does not contain the key " + search)
            error = True
            break

        service_values = sheet.cell(key_row, key_col + 7)

        if service_values.value == "Not to be requested":
            data = None
        else:
            data = service_values.value
        serv_data[search] = data

    # If the request form is incomplete or does not contain the required keys, return empty dictionaries
    if error:
        proj_data = {}
        serv_data = {}

    return proj_data, serv_data
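A minimal call sketch for read_from_excel; the file name and the key lists are hypothetical:

proj, serv = read_from_excel('request_form.xls',
                             proj_keys=['Project Name', 'Project Lead'],
                             serv_keys=['Sequencing'])
print(proj, serv)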
Example #58
    def openXLbook(self, filename):
        """Open an Excel file and return the workbook object."""
        xl_workbook = xlrd.open_workbook(filename)
        return xl_workbook
import xlrd
d = {}
f = open("C:\\Users\\MY-PC\\Desktop\\TollGateJava\\NEWFILE.txt", "w")
wb = xlrd.open_workbook('D:\\New folder\\Book1.xlsx')
sh = wb.sheet_by_index(3)
x = len(sh.col_values(0))  # number of rows in the sheet

for i in range(x):
    f.write("hash.put(\"{}\",\"{}\");\n".format(  sh.cell(i,0).value,sh.cell(i,1).value))



#print("hash.put("+"\""+start+"-"+destination+"\""+':'+str(value_Price[j+1])+")\n")
f.close()
	def top_10(self):
		path = '/home/tex/Documents/IR/Dataset_AS3/'
		filename = 'similarity5000.csv'
		with open(path+filename, 'r') as f:
		  reader = csv.reader(f)
		  your_list = list(reader)

		sim = []
		for i in range(len(your_list)):
		    sim.append([])
		    for j in range(len(your_list[i])):
		        if your_list[i][j]=='':
		            pass
		        else:
		            sim[i].append(float(your_list[i][j]))

		filename3 = '5000dataset.xls'

		xlsfile = xlrd.open_workbook(path+filename3, on_demand= True)
		xlsfiledata = xlsfile.sheet_by_index(0)

		data=[]
		for i in range(0,5000):
		    data.append([])
		    for j in range(1,101):
		        data[i].append(xlsfiledata.cell(i,j).value)

		error1=0
		error2=0
		for i in range(0,5000):
			#print(i)
			sorted_row = []
			for j in range(0,100):
				sorted_row.append([])
				sorted_row[j].append(data[i][j])
				sorted_row[j].append(j)

			sorted_row.sort(key=lambda tup: tup[0])
			#print(sorted_row)
			top_10_index = []
			count=0
			for j in range(0,100):
				if(sorted_row[100-j-1][0]!=99):
					top_10_index.append(sorted_row[100-j-1][1])
					count=count+1
					if count>=10:
						break

			#print(top_10_index)
			for j in range(0,len(top_10_index)):
				sum1=0
				sum2=0
				for k in range(0,5000):
					if k==i:
						pass
					else:
						if i<k and sim[i][k-i]>0 and data[k][top_10_index[j]]!=99:
							sum1 = sum1 + (float(data[k][top_10_index[j]])*float(sim[i][k-i]))
							sum2 = sum2 + float(sim[i][k-i])
						elif i>k and sim[i-k][k]>0 and data[k][top_10_index[j]]!=99:
							sum1 = sum1 + (float(data[k][top_10_index[j]])*float(sim[i-k][k]))
							sum2 = sum2 + float(sim[i-k][k])
				value = sum1/sum2
				error1 = error1 + ((value-data[i][j]) * (value-data[i][j]))
				error2 = error2 + 1

		xx = math.sqrt(error1)
		print("Precision Top 10 (Movie ratings of each user) ",xx/error2)