Example #1
from pyexcel_xls import save_data
data = {"Data1": [[123, 45, 56], [23, 34, 35]]}
save_data(r'D:\Shubham\Python\programs\excelDemo1.xls', data)  # raw string so the backslashes are not treated as escapes
from pyexcel_xls import read_data
data1 = read_data(r'D:\Shubham\Python\programs\excelDemo1.xls')
print(data1)
# To save data in an Excel file

from pyexcel_xls import save_data
data = {"sheet1": [['sno', 'name', 'address'], [1, 'abc', 'india']]}
save_data("demo.xls", data)

# To read the data from an Excel file

from pyexcel_xls import read_data
data = read_data("demo.xls")
print(data)

# Working with xlsxwriter

import xlsxwriter
workbook = xlsxwriter.Workbook('demo.xlsx')  # XlsxWriter can only create files in the xlsx format
worksheet = workbook.add_worksheet()
worksheet.write("A1", 'test data')
workbook.close()  # close the workbook, not the worksheet
Example #3
# 2) Read an Excel file: open the workbook, get the sheets from the workbook,
#    and get the rows and columns from each sheet

from pyexcel_xls import save_data, read_data

data = {
    "sheet 1": [[1, 2, 3], [4, 5, 6]],
    "sheet 2": [[2, 3, 'Arun'], ['python', 'data']]
}
save_data('E:\\trashcode\\aasd.xlsx', data)

rdata = read_data('E:\\trashcode\\aasd.xlsx')
print(rdata)
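
# The comment above also mentions getting the rows and columns out of each
# sheet. read_data returns an OrderedDict mapping sheet names to lists of
# rows, so a minimal sketch of walking it (reusing rdata from above) is:
for sheet_name, rows in rdata.items():
    print("Sheet:", sheet_name)
    for row_index, row in enumerate(rows):
        for col_index, cell in enumerate(row):
            print("row", row_index, "col", col_index, ":", cell)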

import xlsxwriter

workbook = xlsxwriter.Workbook('test.xlsx')
worksheet = workbook.add_worksheet()
worksheet.write('A2', 'Test data')
workbook.close()
Example #4
# 2) Read an Excel file: open the workbook, get the sheets from the workbook,
#    and get the rows and columns from each sheet

from pyexcel_xls import save_data, read_data

data = {
    "sheet 1": [[1, 2, 3], [4, 5, 6]],
    "sheet 2": [[2, 3, 'Arun'], ['python', 'data']]
}
save_data("myxls.xls", data)

rdata = read_data("myxls.xls")
print(rdata)

import xlsxwriter

workbook = xlsxwriter.Workbook('test.xlsx')
worksheet = workbook.add_worksheet()
worksheet.write('A2', 'Test data')
workbook.close()
Example #5
''' Python project to build an SEO tool: read input data from an Excel sheet,
find the related tags in the HTML content of a website, and print those
tags to the Python shell '''

from pyexcel_xls import read_data
from bs4 import BeautifulSoup
from urllib.request import urlopen
import xlsxwriter

name = input("Enter the excel sheet name (with .xlsx ext.): ")

exBook = xlsxwriter.Workbook(name)  # create an excel workbook and sheet for the website name
exSheet = exBook.add_worksheet()  # add the data according to the project

data = read_data(name)
try:
    for sheetname, values in data.items():
        urls = values[0]
        tags = values[1:6]
        url = urls[0]
        print(url)
        print(tags)
        print('-' * 20)
        hyperLink = urlopen(url)
        html = hyperLink.read().decode('utf-8')

        Soup = BeautifulSoup(html, 'html.parser')  # bs4 to parse the html content
        meta = Soup.find_all('meta')
        desc = Soup.find(attrs={'name': 'description'})
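        # A minimal sketch (an assumption, not the original author's code) of
        # how the loop might continue: pull the page's meta description and
        # print the tags from the sheet that actually appear in it.
        desc_text = desc['content'] if desc is not None and desc.has_attr('content') else ''
        for tag_row in tags:
            keyword = "".join(map(str, tag_row))
            if keyword and keyword.lower() in desc_text.lower():
                print('tag found in page description:', keyword)
except Exception as err:
    print('could not open or parse the URL:', err)

exBook.close()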
Example #6
File: seo2.py  Project: GowthamR12/Projects
from pyexcel_xls import save_data
from pyexcel_xls import read_data
from bs4 import BeautifulSoup
import urllib.request
import xlsxwriter
import json

d = read_data("Book1.xlsx")
s = json.dumps(d)
obj = json.loads(s)
#for k in obj:
#    print(obj[k][0][0])
#urlNames=dict()
wb = xlsxwriter.Workbook('Book2.xlsx')
for k in obj:
    worksheet = wb.add_worksheet(str(k))
    worksheet.write("A1", obj[k][0][0])
    worksheet.write("A3", "WORDS")
    worksheet.write("B3", "COUNT")
    chart = wb.add_chart({'type': 'column'})
    req = urllib.request.Request(
        str(obj[k][0][0]),
        data=None,
        headers={
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
        })
    f = urllib.request.urlopen(req)
    s = f.read().decode('utf-8')

    soup = BeautifulSoup('https://kerala.gov.in/web/guest/gallery',
Example #7
sheet1.write('A1', 'javatpoint.com')
sheet1.write('A2', 'https://www.javatpoint.com/java-tutorial')
sheet1.write('A3', 'java')
sheet1.write('A4', 'sql')
sheet1.write('A5', 'tutorial')

sheet2 = workbook.add_worksheet("second")
sheet2.write('A1', 'tutorialspoint.com')
sheet2.write(
    'A2', 'https://www.tutorialspoint.com/python/python_data_structure.htm')
sheet2.write('A3', 'python')
sheet2.write('A4', 'data')
sheet2.write('A5', 'programming')
#####################################

d = read_data("Urlbook.xlsx")
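
# Each row of a sheet is a list of cells; joining the cells of a row gives one
# string per row, so index 1 below (cell A2 of the sheet) holds the URL written above.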

L1 = []
for w in d['first']:
    L1.append("".join(map(str, w)))

L2 = []
for w in d['second']:
    L2.append("".join(map(str, w)))

print(L1, "\n")
print(L2, "\n")
print("Reading url from excel file:")
print(L1[1])
print(L2[1], "\n")
#####################################
Example #8
File: pciis.py  Project: Jejulia/labtools
def ionsuppression(chrom, trans, window=10, save=False, data=data):

    # import data

    try:
        df_trans = data[trans]
    except:
        df_trans = pd.read_excel(trans)
        data[trans] = df_trans
    if chrom.split(".")[-1] == 'xls':
        from pyexcel_xls import read_data
    else:
        from pyexcel_xlsx import read_data
    try:
        df_chrom = data[chrom]
    except:
        df_chrom = read_data(chrom)
        data[chrom] = df_chrom

    # pci transition, retention time

    df_trans['Transition'] = [
        re.sub("^ *", "", i) for i in df_trans['Transition']
    ]
    # pci_name = re.sub("[(]PCI[)]","",df_trans.iloc[['(PCI)' in i for i in df_trans.iloc[:,0]],0][0])
    pci_trans = df_trans.loc[['(PCI)' in i for i in df_trans.iloc[:, 0]],
                             'Transition'][0]

    while True:
        rand = random.sample(list(df_chrom.keys()), 1)
        if pci_trans in df_chrom[rand[0]][0][0]:
            df = np.vstack(df_chrom[rand[0]][2:])
            break
    df = df[df[:, 1] < 2, :]
    iv = pd.Series([df[i + 1, 1] - df[i, 1] for i in range(df.shape[0] - 1)])
    iv = round(iv.mode()[0], 4)
    argmin = df[:, 2].argmin()
    rt = np.arange(df[argmin, 1] - window * iv,
                   df[argmin, 1] + (window + 1) * iv, iv)

    # filter chromatography

    result = np.array(
        [interpolate2(df, rt, pci_trans) for df in df_chrom.values()])
    result = result[result.nonzero()]
    mat_chrom = np.vstack(list(zip(*result))[0])
    name = list(zip(*result))[1]

    # data assembly

    datafile = [re.sub('.*[(]', "", re.sub('.*[) ]', "", i)) for i in name]
    df_is = pd.DataFrame(mat_chrom)
    df_is.columns = rt
    df_is.index = pd.Series(datafile).unique()

    # Save

    if save:
        try:
            df_is.to_excel('{}.xlsx'.format(asksaveasfilename()))
        except Exception:
            print("Cancelled")
    return df_is
Example #9
File: pciis.py  Project: Jejulia/labtools
def quantify(chrom, trans, save=False):

    # import data

    try:
        df_trans = data[trans]
    except:
        df_trans = pd.read_excel(trans)
        data[trans] = df_trans
    if chrom.split(".")[-1] == 'xls':
        from pyexcel_xls import read_data
    else:
        from pyexcel_xlsx import read_data
    try:
        df_chrom = data[chrom]
    except:
        df_chrom = read_data(chrom)
        data[chrom] = df_chrom

    # pci transition, retention time

    df_trans['Transition'] = [
        re.sub("^ *", "", i) for i in df_trans['Transition']
    ]
    # pci_name = re.sub("[(]PCI[)]","",df_trans.iloc[['(PCI)' in i for i in df_trans.iloc[:,0]],0][0])
    pci_trans = df_trans.loc[['(PCI)' in i for i in df_trans.iloc[:, 0]],
                             'Transition'][0]
    df_trans = df_trans.set_index(['Transition'])
    rt_min = min(df_trans['RT.s'].dropna())
    rt_max = max(df_trans['RT.e'].dropna())

    rand = random.sample(list(df_chrom.keys()), 1)
    df = np.vstack(df_chrom[rand[0]][2:])
    df = df[(np.searchsorted(df[:, 1], rt_min, side='right') -
             1):(np.searchsorted(df[:, 1], rt_max) + 1), :]
    iv = pd.Series([df[i + 1, 1] - df[i, 1] for i in range(df.shape[0] - 1)])
    iv = round(iv.mode()[0], 4)
    rt = np.arange(df[0, 1], df[-1, 1] + iv, iv)

    # filter chromatography

    result = [interpolate1(df, rt) for df in df_chrom.values()]
    mat_chrom = np.vstack(list(zip(*result))[0]).transpose()
    name = list(zip(*result))[1]

    # Calculate ratio

    datafile = [re.sub('.*[(]', "", i) for i in name]
    trans = pd.Series([re.sub('[)].*', "", i) for i in datafile])
    datafile = [re.sub('.*[) ]', "", i) for i in datafile]
    pci_index = trans == pci_trans
    mat_pci = mat_chrom[:, pci_index]
    for i in range(len(df_trans.index) - 1):
        mat_pci = np.hstack([mat_pci, mat_chrom[:, pci_index]])
    mat_chrom = mat_chrom / mat_pci

    # Peak computing

    dict_range = dict()
    for i in df_trans.index:
        if i == pci_trans:
            dict_range[i] = list(range(len(rt)))
        else:
            dict_range[i] = [
                j for j, k in enumerate(rt)
                if k > df_trans.loc[i, 'RT.s'] and k < df_trans.loc[i, 'RT.e']
            ]

    mat_chrom = np.array(
        [sumif(mat_chrom, i, trans, dict_range) for i in range(len(datafile))])

    # data assembly

    datafile = pd.Series(datafile).unique()
    trans = trans.unique()
    peak = mat_chrom.reshape(len(trans), len(datafile)).transpose()
    peak = pd.DataFrame(peak)
    peak.index = datafile
    peak.columns = trans

    # Save

    if save:
        try:
            peak.to_excel('{}.xlsx'.format(asksaveasfilename()))
        except Exception:
            print("Cancelled")
    return peak
Example #10
# Key modules:
#   py -m pip install pyexcel-xls
#   py -m pip install beautifulsoup4
#   py -m pip install xlsxwriter
from pyexcel_xls import save_data
from pyexcel_xls import read_data
from urllib import *

from bs4 import BeautifulSoup
import urllib.request
import xlsxwriter
import json
import socket

data = read_data("input.xls")
ss = json.dumps(data)
object = json.loads(ss)
urlNames = dict()
wb = xlsxwriter.Workbook('output_scrap.xlsx')
h1 = wb.add_format({'bold': True, 'font_color': 'red'})
h2 = wb.add_format({'bold': True, 'font_color': 'blue'})
h3 = wb.add_format({'bold': True, 'font_color': 'green'})

for o in object:
    ws = wb.add_worksheet(str(o))
    ws.write("A1", object[o][0][0], h1)
    ws.write("A3", "WORDS", h2)
    ws.write("B3", "COUNT", h2)
    chart = wb.add_chart({'type': 'column'})
    req = urllib.request.Request(
        str(object[o][0][0]),
        data=None,
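        # A minimal sketch (an assumption, not the original source) of how the
        # rest of the loop could look: finish the request, fetch the page, count
        # the words in its text, write them under the WORDS/COUNT headings, and
        # chart the counts. The User-Agent string, the top-10 cutoff and the
        # chart ranges are illustrative choices, not taken from the project.
        headers={'User-Agent': 'Mozilla/5.0'})
    page = urllib.request.urlopen(req).read().decode('utf-8')
    soup = BeautifulSoup(page, 'html.parser')

    # Count every word in the visible text of the page
    counts = {}
    for word in soup.get_text().split():
        counts[word] = counts.get(word, 0) + 1

    # Write the ten most frequent words and their counts below the headings
    row = 3
    top_words = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)[:10]
    for word, count in top_words:
        ws.write(row, 0, word, h3)
        ws.write(row, 1, count)
        row += 1

    # Plot the counts as a column chart on the same sheet
    chart.add_series({
        'categories': "='{}'!$A$4:$A$13".format(str(o)),
        'values': "='{}'!$B$4:$B$13".format(str(o)),
    })
    ws.insert_chart('D5', chart)

wb.close()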
Example #11
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""

# pip install pyexcel_xls

from pyexcel_xls import save_data, read_data

data = {
    "sheet 1": [[1, 2, 3], [4, 5, 6], [4, 5, 6]],
    "sheet 2": [[2, 3, 'pavan'], ['python', 'data']]
}
save_data("myxls123.xls", data)

result = read_data("myxls123.xls")  # use a new name so the read_data function is not rebound

print(result)
Example #12
from pyexcel_xls import save_data

data = {"EmpId": [[101, 102, 103], [65, 47, 789]]}
save_data("d:\\emp.xls", data)
from pyexcel_xls import read_data

data1 = read_data('d:\\emp.xls')
print(data1)
Example #13
from pyexcel_xls import save_data
data = {"sheet1": [[10, 40, 50], [75, 85, 74]]}
save_data("demo_excel.xls", data)

from pyexcel_xls import read_data
data = read_data("demo_excel.xls")
print(data)
Example #14

    # Add a series of data to the Chart.
    chart1.add_series({
        'name': '=Sheet1!$C$1',
        'categories': '=Sheet1!$B$2:$B$6',
        'values': '=Sheet1!$C$2:$C$6',
    })

    # Add a chart title and some axis labels.
    chart1.set_title({'name': 'Analysis of Density of Keywords'})
    chart1.set_x_axis({'name': 'Keywords ---------->'})
    chart1.set_y_axis({'name': 'Densities ---------->'})

    # Set an Excel chart style. Colors with white outline and shadow.
    chart1.set_style(27)

    # Insert the chart into the worksheet (with an offset).
    worksheet.insert_chart('D5', chart1, {'x_offset': 25, 'y_offset': 10})

    workbook.close()

    print('''\nSuccessful analysis of the URL
\nYou can open the file 'Density_analysis' in your D: drive ''')

    from pyexcel_xls import read_data

    data = read_data("D:\\Density_analysis.xlsx")
    print(data)

else:
    print('\n________*INVALID URL*________')
Example #15
def readExcel(self, filepath):
    self.filePath = filepath
    from pyexcel_xls import read_data
    data = read_data(self.filePath)
    return data
Example #16
worksheet.write('A1', 'url')
worksheet.write('B1', 'Count')

row = 1
col = 0

for url, count in d.items():
    worksheet.write(row, col, url)
    worksheet.write(row, col + 1, count)
    row += 1

# Write a total using a formula.
worksheet.write(row, 0, 'Total')
worksheet.write(row, 1, '=SUM(B2:B5)')
# Charts have to be added and configured before the workbook is closed.
chartsheet = workbook.add_chartsheet()
chart = workbook.add_chart({'type': 'bar'})

# Configure the chart.
chartsheet.set_chart(chart)
chart.add_series({'values': '=Sheet1!$A$2:$A$7'})
chart.add_series({'values': '=Sheet1!$B$2:$B$7'})

# A chart object can only be placed once, so build a second one for the worksheet.
chart2 = workbook.add_chart({'type': 'bar'})
chart2.add_series({'values': '=Sheet1!$B$2:$B$7'})

# Insert the chart into the worksheet, then close the workbook once.
worksheet.insert_chart('A13', chart2)
workbook.close()

data = read_data("finalreport.xlsx")
print(data)
Example #17
import xlsxwriter
from pyexcel_xls import read_data

d = {}
workbook = xlsxwriter.Workbook("urlbook.xlsx")
worksheet1 = workbook.add_worksheet("first")
worksheet1.write('A1', 'javatpoint.com')
worksheet1.write('A2', 'https://www.javatpoint.com/java-tutorial')
worksheet1.write('A3', 'java')
worksheet1.write('A4', 'SQL')
worksheet1.write('A5', 'C++')
worksheet2 = workbook.add_worksheet("second")
worksheet2.write('A1', 'tutorialspoint.com')
worksheet2.write(
    'A2', 'https://www.tutorialspoint.com/python/python_data_structure.htm')
worksheet2.write('A3', 'Python')
worksheet2.write('A4', 'Data')
worksheet2.write('A5', 'programming')
workbook.close()  # the workbook must be written to disk before it can be read back
d = read_data("urlbook.xlsx")
print("Reading URL from excel file")
#print(d['first'])
#print(d['second'])
L1 = []
for w in d['first']:
    L1.append("".join(map(str, w)))
#print(L1)
print(L1[1])  #worksheet 1

L2 = []
for w in d['second']:
    L2.append("".join(map(str, w)))
#print(L2)
print(L2[1])  #worksheet 2
Example #18
# Reading an XLS file using pyexcel_xls

from pyexcel_xls import read_data
data = read_data("book.xls")
print(data)