Example #1
# imports assumed by this snippet; file_watcher() is a helper defined elsewhere in the project
from pymongo import MongoClient
import pandas as pd
import os


def xl_1():
    path = pd.ExcelFile("Alarm.xls")  # open the Excel workbook with pandas
    print(path)
    s = path.sheet_names
    print(s)
    if s == ['Sheet2']:
        df = path.parse(
            "Electricity Archive"
        )  # parse the Excel data; the argument is the sheet name
        df.to_csv("Archive.csv", encoding='utf-8')  # convert to CSV
        csv = pd.read_csv('Archive.csv', encoding='utf-8')  # read the CSV back
        csv.to_json('Archivejson.json')  # convert to JSON
        file_watcher()
    else:
        return  # nothing was converted, so there is no CSV to insert

    header = ["Date", "BatchName", "Step Description", "TICR-01", "TICR-02"]  # expected columns (unused here)
    data = csv.to_dict(orient='records')  # one dict per row, the shape MongoDB expects
    print(data)
    client = MongoClient('localhost', 27017)
    db = client['excel2-db']
    col = db.col
    # insert the rows into MongoDB, then remove the CSV file with the os module
    col.insert_many(data)
    print("done1")
    os.remove("Archive.csv")
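A minimal way to exercise xl_1() and check the result, assuming a MongoDB server is running on localhost and the imports above are in place (the count check is only illustrative):

if __name__ == "__main__":
    xl_1()  # Alarm.xls -> Archive.csv -> Archivejson.json -> MongoDB
    client = MongoClient('localhost', 27017)
    # count how many row documents ended up in the excel2-db collection
    print(client['excel2-db'].col.count_documents({}))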
Example #2
def write_data(csv_files):
    import pandas as pd
    from os.path import splitext

    jsonfile = None
    for csv_file in csv_files:
        stem, _ = splitext(csv_file)
        jsonfile = stem + '.json'  # same name as the CSV, with a .json extension
        print(jsonfile)
        csv = pd.read_csv(csv_file)  # read the CSV
        csv.to_json(jsonfile)  # write it back out as JSON

    return jsonfile  # path of the last JSON file written (None if the list was empty)
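A short usage sketch for write_data(); the CSV file names here are placeholders:

json_path = write_data(['DOWNTIME.csv', 'Archive.csv'])  # hypothetical CSV files in the working directory
print(json_path)  # e.g. 'Archive.json', the last file written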
Example #3
# imports assumed by this snippet
from pathlib import Path
import numpy as np
import pandas as pd


def index_to_phrase(csv_file, txt_file):
    csv = pd.read_csv(Path(csv_file))
    csv['Phrase'] = ""
    csv['Before'] = ""
    csv['After'] = ""
    csv['ID'] = ""
    with open(txt_file, 'r') as file:
        data = file.read().replace('\n', ' ')  # flatten the text into one line
    highlights = []
    befores = []
    afters = []
    ids = []
    for index, row in csv.iterrows():
        if np.isnan(row['Start']) or np.isnan(row['End']):
            # no character offsets for this row, so leave its new columns empty
            csv.loc[index, ['Phrase', 'Before', 'After', 'ID']] = ""
            continue

        start = int(row['Start'])
        end = int(row['End'])

        if start == end or start == -1 or end == -1:
            continue

        before = data[max(0, start - 12):start]  # left context of the highlight
        after = data[end + 1:end + 12]  # right context of the highlight
        phrase = data[start:end + 1]  # the highlighted span itself
        highlights.append(phrase)
        befores.append(before)
        afters.append(after)
        csv.loc[index, 'Phrase'] = phrase
        csv.loc[index, 'Before'] = before
        csv.loc[index, 'After'] = after

        # normalize the indicator name into a lowercase id without spaces
        indicator_id = csv.loc[index, 'Credibility Indicator Name'].replace(" ", "").lower()
        print(indicator_id)
        ids.append(indicator_id)
        csv.loc[index, 'ID'] = indicator_id


#     csv.to_csv(r'/Users/johnwsha/Documents/School/Organizations/Goodly Labs/' + 'test' + csv_file)
    csv.to_json(csv_file[:-4] + "_test" + '.json')
    return highlights
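A hedged usage sketch; the file names are placeholders, and the CSV is assumed to contain the Start, End, and Credibility Indicator Name columns the function reads:

phrases = index_to_phrase('indicators.csv', 'article.txt')  # hypothetical input files
print(len(phrases), "highlighted phrases extracted")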
Example #4
def dataset():
    csv = pd.read_csv('dataSet.csv', sep=";")  # semicolon-separated CSV
    return jsonify({"csv": csv.to_json()})  # wrap the DataFrame's JSON in a response
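dataset() returns a Flask-style jsonify response, so it is presumably mounted as a route; a minimal sketch of the surrounding app, with the route path being an assumption:

from flask import Flask, jsonify
import pandas as pd

app = Flask(__name__)
app.add_url_rule('/dataset', view_func=dataset)  # assumed route path

if __name__ == '__main__':
    app.run(debug=True)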
from pymongo import MongoClient
import pandas as pd
import os
from bson.json_util import loads

os.chdir('E:\\file watch')
csv = pd.read_csv('DOWNTIME.csv', encoding='utf-8')  # read the CSV
csv.to_json('DOWNTIMEjson.json')  # convert it to JSON
with open('DOWNTIMEjson.json') as jf:  # read the JSON text back in
    jdf = jf.read()
data = loads(jdf)  # parse it with bson.json_util
#header=["Date","BatchName","Step Description","TICR-01","TICR-02"]
#data = csv.to_dict(orient = 'data')
client1 = MongoClient('localhost', 27017)
db1 = client1['cssv-db']
col1 = db1.col1
# insert the loaded document into MongoDB
col1.insert_one(data)
print('done')
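To confirm the insert, a quick check against the same local server and collection used above:

print(col1.count_documents({}))  # the whole JSON file went in as a single document
print(col1.find_one())  # inspect it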