Example #1
import csv

from flask import request


def updatefunc():
    if request.method == 'POST':
        # request.get_json() already returns parsed JSON, so no
        # json.dumps()/json.loads() round-trip is needed
        y = request.get_json()
        pn = str(y["player"]["playerName"])
        s = str(y["player"]["teamID"])
        new_pn = str(y["player"]["playerID"])
        new_s = str(y["player"]["season"])

        # To edit a column in a certain row, we read the CSV in as a
        # list of rows, then edit the fields of the matching row.
        myList = list()
        with open('dataset/players_copy.csv') as csvfile:
            readCSV = csv.reader(csvfile, delimiter=',')
            for row in readCSV:
                myList.append(row)
                if row[0] == pn:
                    row[0] = new_pn
                    row[3] = new_s

        # Then we write the list back into the CSV.
        with open('dataset/players_copy.csv', 'w', newline='') as writeFile:
            writer = csv.writer(writeFile)
            writer.writerows(myList)
        return 'update done'
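The same read-modify-write update can be expressed with csv.DictReader and csv.DictWriter so fields are addressed by column name instead of positional index. A minimal sketch, assuming the CSV carries a header row; the playerName and season column names are hypothetical:

import csv

def update_player(path, name, new_id, new_season):
    # Read all rows up front so the file can be rewritten in place.
    with open(path, newline='') as f:
        reader = csv.DictReader(f)
        fieldnames = reader.fieldnames
        rows = list(reader)

    for row in rows:
        if row['playerName'] == name:  # hypothetical column names
            row['playerName'] = new_id
            row['season'] = new_season

    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(rows)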
Example #2
    def _force(self):
        if config.sections["CALCULATOR"].force:
            num_forces = np.array(pt.shared_arrays['a'].num_atoms)*3
            if pt.shared_arrays['configs_per_group'].testing:
                testing = -1 * np.sum(num_forces[-pt.shared_arrays['configs_per_group'].testing:])
            else:
                testing = 0

            a, b, w = self._make_abw(pt.shared_arrays['a'].force_index, num_forces.tolist())
            config_indices, energy_list, force_list, stress_list = self._config_error()
            if config.sections["SOLVER"].detailed_errors and self.weighted == "Unweighted":
                from csv import writer
                true, pred = b, a @ self.fit
                num_testing = pt.shared_arrays['configs_per_group'].testing
                if num_testing:
                    num_testing_forces = np.sum(num_forces[-num_testing:])
                    ConfigType = ['Training'] * (np.shape(true)[0] - num_testing_forces) + \
                                 ['Testing'] * num_testing_forces
                else:
                    ConfigType = ['Training'] * np.shape(true)[0]
                with open('detailed_force_errors.dat', 'w', newline='') as f:
                    csv_writer = writer(f, delimiter=' ')
                    # One field per column; a single string containing spaces
                    # would be quoted by the space-delimited writer.
                    csv_writer.writerow(['FileName', 'Type', 'True-Ref',
                                         'Predicted-Ref', 'Difference(Pred-True)'])
                    csv_writer.writerows(zip(force_list, ConfigType, true, pred, pred - true))

            self._errors([[0, testing]], ['*ALL'], "Force", a, b, w)
            if testing != 0:
                self._errors([[testing, 0]], ['*ALL'], "Force_testing", a, b, w)
Example #3
from os import chdir, makedirs, path
from csv import writer as csv_writer

import identity


def setup_directory():

    # Always use this directory
    directory = "/home/pi/experiment/"
    chdir(directory)

    # Read the "identity.txt" file.
    # It contains the subject name, experiment name, and whether
    # setup has already been done.
    identity_data = identity.read_identity()

    # Always use these folders
    folders = ["data", "log"]

    # Look through the folders and make them if they do not already exist;
    # raise an error if a folder already exists during setup.
    if identity_data["setup"] == "False":
        for folder in folders:
            if path.exists(folder):
                raise NameError("Attempting setup but %s already exists" % folder)
            else:
                print("Constructing '%s' folder" % folder)
                makedirs(folder)

    # Record that setup is done in "identity.txt"
    identity_data["setup"] = "True"

    # Write "identity.txt"
    with open("identity.txt", "w", newline="") as file:
        writer = csv_writer(file)
        writer.writerows(identity_data.items())
Example #4
def fetch_whiteboard_page(url: str):
    datax = {}
    req = Request(url, headers=hdr)
    html = urlopen(req, timeout=30)
    soup = BeautifulSoup(html, features="lxml")
    header_blocks = soup.find("header", {"class": "productBanner-alt"})
    row_px = header_blocks.findAll("h1")
    row_p = header_blocks.findAll("p")

    datax[row_px[0].text.strip()] = row_p[0].text.replace(
        "\n        ", "").replace("\n      ", "").replace("\n\t\t\t", "").strip()

    print(datax)

    # Create a DictWriter from the csv module; one open() is enough
    with open('datacrawler.csv', 'a+', newline='') as csvfile:
        fieldnames = ['Feature', 'Desc', 'Sub Features', 'Sub Desc']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writerows([
            {
                'Feature': row_px[0].text.strip(),
                'Desc': datax[row_px[0].text.strip()].replace(",", ".").strip(),
                'Sub Features': row_px[0].text.strip(),
                'Sub Desc': datax[row_px[0].text.strip()].replace(",", ".").strip()
            },
        ])
Example #5
    def _energy(self):
        if config.sections["CALCULATOR"].energy:
            testing = -1 * pt.shared_arrays['configs_per_group'].testing
            a, b, w = self._make_abw(pt.shared_arrays['a'].energy_index, 1)
            pt.single_print(np.shape(a))
            config_indices, energy_list, force_list, stress_list = self._config_error()
            self._errors([[0, testing]], ['*ALL'], "Energy", a, b, w)
            if config.sections["SOLVER"].detailed_errors and self.weighted == "Unweighted":
                from csv import writer
                true, pred = b, a @ self.fit
                num_testing = pt.shared_arrays['configs_per_group'].testing
                ConfigType = ['Training'] * (np.shape(true)[0] - num_testing) + \
                             ['Testing'] * num_testing
                with open('detailed_energy_errors.dat', 'w', newline='') as f:
                    csv_writer = writer(f, delimiter=' ')
                    # One field per column, matching the force variant.
                    csv_writer.writerow(['FileName', 'Type', 'True-Ref',
                                         'Predicted-Ref', 'Difference(Pred-True)'])
                    csv_writer.writerows(
                        zip(energy_list, ConfigType, true, pred, pred - true))

            if testing != 0:
                self._errors([[testing, 0]], ['*ALL'], "Energy_testing", a, b, w)
Example #6
    def output_impact_metrics(self, service_name):
        metrics_dir = join(dirname(dirname(dirname(abspath(__file__)))),
                           'integration_test', service_name)
        ensure_ddir(metrics_dir)
        metrics_filename = ('ImpactMetrics_' + service_name + '_FuelCell_' +
                            datetime.now().strftime('%Y%m%dT%H%M') + '.csv')
        with open(join(metrics_dir, metrics_filename), 'w', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerows(self.metrics)
Example #7
def writedata_to_csv(course_id, records):
    filename = str(course_id) + '.csv'
    lines = [["Roll_no", "Name"]]
    for data in records:
        temp = [data['student_id'], data['student_name']]
        lines.append(temp)

    with open(filename, 'w') as w:
        writer = csv.writer(w, lineterminator='\n')
        writer.writerows(lines)
Example #8
    def __call__(self, value, system):
        fout = StringIO()
        writer = UnicodeWriter(fout, quoting=QUOTE_ALL)

        writer.writerow(value['header'])
        writer.writerows(value['rows'])

        resp = system['request'].response
        resp.content_type = 'text/csv'
        resp.content_disposition = 'attachment;filename="report.csv"'
        return fout.getvalue()
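This __call__ signature matches Pyramid's renderer protocol: value is whatever the view returns, and system['request'] is the active request. A sketch of how such a renderer might be registered and used, assuming the surrounding class is named CSVRenderer and accepts the renderer info in its constructor:

from pyramid.view import view_config

# at configuration time
config.add_renderer('csv', CSVRenderer)

# in a view module
@view_config(route_name='report', renderer='csv')
def report_view(request):
    return {'header': ['name', 'score'], 'rows': [['alice', 3], ['bob', 5]]}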
Example #9
def DeleteProject(ID):
    lines = list()
    with open('./static/Data/ProjectID.csv', 'r') as readFile:
        reader = csv.reader(readFile)
        for row in reader:
            lines.append(row)
            for field in row:
                if field == ID:
                    lines.remove(row)
                    break  # a second matching field would raise ValueError
    with open('./static/Data/ProjectID.csv', 'w', newline='') as writeFile:
        writer = csv.writer(writeFile)
        writer.writerows(lines)
    DeleteDIR(ID)
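The append-then-remove pattern above can also be written as a filter, which sidesteps mutating the list mid-iteration entirely; a small sketch of the same deletion:

import csv

def delete_project_rows(path, project_id):
    # Keep only the rows that do not contain the ID in any field.
    with open(path, 'r', newline='') as f:
        lines = [row for row in csv.reader(f) if project_id not in row]
    with open(path, 'w', newline='') as f:
        csv.writer(f).writerows(lines)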
Example #10
def writer_csv(header, data, filename, option):
    with open(filename, "w", newline="") as csvfile:
        if option == "write":

            wr = csv.writer(csvfile)
            wr.writerow(header)
            for x in data:
                wr.writerow(x)
        elif option == "update":
            writer = csv.DictWriter(csvfile, fieldnames=header)
            writer.writeheader()
            writer.writerows(data)
        else:
            print("Option is not known")
Example #11
def appendCSV(arrays_to_append, append_filename, first_row):

    import csv

    if os.path.isfile(append_filename):
        # Append rows to the end of append_filename
        with open(append_filename, 'a', newline='') as fd:
            writer = csv.writer(fd)
            writer.writerows(arrays_to_append)
        print("File appended: %s" % append_filename)
    else:
        arrays_to_write = [first_row]
        arrays_to_write.extend(arrays_to_append)
        SaveCSV(arrays_to_write, append_filename)
Example #12
def fetch_chat_bot_page(url: str):
    datax = {}
    req = Request(url, headers=hdr)
    html = urlopen(req, timeout=30)
    soup = BeautifulSoup(html, features="lxml")
    header_blocks = soup.find("header", {"class": "productBanner-alt"})
    row_px = header_blocks.findAll("h1")
    row_p = header_blocks.findAll("p")

    datax[row_px[0].text.strip()] = row_p[0].text.replace(
        "\n        ", "").replace("\n      ", "").replace("\n\t\t\t", "").strip()
    container_fluid = soup.find("div", {"class": "container-fluid"})

    row_title = container_fluid.findAll("h2")
    row_content = container_fluid.findAll("h3")
    list_features = []

    for title, content in zip(row_title, row_content):
        title = title.text.strip()
        content = content.text.replace(",", ".")
        content = content.replace("\n          ", "").strip()
        list_features.append(title)
        datax[title] = content

    print(datax)

    with open('datacrawler.csv', 'a+', newline='') as csvfile:
        # Create a writer object from csv module
        fieldnames = ['Feature', 'Desc', 'Sub Features', 'Sub Desc']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writerows([
            {
                'Feature': row_px[0].text.strip(),
                'Desc': datax[row_px[0].text.strip()].replace(",", ".").strip(),
                'Sub Features': list_features[0],
                'Sub Desc': datax[list_features[0]].replace(",", ".").strip()
            },
            {
                'Feature': row_px[0].text.strip(),
                'Desc': datax[row_px[0].text.strip()].replace(",", ".").strip(),
                'Sub Features': list_features[1],
                'Sub Desc': datax[list_features[1]].replace(",", ".").strip()
            },
        ])
Example #13
    def write_result(self):
        if self.result:
            from csv import writer
            with open(self.output, 'w', newline='') as bench_file:
                csv_writer = writer(bench_file)
                csv_writer.writerow(
                    ['Method', 'Dataset', 'Cat', 'Size.B', 'Memory.KB',
                     'Elapsed.s', 'User.s', 'System.s'])
                csv_writer.writerows(self.result)

            # Move obtained results to the result/ directory
            make_path('result')
            for file in os.listdir(current_dir):
                if file.endswith('.csv') or file.endswith('.svg') or \
                        file.endswith('.pos') or file.endswith('.fil'):
                    shutil.copy(file, 'result/')
                    remove_path(file)
Example #14
def ChangeName(New, ID):
    lines = list()
    with open('./static/Data/ProjectID.csv', 'r') as readFile:
        reader = csv.reader(readFile)
        for row in reader:
            lines.append(row)
            for field in row:
                if field == ID:
                    row[2] = New  # row is the same list object just appended
                    break
    with open('./static/Data/ProjectID.csv', 'w', newline='') as writeFile:
        writer = csv.writer(writeFile)
        writer.writerows(lines)


# CheckDB(UN)
# ChangeName("Test2","AW6Dj0")
# GenProject(UN,"helloworld")
# DeleteProject("ABDj2G")
# DeleteDIR("ABDj2G")
Example #15
    def save_csv(self,
                 directory=None,
                 extra_array_names=None,
                 extra_arrays=None):

        if extra_arrays is not None and extra_array_names is None:
            raise ValueError(
                "Enter a list of array names associated with 'extra_arrays'.")
        if extra_arrays is None and extra_array_names is not None:
            raise ValueError(
                "Names for 'extra_arrays' entered, but no extra arrays.")

        ##### Make variables #####
        subject = str(self.subject)
        experiment_name = str(self.experiment_name)
        start_date = str(self.start_date)
        start_time = str(self.start_time)
        end_date = str(self.end_date)
        end_time = str(self.end_time)
        session = str(self.session)
        ###########################

        self.file_name = "%s_%s_%s_%s.csv" % (start_date, start_time, subject,
                                              experiment_name)
        if directory is not None:
            self.file_name = directory + self.file_name

        ##### Organise meta data and extend if needed for extra events #####
        time_event_column = ["Time", "Event"]
        if self.extra_columns is not None:
            time_event_column.extend(self.extra_columns)  # assumes a list of names

        meta_data = [["Start_date", start_date], ["Start_time", start_time],
                     ["End_date", end_date], ["End_time", end_time],
                     ["Subject", subject], ["Session", session],
                     ["Experiment_name", experiment_name], time_event_column]

        with open(self.file_name, "w") as file:
            csv_writer = writer(file, delimiter=",", lineterminator="\n")
            csv_writer.writerows(meta_data)
            csv_writer.writerows(self.event_record)
            if extra_arrays is not None:
                for array in range(len(extra_array_names)):
                    csv_writer.writerow((extra_array_names[array],))
                    csv_writer.writerows(zip(extra_arrays[array]))

        msg = " subject %s, session %s, saved event record" % (subject,
                                                               session)
        log_file.update_log(time=clock.get_date_hmdmy(),
                            entry=msg,
                            directory=self.directory)
Example #16
    def save(self):
        print("Save to CSV File")
        first = [
            'id', 'name', 'type', 'color', 'type_of_wood', 'borrowed',
            'returned'
        ]
        # One list per table column, read straight out of the QTableWidget.
        columns = [
            [self.tableWidget.item(row, col).text()
             for row in range(self.tableWidget.rowCount())]
            for col in range(len(first))
        ]

        # Transpose the columns into rows and prepend the header row.
        row_list = list(zip(*columns))
        with open('db.csv', 'w', newline='') as database:
            writer = csv.writer(database)
            writer.writerow(first)  # header row
            writer.writerows(row_list)
Example #17
    def log_finish():
        pid_finished = request.body.getvalue().decode('utf-8')

        # Move files to the finishedLogs folder
        for x in range(9):

            conditionName = str(x)

            if x == 7:
                conditionName = "warmup"
            elif x == 8:
                conditionName = "cooldown"

            file_path = log_path + pid_finished + '-' + conditionName + ".csv"

            if os.path.isfile(file_path):
                shutil.move(
                    file_path, finished_log_path + pid_finished + '-' +
                    conditionName + "-f" + ".csv")

        # Update hasFinished in the PID list; the `with` block closes the
        # file, so no explicit close() is needed
        line_to_update = int(pid_finished) + 1
        with open('pidList.csv', 'r+', newline='') as f:
            r = csv.reader(f)
            split = list(r)
            split[line_to_update][4] = '1'
            writer = csv.writer(f)
            f.seek(0)
            writer.writerows(split)
            f.truncate()

        h = ''.join(random.choices(string.ascii_letters + string.digits, k=16))

        with open("finishedHashes.csv", "a") as csvfile:
            csvfile.write("\n" + str(h))
        return str(h)
Example #18
def main():
  pgtoken = "0"

  while pgtoken is not None:
    print(pgtoken)
    analytics = initialize_analyticsreporting()
    response = get_report(analytics, pgtoken)
    c, s = print_response(response)
    print(c, s)

    # Pair countries with sessions; materialise the zip so it can be
    # printed and then appended to the CSV
    store_response = list(zip(c, s))
    print(store_response)

    with open('pagination.csv', 'a', newline='') as myfile:
      writer = csv.writer(myfile)
      writer.writerows(store_response)

    pgtoken = response['reports'][0].get('nextPageToken')  # update the pageToken
Example #19
    def write_flows_in_new_format(self, flows, source_type, name):
        path = os.path.join(self.DATA_DIR, source_type)
        if not os.path.exists(path):
            os.makedirs(path)
        path = os.path.join(path, "%s.csv" % name)
        with open(path, 'w', encoding='utf8', newline='') as output:
            writer = DictWriter(output, fieldnames=self.fieldnames)
            writer.writeheader()
            new_flows = list(self._format_for_datapackage(flows, path))
            if 'numrodeligne' in flows[0] or 'numerodeligne' in flows[0]:
                nb_line_number_before = sum(
                    1 for f in flows
                    if f.get('numrodeligne', '') != '' or f.get('numerodeligne', '') != '')
                nb_line_number_after = sum(
                    1 for f in new_flows
                    if 'line_number' in f and f['line_number'] != '')
                if nb_line_number_before != nb_line_number_after:
                    print(nb_line_number_before)
                    print(nb_line_number_after)
                    exit(1)
            writer.writerows(new_flows)

            # path to be added in the datapackage resource
            return path
Example #20
def main():
    pgtoken = "0"

    # Truncate the CSV if it already exists, since pages are appended one by one
    with open('venv/Scripts/pagination1.csv', 'w', newline=''):
        pass

    while pgtoken is not None:
        print(pgtoken)
        analytics = initialize_analyticsreporting()
        response = get_report(analytics, pgtoken)
        headerlist, cdict, sdict = print_response(response)
        print(cdict, sdict)

        if pgtoken == "0":
            # First page: write the header row before the data
            with open('venv/Scripts/pagination1.csv', 'a',
                      newline='') as myfile:
                writer = csv.writer(myfile)
                writer.writerow(headerlist)
                writer.writerows(zip(*cdict.values(), *sdict.values()))
        else:
            with open('venv/Scripts/pagination1.csv', 'a',
                      newline='') as myfile:
                writer = csv.writer(myfile)
                writer.writerows(zip(*cdict.values(), *sdict.values()))

        pgtoken = response['reports'][0].get(
            'nextPageToken')  # update the pageToken
            print "Tot. sqft = " + str(totIc)
            totalIc.append(totIc)  # write total building ic

        ## Parcel Area......
        parArea.append(row[2])
        print "Parcel area = " + str(row[2])
        print "/********/********/********/"

        ## Parcel Area......
        pid.append(row[2])

        if count == 20:
            break
        #del tempBuild
        #del tempTotal
        count = count + 1
        shutil.rmtree(r'c:\temp')
        os.makedirs(r'c:\temp')

## Wrap into list of lists to write to csv, to send to R
listOfLists = [fid, parArea, totalIc, buildIc, pid]

## Write the lists out to a csv
with open("parcelICData.csv", "wb") as f:
    writer = csv.writer(f)
    writer.writerows(listOfLists)

## Clunky, open file, transpose, write out
a = izip(*csv.reader(open("parcelICData.csv", "rb")))
csv.writer(open("parcelICData_t.csv", "wb")).writerows(a)
Example #22
            try:
                data[header].append(value)
            except KeyError:
                data[header] = [value]

    for locus in fields[NUM_SKIP:]:
        homo_one = ''
        first = True
        for i, x in enumerate(data[locus]):
            if x == 'NA':
                data[locus][i] = 'NA'
                # assign heterozygote genos as 1
            elif not x[0] == x[1]:
                data[locus][i] = '1'
            elif x[0] == x[1]:
                # assign first homozygous geno as 0
                if first:
                    first = False
                    homo_one = x
                if x == homo_one:
                    data[locus][i] = '0'
                    # assign second homozygous geno as 2
                else:
                    data[locus][i] = '2'

# write data dictionary, now in numeric values, to outfile
with open(outfile_name, 'w', newline='') as outfile:
    csv_writer = writer(outfile, delimiter=',')
    csv_writer.writerow(fields)
    csv_writer.writerows(zip(*[data[key] for key in fields]))
Example #23
# This connects Python to the PostgreSQL database.
# The user should only change user and database.
import csv

import psycopg2

try:
    connection = psycopg2.connect(user="******",
                                  password="",
                                  host="localhost",
                                  port="5432",
                                  database="smithlig")

    cursor = connection.cursor()
    # Print the PostgreSQL connection properties
    print(connection.get_dsn_parameters(), "\n")
    # Run the same query we ran in psql
    query = "SELECT species_id, category, parks.park_name, abundance, latitude, longitude, acres FROM flora_fauna, parks where parks.park_name = flora_fauna.park_name;"
    # Fetch the data from the query
    cursor.execute(query)
    data = cursor.fetchall()

    # Write the queried data out to a CSV; opening with 'w' creates the
    # file, so no blank CSV needs to exist beforehand
    with open("combined.csv", 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(data)

# Print the error message if there is one
except (Exception, psycopg2.Error) as error:
    print("Error while connecting to PostgreSQL", error)
Example #24
    print("1. Cancel by Last Name \n")
    print("2. Cancel by Confirmation #\n")
    cancelSelect = input("Select an option: ")
    lines = list()
    if cancelSelect == "1":
        lastName = input("Enter last name: ")
        with open('hotelguests.csv', 'r') as readFile:
            reader = csv.reader(readFile)
            for row in reader:
                lines.append(row)
                for field in row:
                    if field == lastName:
                        lines.remove(row)
        with open('hotelguests.csv', 'w') as writeFile:
            writer = csv.writer(writeFile)
            writer.writerows(lines)
            time.sleep(2)
            print("Reservation removed from PMS.")

    else: 
        confNumber = input("Enter confirmation number: ")
        if cancelSelect == "2":
            with open('hotelguests.csv', 'r') as readFile:
                reader = csv.reader(readFile)
                for row in reader:
                    lines.append(row)
                    for field in row:
                        if field == confNumber:
                            lines.remove(row)

        with open('hotelguests.csv', 'w') as writeFile:
Example #25
def fetch_unified_messaging_page(url: str):
    datax = {}
    req = Request(url, headers=hdr)
    html = urlopen(req, timeout=30)
    soup = BeautifulSoup(html, features="lxml")
    header_blocks = soup.find("header", {"class": "productBanner-alt"})
    row_px = header_blocks.findAll("h1")
    row_p = header_blocks.findAll("p")

    datax[row_px[0].text.strip()] = row_p[0].text.strip()
    container_fluid = soup.find("div", {"class": "container-fluid"})

    list_features = []

    row_title = container_fluid.findAll("h2")

    for title in row_title:
        datax[title.text.strip()] = ""
        list_features.append(title.text.strip())

    row_content = container_fluid.findAll("h3")
    n = 0
    for container in row_content:
        if n == 0:
            datax[list_features[0]] = container.text.strip()
        if n == 1:
            datax[list_features[1]] = container.text.strip()
        if n == 2:
            datax[list_features[2]] = container.text.strip()
        if n == 3:
            datax[list_features[4]] = container.text.strip()
        n = n + 1

    mini_dict = {}
    sub_list_features = []
    row_para = container_fluid.findAll("p")
    for container in row_para:
        data = container.findAll("span")[0].text.strip()
        sub_list_features.append(data)
        for span in container.findAll("span"):
            span.decompose()
        # get_text() already returns str; no encode()/str() round-trip needed
        r = container.get_text().strip()
        mini_dict[data] = r
    datax[list_features[3]] = mini_dict
    print(datax)
    # datax = [ast.literal_eval(i) for i in dict]

    with open('datacrawler.csv', 'w', newline='') as csvfile:
        fieldnames = ['Feature', 'Desc', 'Sub Features', 'Sub Desc']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

        writer.writeheader()
        writer.writerows([
            {
                'Feature': row_px[0].text.strip(),
                'Desc': datax[row_px[0].text.strip()].replace(",", "."),
                'Sub Features': list_features[0],
                'Sub Desc': datax[list_features[0]].replace(",", ".")
            },
            {
                'Feature': row_px[0].text.strip(),
                'Desc': datax[row_px[0].text.strip()].replace(",", "."),
                'Sub Features': list_features[1],
                'Sub Desc': datax[list_features[1]].replace(",", ".")
            },
            {
                'Feature': row_px[0].text.strip(),
                'Desc': datax[row_px[0].text.strip()].replace(",", "."),
                'Sub Features': list_features[2],
                'Sub Desc': datax[list_features[2]].replace(",", ".")
            },
            {
                'Feature': row_px[0].text.strip(),
                'Desc': datax[row_px[0].text.strip()].replace(",", "."),
                'Sub Features': list_features[4],
                'Sub Desc': datax[list_features[4]].replace(",", ".")
            },
            {
                'Feature': row_px[0].text.strip(),
                'Desc': datax[row_px[0].text.strip()].replace(",", "."),
                'Sub Features': sub_list_features[0],
                'Sub Desc': datax[list_features[3]][sub_list_features[0]].replace(",", ".")
            },
            {
                'Feature': row_px[0].text.strip(),
                'Desc': datax[row_px[0].text.strip()].replace(",", "."),
                'Sub Features': sub_list_features[1],
                'Sub Desc': datax[list_features[3]][sub_list_features[1]].replace(",", ".")
            },
            {
                'Feature': row_px[0].text.strip(),
                'Desc': datax[row_px[0].text.strip()].replace(",", "."),
                'Sub Features': sub_list_features[2],
                'Sub Desc': datax[list_features[3]][sub_list_features[2]].replace(",", ".")
            },
        ])
Example #26
#         rank  = td_tag[0].get_text()
#         team  = td_tag[1].find("a").get_text()
#         won  = td_tag[2].get_text()
#         lost  = td_tag[3].get_text()
#         tied  = td_tag[4].get_text()
#         pct  = td_tag[5].get_text()
#         first  = td_tag[6].get_text()
#         total  = td_tag[7].find("a").get_text()
#         division  = td_tag[8].find("a").get_text()

# #         csv_writer.writerow([rank, team, won, lost, tied, pct, first, total, division])

############ one table scrape ##############
from bs4 import BeautifulSoup
import csv

html = open("table.html").read()
soup = BeautifulSoup(html, features="lxml")
table = soup.find("table")

output_rows = []
for table_row in table.findAll('tr'):
    columns = table_row.findAll('td')
    output_row = []
    for column in columns:
        output_row.append(column.text)
    output_rows.append(output_row)

# csv.writer needs a text-mode file in Python 3, not 'wb'
with open('output.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(output_rows)
Example #27
        # GETTING SUBJECT AND MARKS
        rows = results.find_all("tr")
        dataToPush = {"ID": eachId, "Name": name}
        # Getting Subject Details into Array
        for rowIndex, row in enumerate(rows[1:]):
            row.find(class_="hide").decompose()
            row.find(id="gvResults_imgRank_" + str(rowIndex)).decompose()
            row.find(id="gvResults_lblRankDetails_" +
                     str(rowIndex)).decompose()

            eachSubject = row.select_one("td:nth-of-type(2)").text
            subType = row.select_one("td:nth-of-type(4)").text
            eachMarks = row.select_one("td:nth-of-type(5)").text

            with open('results.csv', newline='') as myFile:
                csv_input = csv.reader(myFile)
                header = next(csv_input)
                for titleIndex, eachTitle in enumerate(header):
                    if eachSubject in eachTitle and subType in eachTitle:
                        dataToPush[eachTitle] = eachMarks

        allStudents.append(dataToPush)
        print(eachId)

with open('results.csv', 'w', newline='') as csvFile:
    writer = csv.DictWriter(csvFile, fieldnames=fieldNames)
    writer.writeheader()
    writer.writerows(allStudents)
Example #28
def write_csv_file(data, file_path, header, way="w"):
    # Check for the file before open() creates it; otherwise the header
    # would never be written in append mode.
    file_existed = exists(file_path)
    with open(file_path, way, newline="") as f:
        writer = DictWriter(f, header)
        if way == "w" or (way == "a" and not file_existed):
            writer.writeheader()
        writer.writerows(data)
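A quick usage sketch for the helper above; DictWriter and exists are assumed to come from csv and os.path as in the original module:

from csv import DictWriter
from os.path import exists

rows = [{"name": "alice", "score": 3}, {"name": "bob", "score": 5}]
write_csv_file(rows, "scores.csv", ["name", "score"])           # fresh file, header written
write_csv_file(rows, "scores.csv", ["name", "score"], way="a")  # append without a second header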
Example #29
# List of data rows as dictionaries. The leading entries of `mydict` were
# lost when this snippet was truncated; the 'branch' and 'cgpa' keys of the
# first surviving entry are reconstructed so the literal parses.
mydict = [{
    'branch': 'MCE',
    'cgpa': '7.8',
    'name': 'Prateek',
    'year': '3'
}, {
    'branch': 'EP',
    'cgpa': '9.1',
    'name': 'Sahil',
    'year': '2'
}]

# field names
fields = ['name', 'branch', 'year', 'cgpa']

# name of csv file
filename = "university_records2.csv"

# writing to csv file
with open(filename, 'w', newline='') as csvfile:
    # creating a csv dict writer object
    writer = csv.DictWriter(csvfile, fieldnames=fields)

    # writing headers (field names)
    writer.writeheader()

    # writer.writerow({'branch': 'COE', 'cgpa': '9.0', 'name': 'Nikhil', 'year': '2'})
    # writes a single row at a time, like that

    # writing data rows
    writer.writerows(mydict)

# https://www.geeksforgeeks.org/working-csv-files-python/
print("Calcul...")
dist = [
    Distance(au=earth.at(ts.utc(2017, 1, day)).observe(mercury).distance().au)
    for day in days]
mv = [AcceleratedMovement(distance=d, time=traveltime / 2) for d in dist]
vel = [m.velocity for m in mv]
accel = [m.accel for m in mv]
time = [lille_metro.move(d).time for d in dist]

print("Exportation des données...")
csvarray = [days, dist, vel, accel, time]
csvdata = [list(i) for i in zip(*csvarray)]
with open("metro.csv", "wb") as f:
    writer = writer(f)
    writer.writerows(csvdata)

print("Production du graphe...")
plt.figure(figsize=[10, 7])
plt.plot(days, [d.km for d in dist])
plt.title("Distance Alsace-Mercure (km)")
plt.savefig('metrodistance.svg')
plt.figure(figsize=[10, 7])
plt.plot(days, [v.kph for v in vel], 'g')
plt.title("Vitesse de pointe (km/h)")
plt.savefig('metrovelocity.svg')
plt.figure(figsize=[10, 7])
plt.plot(days, [t.d for t in time], 'r')
plt.title("Temps de parcours (jours)")
plt.savefig('metrotime.svg')
Example #31
                    features.append(0)

        if len(features) == 0:
            features = [i for i in range(33)]

        feature_table.append(features)

    return feature_table, file_name_table


if __name__ == "__main__":
    path = input('Enter the path: ')

    features, files = extractfeatures(path)

    with open('dt_model_dynamic', 'rb') as model_file:
        model = pickle.load(model_file)

    predictions = model.predict(features)

    final_list = []
    for i in range(len(predictions)):
        if predictions[i]:
            final_list.append([files[i], 'M'])
        else:
            final_list.append([files[i], 'B'])

    with open("dynamic.csv", "w") as f:
        writer = writer(f)
        writer.writerow(['FILE_HASH', 'Predicted Label'])
        writer.writerows(final_list)
Example #32
    def _process_data(self,
                      year,
                      udise_state_code,
                      udise_dist_code="none",
                      udise_block_code="none"):
        """This will process the data by loading the data file for a given
        year, udise_state_code, udise_dist_code and udise_block_code.
        
        Args:
            year (TYPE): Education year of the report
            udise_state_code (TYPE): UDISE state code
            udise_dist_code (str, optional): UDISE district code
            udise_block_code (str, optional): UDISE block code
        
        Raises:
            Exception: Throws an exception if the data file doesn't exist
        """
        data_file = os.path.join(
            self.data_folder,
            DATA_FILE_NAME_FORMAT.format(year=year,
                                         udise_state_code=udise_state_code,
                                         udise_dist_code=udise_dist_code,
                                         udise_block_code=udise_block_code))
        data_rows = []
        valid_columns = self.attribute_mapping.keys()
        print("Processing {data_file}".format(data_file=data_file))
        if path.exists(data_file):
            with open(data_file, "r") as json_data_file:
                json_data = json.loads(json_data_file.read())

            rows = json_data["rowValue"]
            for row in rows:
                for column in valid_columns:
                    # Process only if the data exists for that column
                    if column in row:
                        data_row = {}
                        data_row["Value"] = row[column]
                        data_row["Period"] = self._year_to_period(year)
                        data_row["LocationCode"] = row["location_code"]
                        data_row["LocationType"] = row["rpt_type"]

                        data_row["SocialCategory"] = SOCIAL_CATEGORY_MAPPING[
                            row["item_name"]]

                        attribute_map = self.attribute_mapping[column]
                        if "Gender" in attribute_map:
                            data_row["Gender"] = attribute_map["Gender"]
                        data_row["SchoolLevel"] = attribute_map["SchoolLevel"]
                        stat_var, variable_name = self._create_variable(
                            data_row)
                        data_row["StatisticalVariable"] = variable_name
                        data_rows.append(data_row)

            # Write the final rows to CSV
            # Write the header only when the CSV is being created
            write_header = not path.exists(self.csv_file_path)

            with open(self.csv_file_path, 'a', newline='') as file_object:
                writer = csv.DictWriter(file_object,
                                        extrasaction='ignore',
                                        fieldnames=self.csv_headers)
                if write_header:
                    writer.writeheader()
                writer.writerows(data_rows)
        else:
            raise Exception("Data file: {data_file} doesn't exist".format(
                data_file=data_file))