Example #1
def get_scores(year, month, day):
    home_teams = []
    away_teams = []
    home_scores = []
    away_scores = []
    
    games = Boxscores(datetime(year, month, day))
    all_games = games.games[str(month)+'-'+str(day)+'-'+str(year)]
    
    for game in all_games:
        home_teams.append(game['home_name'])
        away_teams.append(game['away_name'])
        home_scores.append(game['home_score'])
        away_scores.append(game['away_score'])
    
    name = str(year)+"_"+str(month)+"_"+str(day)+"_scores.csv"
    with open(name, 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(["home_team","away_team","home_score","away_score"])
        for row in zip(home_teams, away_teams, home_scores, away_scores):
            writer.writerow(row)
            
    path = os.getcwd()
    os.rename(os.path.join(path, name), os.path.join(path, "scores", name))
    return home_teams,away_teams,home_scores,away_scores
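For context, a minimal usage sketch of get_scores; the sportsipy import path and the date are assumptions, and a scores/ directory is expected to exist next to the script:

# hypothetical usage; the import path below is an assumption (sportsipy package)
import csv
import os
from datetime import datetime
from sportsipy.nfl.boxscore import Boxscores

home, away, home_pts, away_pts = get_scores(2020, 12, 6)   # placeholder date
for h, a, h_pts, a_pts in zip(home, away, home_pts, away_pts):
    print(f"{a} {a_pts} at {h} {h_pts}")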
Example #2
    def _force(self):
        if config.sections["CALCULATOR"].force:
            num_forces = np.array(pt.shared_arrays['a'].num_atoms)*3
            if pt.shared_arrays['configs_per_group'].testing:
                testing = -1 * np.sum(num_forces[-pt.shared_arrays['configs_per_group'].testing:])
            else:
                testing = 0

            a, b, w = self._make_abw(pt.shared_arrays['a'].force_index, num_forces.tolist())
            config_indicies,energy_list,force_list,stress_list = self._config_error()
            if config.sections["SOLVER"].detailed_errors and self.weighted == "Unweighted":
                from csv import writer
                true, pred = b, a @ self.fit
                if pt.shared_arrays['configs_per_group'].testing:
                    ConfigType = ['Training'] * (
                                np.shape(true)[0] - np.sum(num_forces[-pt.shared_arrays['configs_per_group'].testing:])) + \
                                 ['Testing'] * (np.sum(num_forces[-pt.shared_arrays['configs_per_group'].testing:]))
                else:
                    ConfigType = ['Training'] * np.shape(true)[0]
                with open('detailed_force_errors.dat', 'w') as f:
                    writer = writer(f, delimiter=' ')
                    writer.writerow(['FileName Type True-Ref Predicted-Ref Difference(Pred-True)'])
                    writer.writerows(zip(force_list,ConfigType, true, pred, pred-true))

            self._errors([[0, testing]], ['*ALL'], "Force", a, b, w)
            if testing != 0:
                self._errors([[testing, 0]], ['*ALL'], "Force_testing", a, b, w)
Example #3
    def _energy(self):
        if config.sections["CALCULATOR"].energy:
            testing = -1 * pt.shared_arrays['configs_per_group'].testing
            a, b, w = self._make_abw(pt.shared_arrays['a'].energy_index, 1)
            pt.single_print(np.shape(a))
            config_indicies, energy_list, force_list, stress_list = self._config_error(
            )
            self._errors([[0, testing]], ['*ALL'], "Energy", a, b, w)
            if config.sections[
                    "SOLVER"].detailed_errors and self.weighted == "Unweighted":
                from csv import writer
                true, pred = b, a @ self.fit
                ConfigType = ['Training'] * (np.shape(true)[0]-pt.shared_arrays['configs_per_group'].testing) + \
                                             ['Testing'] * (pt.shared_arrays['configs_per_group'].testing)
                with open('detailed_energy_errors.dat', 'w') as f:
                    writer = writer(f, delimiter=' ')
                    writer.writerow([
                        'FileName Type True-Ref Predicted-Ref Difference(Pred-True)'
                    ])
                    writer.writerows(
                        zip(energy_list, ConfigType, true, pred, pred - true))

            if testing != 0:
                self._errors([[testing, 0]], ['*ALL'], "Energy_testing", a, b,
                             w)
Example #4
 def create_csv_file(self, writer, word=None):
     '''takes a CSV writer as parameter'''
     if self.n > 0:
         writer.writerow([word, self.n])
     if self.d:
         for node in self.d:
             self.d[node].create_csv_file(writer, node)
Example #5
def write_to_csv(logfile, smoothed_value, threshold_flag):
    # append a timestamped reading to the csv log
    with open(logfile, "a", newline="") as f:
        writer = csv.writer(f)
        record_time = datetime.now().strftime("%X")
        writer.writerow([record_time, smoothed_value, threshold_flag])
Example #6
    def csv(self, response, archive, options, **params):
        response.setHeader('Content-Type', 'application/vnd.ms-excel')
        response.setHeader('Content-Disposition',
                           'attachment; filename=events.csv')
        from csv import writer
        writer = writer(response)

        wroteHeader = False
        for fields, evt in self._query(archive, **params):
            data = []
            if not wroteHeader:
                writer.writerow(fields)
                wroteHeader = True
            details = evt.get(DETAILS_KEY)

            for field in fields:
                val = evt.get(field, '')
                if field in ("lastTime", "firstTime", "stateChange") and val:
                    val = self._timeformat(val, options)
                elif field == DETAILS_KEY and val:
                    # ZEN-ZEN-23871: add all details in one column
                    val = json.dumps(val)
                elif not (val or val == 0) and details:
                    # ZEN-27617: fill in value for requested field in details
                    val = details.get(field, '')
                data.append(
                    str(val).replace('\n', ' ').strip()
                    if (val or val == 0) else ''
                )
            writer.writerow(data)
Example #7
def top_up(account, amount):
    """Tops up a selected account by rewriting bank.csv through a temp file."""
    # shutil moves the temp file back over the old file
    import shutil
    import csv

    filename = "bank.csv"
    temp = "temp_bank.csv"

    with open(filename, "r", newline="") as csvFile:

        reader = csv.DictReader(csvFile)

        with open(temp, "w", newline="") as temp_file:

            fieldnames = ["acc_name", "balance"]
            writer = csv.DictWriter(temp_file, fieldnames=fieldnames)
            writer.writeheader()
            for line in reader:
                if line["acc_name"] == str(account):
                    line["balance"] = int(line["balance"]) + int(amount)
                writer.writerow(line)

    shutil.move(temp, filename)
Example #8
    def csv(self, response, archive, options, **params):
        response.setHeader('Content-Type', 'application/vnd.ms-excel')
        response.setHeader('Content-Disposition',
                           'attachment; filename=events.csv')
        from csv import writer
        writer = writer(response)

        wroteHeader = False
        for fields, evt in self._query(archive, **params):
            data = []
            if not wroteHeader:
                writer.writerow(fields)
                wroteHeader = True
            details = evt.get(DETAILS_KEY)

            for field in fields:
                val = evt.get(field, '')
                if field in ("lastTime", "firstTime", "stateChange") and val:
                    val = self._timeformat(val, options)
                elif field == DETAILS_KEY and val:
                    # ZEN-ZEN-23871: add all details in one column
                    val = json.dumps(val)
                elif not (val or val == 0) and details:
                    # ZEN-27617: fill in value for requested field in details
                    val = details.get(field, '')
                data.append(
                    str(val).replace('\n', ' ').strip() if (
                        val or val == 0) else '')
            writer.writerow(data)
Example #9
def open_csv_first(logfile):
    # open csv file to record data
    if not path.exists(logfile):
        f = open(logfile, "a")
        writer = csv.writer(f)
        writer.writerow(["Time", "Value", "Over Threshold"])
        f.close()
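For reference, a minimal sketch of reading such a log back with csv.DictReader; the file name is a placeholder and the column names follow the header written by open_csv_first:

import csv

with open("sensor_log.csv", newline="") as f:    # placeholder log file name
    for row in csv.DictReader(f):
        if row["Over Threshold"] == "True":      # csv stores the booleans as text
            print(row["Time"], row["Value"])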
Example #10
  def set_data(self):
    with open(self.filename, 'w', newline='') as csvfile:
      writer = csv.DictWriter(csvfile, fieldnames=self.data[0].headers)
      writer.writeheader()

      for el in self.data:
        writer.writerow(el.data)
Example #11
def AddProductToProductList():
    print("fuckit")
    print(ProductNameEntry.get())
    print(productPriceEntry.get())
    with open('productlist.csv', 'a+', newline='') as file:
        writer = csv.writer(file)
        row = [ProductNameEntry.get(), productPriceEntry.get()]
        writer.writerow(row)
Example #12
 def writeNewLinksCsv(self, newLink):
     with open(self.wd + "barchartLinks.csv", 'a+', newline='') as linksCSV:
         writer = csv.writer(linksCSV, delimiter=',')
         if newLink not in self.csvLinks:
             writer.writerow(
                 [self.indexId, newLink,
                  self.title])  #str(newLinks[i]).split(self.subDomain)[1]])
             self.indexId += 1
Example #13
    def export_selected_objects(self, request, queryset):
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="reps.csv"'
        writer = csv.writer(response)
        writer.writerow(['Name', 'Company', 'Alumni', 'Present'])
        for rep in queryset.all():
            writer.writerow([rep.rep, rep.company, rep.is_alumni, rep.is_present])
        return response
Example #14
def UpdateAllProducts():
    print('Update all products')
    with open('productlist.csv', 'w', newline='') as file:
        writer = csv.writer(file)
        for name, price in zip(ManageProductsList, ManageProductsPriceList):
            print(name.get(), price.get())
            if(name.get() != ''):
                writer.writerow([name.get(), price.get()])
Example #15
def append_dict_as_row(csv_file, hours_data, fields):
    try:
        with open(csv_file, 'a+', newline='') as csvfile:
            writer = DictWriter(csvfile, fieldnames=fields)
            writer.writeheader()
            for data in hours_data:
                writer.writerow(data)
    except IOError:
        print("I/O error")
Example #16
 def export_selected_objects(self, request, queryset):
     response = HttpResponse(content_type='text/csv')
     response['Content-Disposition'] = 'attachment; filename="reps.csv"'
     writer = csv.writer(response)
     writer.writerow(['Name', 'Company', 'Alumni', 'Present'])
     for rep in queryset.all():
         writer.writerow(
             [rep.rep, rep.company, rep.is_alumni, rep.is_present])
     return response
Example #17
def out_put_symbols_csv():
    """
    Outpur symbols to symbols.csv
    """
    from csv import writer
    with open("symbols.csv", "w") as output_file:
        writer = writer(output_file)
        writer.writerow(["name", "symbol"])
        for name, symbol in SYMBOLS.items():
            writer.writerow([name, symbol])
Example #18
def save():
    with open(output_path, "a", newline='') as f:
        writer = csv.writer(f)

        position = mylist
        xr, yr = position[0][0], position[0][1]
        xl, yl = position[1][0], position[1][1]
        list_of_elem = [counter, xr, yr, xl, yl]
        writer.writerow(list_of_elem)
        print("saved ", counter)
Example #19
def add_details(Subject_ID, Subject_Name, Modules, SLO, Expected_Outcome):
    df = pd.read_csv("Data.csv")
    columns = list(df.head(0))
    ID = int(columns[-1]) + 1
    df[ID] = ""
    df.to_csv("Data.csv", index=False)
    fields = [ID, Subject_ID, Subject_Name, Modules, SLO, Expected_Outcome]
    with open('Subjects.csv', 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(fields)
Example #20
def windDataLogging(windDirRaw, windDirNow, windDirAvg):
    with open('wind.csv', 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow([
            time.time(), windDirRaw,
            round(windDirNow, 2),
            round(windDirAvg, 2)
        ])

    return (True)
Example #21
    def __call__(self, value, system):
        fout = StringIO()
        writer = UnicodeWriter(fout, quoting=QUOTE_ALL)

        writer.writerow(value['header'])
        writer.writerows(value['rows'])

        resp = system['request'].response
        resp.content_type = 'text/csv'
        resp.content_disposition = 'attachment;filename="report.csv"'
        return fout.getvalue()
Example #22
def csv_message(message, state='error', url=None, code=200):
    keys = ['message', 'state', 'url']
    response = Response(mimetype='text/csv', status=code)
    writer = DictWriter(response.stream, keys)
    writer.writerow(dict(zip(keys, keys)))
    writer.writerow({'message': message.encode('utf-8'),
                     'state': state,
                     'url': url.encode('utf-8') if url else ''})
    if url is not None:
        response.headers['Location'] = url
    return response
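csv_message writes utf-8 encoded bytes straight into response.stream, a Python 2 / Werkzeug idiom. A minimal Python 3 sketch of the same response built in memory, assuming a Flask/Werkzeug Response (the flask import and the helper name are assumptions, not part of the original):

from csv import DictWriter
from io import StringIO
from flask import Response  # assumed framework

def csv_message_py3(message, state='error', url=None, code=200):
    keys = ['message', 'state', 'url']
    buf = StringIO()
    writer = DictWriter(buf, keys)
    writer.writeheader()
    writer.writerow({'message': message, 'state': state, 'url': url or ''})
    response = Response(buf.getvalue(), mimetype='text/csv', status=code)
    if url is not None:
        response.headers['Location'] = url
    return response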
Example #23
def billScrape(soup, writer, billPage):

    sponsor_text = ''
    for sponsor in soup.find_all("table", class_="standard01"):
        sponsor_text = sponsor.find("a", target="_blank").get_text()
        #print(sponsor_text)

    title_text = ''
    for title in soup.find_all("h1", class_="legDetail"):
        title_text = title.get_text()
        #print(title_text)

    name_text = ''
    for name in soup.find_all("h2", class_="primary"):
        # name_text = name.get_text()
        name_text = name.contents[1]
        print(name_text)

    #for tracker in soup.find_all("li", class_="first selected last"):
    #tracker_text = tracker.get_text()
    #print(tracker_text)

    tracker2_text = ''
    for tracker2 in soup.find_all("h3", class_="currentVersion"):
        tracker2_text = tracker2.find("span").get_text()
        # print(tracker2_text)

    index = billPage.find('?')
    # print(billPage[0:index-1]+'/text?format=txt')
    billTextUrl = billPage[0:index - 1] + '/text?format=txt'

    billTextGet = requests.get(billTextUrl)
    soupBillText = BeautifulSoup(billTextGet.text, 'html.parser')

    # billTextLink = [0]
    # for billTextUrl in soup.find_all("ul", _class="cdg-summary-wrapper-list"):
    #     billTextUrl2 = billTextUrl.find("a", href=True)
    #     billTextLink = billTextUrl2['href']
    #     print(billTextLink)
    # billTextSearch = requests.get(billTextLink)

    # soupBillText = BeautifulSoup(billTextSearch.text, 'html.parser')

    writer.writerow([name_text, sponsor_text, title_text, tracker2_text])

    billText = soupBillText.find('pre', id='billTextContainer')
    if billText is not None:

        billText2 = billText.get_text()

        BillTxt = open(name_text + ".txt", "a")
        BillTxt.write(billText2)
        BillTxt.close()
Example #24
 def register_student(self, list_of_elem):
     with open('student_info.csv', 'a+', newline='') as write_obj:
         fieldnames = ['ID', 'Name', 'Amount', 'Amount Remaining']
         writer = csv.DictWriter(write_obj,
                                 fieldnames=fieldnames,
                                 delimiter='|')
         writer.writeheader()
         writer.writerow({
             'ID': list_of_elem[0],
             'Name': list_of_elem[1],
             'Amount': list_of_elem[2],
             'Amount Remaining': list_of_elem[3]
         })
Example #25
def csv_message(message, state='error', url=None, code=200):
    keys = ['message', 'state', 'url']
    response = Response(mimetype='text/csv', status=code)
    writer = DictWriter(response.stream, keys)
    writer.writerow(dict(zip(keys, keys)))
    writer.writerow({
        'message': message.encode('utf-8'),
        'state': state,
        'url': url.encode('utf-8') if url else ''
    })
    if url is not None:
        response.headers['Location'] = url
    return response
Example #26
 def log_stock_pick_CSV(data):
     fields = [
         'date',
         'stock',
         'Adj Close',
     ]
     fileName = config.STOCK_RESULTS
     try:
         with open(fileName, 'a+', newline='') as csvfile:
             writer = csv.DictWriter(csvfile, fieldnames=fields)
             #                writer.writeheader()
             writer.writerow(data)
     except Exception as e:
         print("Exception in log_stock_pick: " + e)
Example #27
def write_rows(writer, rows):
    '''Write a batch of row data to the csv writer'''
    for row in rows:
        try:
            writer.writerow(row)
        except UnicodeEncodeError:  # pragma: no cover
            # Python 2 csv does badly with unicode outside of ASCII
            new_row = []
            for item in row:
                if isinstance(item, text_type):
                    new_row.append(item.encode('utf-8'))
                else:
                    new_row.append(item)
            writer.writerow(new_row)
Example #28
def button(update, context):
    query = update.callback_query
    if query.data == "1":
        record = ["spam", SMS]
        with open('dataset/bot_dataset.csv', 'a', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(record)
    elif query.data == "0":
        record = ["ham", SMS]
        with open('dataset/bot_dataset.csv', 'a', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(record)
        print(record)
    query.edit_message_text(text="Thanks for your help! You're helping me to become the best version of myself")
Example #29
def add_user_data_to_csv(
    file,
    mix,
    email,
):
    """ used to append test data one wants to keep to a csv file """
    with open(file, 'a', newline='') as csvfile:
        fieldnames = ['DATE', 'MIX', 'EMAIL']
        writer = csv.DictWriter(csvfile,
                                fieldnames=fieldnames,
                                dialect="excel")
        today = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        writer.writerow({'DATE': today, 'MIX': mix, 'EMAIL': email})
Example #30
def del_users(first, last):
    global count
    with open('users.csv') as file:
        reader = csv.reader(file)
        rows = list(reader)

    with open('users.csv', 'w', newline='') as file:
        writer = csv.writer(file)
        for row in rows:
            if row[0] == first and row[1] == last:
                count += 1
            else:
                writer.writerow(row)
        return f'Users updated: {count}.'
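del_users rewrites users.csv in place, so an interruption between the read and the write can lose data. A minimal sketch of the same filter routed through a temporary file and an atomic os.replace; the helper name is hypothetical and the two-column match is assumed from the example:

import csv
import os
from tempfile import NamedTemporaryFile

def del_users_safe(first, last):
    removed = 0
    with open('users.csv', newline='') as src, \
         NamedTemporaryFile('w', newline='', delete=False, dir='.') as tmp:
        writer = csv.writer(tmp)
        for row in csv.reader(src):
            if row[:2] == [first, last]:
                removed += 1
            else:
                writer.writerow(row)
    os.replace(tmp.name, 'users.csv')   # atomic replacement on the same filesystem
    return f'Users updated: {removed}.'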
Example #31
def main():
    pgtoken = "0"
    # countrylist = []
    # sessionlist = []

    # clean csv if exist as used append function to do one by one export
    clean = []
    with open('venv/Scripts/pagination1.csv', 'w', newline='') as myfile:
        writer = csv.writer(myfile)
        writer.writerows(clean)

    while pgtoken is not None:
        print(pgtoken)
        analytics = initialize_analyticsreporting()
        response = get_report(analytics, pgtoken)
        headerlist, cdict, sdict = print_response(response)
        print(cdict, sdict)

        # for i in range(len(cdict.keys())):
        #     col = list(cdict.values())[i]
        #     print(col)

        # col1=list(cdict.values())[0]
        # col2 = list(cdict.values())[1]
        # col3 = list(cdict.values())[2]

        # accumulate data if export all record to csv
        # countrylist.append(c)
        # sessionlist.append(s)

        #  store_response =(zip(cdict.values(),slist))
        # # # store_response[pgtoken] = newdict
        #  print(store_response)

        if pgtoken == "0":
            with open('venv/Scripts/pagination1.csv', 'a',
                      newline='') as myfile:
                writer = csv.writer(myfile)
                writer.writerow(headerlist)
                writer.writerows(zip(*cdict.values(), *sdict.values()))

        else:
            with open('venv/Scripts/pagination1.csv', 'a',
                      newline='') as myfile:
                writer = csv.writer(myfile)
                writer.writerows(zip(*cdict.values(), *sdict.values()))

        pgtoken = response['reports'][0].get(
            'nextPageToken')  # update the pageToken
Example #32
def scrape(url):
    page = requests.get(url)
    tree = html.fromstring(page.content)
    title2 = str(lxml.html.parse(url).find(".//title").text)
    title2 = title2.replace('-' + title2.split("-", 1)[1], '')
    price = tree.xpath("//span[@itemprop='price']//text()")
    i = 0
    for span in tree.cssselect('span'):
        clas = span.get('class')
        rel = span.get('rel')
        if clas == "packaging-des":
            if rel != None:
                if i == 0:
                    weight = rel
                elif i == 1:
                    dim = str(rel)
                i = i + 1

    weight = weight
    height = dim.split("|", 3)[0]
    length = dim.split("|", 3)[1]
    width = dim.split("|", 3)[2]
    # Sometimes aliexpress doesn't list a price
    # This dumps a 0 into price in that case to stop the errors
    if len(price) == 1:
        price = float(str(price[0]))
    elif len(price) == 0:
        price = int(0)
    for inpu in tree.cssselect('input'):
        if inpu.get("id") == "hid-product-id":
            sku = inpu.get('value')
    for meta in tree.cssselect('meta'):
        name = meta.get("name")
        prop = meta.get("property")
        content = meta.get('content')
        if prop == 'og:image':
            image = meta.get('content')
        if name == 'keywords':
            keywords = meta.get('content')
        if name == 'description':
            desc = meta.get('content')
    listvar = (
    [str(title2), str(name), '', '', str(desc), 'publish', '', '', '0', '1', 'open', str(sku), 'no', 'no', 'visible',
     '', 'instock', 'no', 'no', str(price * 2), str(price * 1.5), str(weight), str(length), str(width), str(height),
     'taxable', '', '', '', 'no', '', '', '', '', '', '', '', '', '', str(keywords), str(image), '', 'simple', '', '',
     '', '0', '', '', '', '', '', '', '', ''])
    with open("output.csv", 'ab') as f:
        writer = csv.writer(f)
        writer.writerow(listvar)
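The final block opens output.csv in 'ab' mode, which only suits the Python 2 csv module; on Python 3 the writer needs a text-mode handle opened with newline=''. A minimal sketch of the same append for Python 3, with a placeholder row standing in for listvar:

# Python 3 variant of the final append; the row contents stand in for listvar
import csv

row = ["example title", "", "", "", "example description"]   # placeholder row
with open("output.csv", "a", newline="", encoding="utf-8") as f:
    writer = csv.writer(f)
    writer.writerow(row)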
Example #33
    def csv(self, response, archive, **params):
        response.setHeader('Content-Type', 'application/vnd.ms-excel')
        response.setHeader('Content-Disposition', 'attachment; filename=events.csv')
        from csv import writer
        writer = writer(response)

        wroteHeader = False
        for fields, evt in self._query(archive, **params):
            if not wroteHeader:
                writer.writerow(fields)
                wroteHeader = True
            data = []
            for field in fields:
                val = evt.get(field, '')
                data.append(str(val).replace('\n',' ').strip() if val or val == 0 else '')
            writer.writerow(data)
Example #35
    def write_result(self):
        if self.result:
            from csv import writer
            with open(self.output, 'w') as bench_file:
                writer = writer(bench_file)
                writer.writerow(
                    ['Method', 'Dataset', 'Cat', 'Size.B', 'Memory.KB',
                     'Elapsed.s', 'User.s', 'System.s'])
                writer.writerows(self.result)

            # Move obtained results to the result/ directory
            make_path('result')
            for file in os.listdir(current_dir):
                if file.endswith('.csv') or file.endswith('.svg') or \
                        file.endswith('.pos') or file.endswith('.fil'):
                    shutil.copy(file, 'result/')
                    remove_path(file)
Example #36
 def csv(response, devices, fields):
     response.setHeader('Content-Type', 'application/vnd.ms-excel')
     response.setHeader('Content-Disposition', 'attachment; filename=devices.csv')
     from csv import writer
     writer = writer(response)
     writer.writerow(fields)
     for device in devices:
         data = []
         for field in fields:
             value = device.get(field, '')
             if isinstance(value, list):
                 value = "|".join([v.get('name') for v in value])
             if isinstance(value, dict):
                 value = event(value) if field == 'events' else value.get('name')
             if not (value or value == 0):
                 value = ''
             data.append(str(value).strip())
         writer.writerow(data)
Example #37
    def stats(self, data):
        """Produce stats for count of lamps and densities. """
        from csv import writer
        
        p = self.partitions.find_or_new(table='streetlights')   
 
        p.database.attach(neighborhoods,'nb')
 
        name = data['name']
        
        # The areas are in square feet. WTF?
        feetperm = 3.28084
        feetperkm = feetperm * 1000
        
        
        with open(self.filesystem.path('extracts',name), 'wb') as f:
            writer = writer(f)
            writer.writerow(['count', 'neighborhood','area-sqft','area-sqm','area-sqkm', 'density-sqkm',])
            
            for row in p.database.query("""
            SELECT count(streetlights_id) as count, objectid, cpname, shape_area
            FROM streetlights, {nb}.communities
            WHERE streetlights.neighborhood_id = {nb}.communities.objectid
            GROUP BY {nb}.communities.objectid
            """):
                
                n = float(row['count'])
                area = float(row['shape_area'])
                
                writer.writerow([ 
                n,  
                row['cpname'].title(),
                area,
                area / (feetperm * feetperm),
                area / (feetperkm * feetperkm),
                n / (area / (feetperkm * feetperkm)) 
                ])
Example #38
        foundTargetGrammar = True
    if foundTargetGrammar and currGrammar != targetGrammar:
        foundAllTargetSentences = True
    i += 1

# Set up the sample learner and dictionary used to record
# which sentences trigger which parameters
sentenceParameterTriggers = defaultdict(lambda: [])
sampleLearner = Child()
oldGrammar = [0,0,0,0,0,0,1,0,0,0,1,0,1]

# After processing each sentence, the sample learner's
# grammar will be compared to the old one. Differences will
# be noted and the sentence that caused the change along with the
# morphed parameter will be added to the dictionary
for sentence in selectedSentences:
    sentenceStr = sentence.rsplit('\t', 3)[2]
    sampleLearner.consumeSentence(sentence)
    sampleLearner.setParameters()
    for i, parameter in enumerate(oldGrammar):
        if parameter != sampleLearner.grammar[i] and (not 'p{}'.format(i+1) in sentenceParameterTriggers[sentenceStr]):
            sentenceParameterTriggers[sentenceStr].append('p{}'.format(i+1))
    oldGrammar = [0,0,0,0,0,0,1,0,0,0,1,0,1]

# The output file will be opened and the corresponding
# sentences and parameters added line by line
with open(outputFile, 'a+') as outFile:
    writer = writer(outFile)
    for key in sentenceParameterTriggers:
        writer.writerow((key, sentenceParameterTriggers[key]))
Example #39
                     month,
                     day,
                     date(year,month,day).weekday(),
                     hour,
                     minute,
                     second))
    
writefile = 'texting_database.csv'
with open(writefile, 'w', newline='') as f:
    writer = writer(f)
    writer.writerow(('ID',
                     'Phone Number',
                     'Name',
                     'Sent',
                     'Year',
                     'Month',
                     'Day',
                     'Day of Week',
                     'Hour',
                     'Minute',
                     'Second'))
    for (index,text) in enumerate(database):
        entry = database[index]
        writer.writerow(entry)
        
#phonebook = {}
#for text in body:
#    #build up the phonebook with numbers and associated texts
#    x = sms.clean_phone(sms.get_phone(text))
#    z = sms.get_name(text)
#    if x not in phonebook:
Example #40
def PeakFit(separation, array, fileName):
    import numpy as np
    from scipy.optimize import curve_fit
    import matplotlib.pyplot as plt
    from math import floor
    from csv import writer
    from os import path
    from time import strftime, localtime
    
    x = array[:,0]  # 2-theta or radial distance. Refer to Fit2D output (chi plot).
    y = array[:,1] # intensity from Fit2D output (chi plot).
    
    done = False
    while not done:
        n = int(raw_input('Please enter the number of peaks to fit: ')) # ADD RAISE EXCEPTION FOR DATA OUT OF BOUND ???????
        if len(x) < n * separation:
            print ('Index out of bound. Input a smaller value.')
        else:
            done = True

#    sigma = int(raw_input('Please the half maximum width of peaks to fit: '))

    center = []           # placeholders
    height = []
    popt_array = [["center","height","baseline"]]
    
    for i in range(n):
        center.append(x[separation * (i + 1) - i/6]) # Defining starting point for peak fits. 
        height.append(y[separation * (i + 1)- i/6]) # Defining starting point for peak fits.
#    print center # For Debugging. 
#    print height # For Debugging.
    print '%15s %15s %15s'%("mu","height","baseline")
    plt.figure()
    for i in range(n):
        xdata = x[i*separation + int(floor(0.5 * separation)) - i/6 : (i + 2) * separation - int(floor(0.5 * separation))- i/6] # defining the X-range of the peak.
        ydata = y[i*separation + int(floor(0.5 * separation)) - i/6 : (i + 2) * separation - int(floor(0.5 * separation))- i/6] # defining the Y-range of the peak.
                
        par = [center[i], height[i], 50] # Initial guesses for the Gaussian fit.
                        # The 3 parameters correspond to the center, height and baseline in the GaussianModel definition.
        popt, pcov = curve_fit(GaussianModel,xdata, ydata, par) # popt is the optimum set of parameters for the Gaussian fit. 
                                                            # pcov is the covariance of the parameters. (not using in our case)
        popt_array.append(popt)                                 # writing popt to list to make a csv file with fit parameters and data. 
    
        print "%15.10f %15.10f %15.10f" % (popt[0],popt[1],popt[2])

        plt.plot(xdata, ydata) # Plotting raw data.
        plt.plot(np.arange(min(xdata),max(xdata),0.001), GaussianModel(np.arange(min(xdata),max(xdata),0.001), *popt)) # Plotting fit data. 
         # Saving peak fit plot and raw peak plot as overlaid png.
    plt.savefig(fileName+'peak'+'.svg')    
    plt.show()

#    plt.close() 

    outFileName = 'fitted_'+ fileName + '_' + strftime("%b%d%H%M",localtime()) +'.csv' 
                                    # 
    with open(outFileName, 'wb') as outfile: 
        
        writer = writer(outfile) # These lines are for writing out a csv file after removing the header from the chiplot. 
        for row in popt_array:
            writer.writerow(row)
        print 'The fit data has been output to ' + outFileName + ' in current directory.'