Example no. 1
    def test_temporary_file(self, tmpdir):
        fake_csv_tmpfile = tmpdir.join('fake.csv')
        with fake_csv_tmpfile.open('w') as csv:
            csv.write('amy,2000-01-01\n')
            csv.write('judy,1980-02-01\n')

        assert calculate_age_sum(fake_csv_tmpfile.strpath) == 52
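On a newer pytest, the same check reads a little shorter with the pathlib-based tmp_path fixture; a minimal sketch, assuming the same calculate_age_sum helper from the original:

    def test_temporary_file_tmp_path(self, tmp_path):
        # tmp_path is a pathlib.Path; write_text creates the file in one call
        fake_csv = tmp_path / 'fake.csv'
        fake_csv.write_text('amy,2000-01-01\njudy,1980-02-01\n')
        assert calculate_age_sum(str(fake_csv)) == 52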
Example no. 2
def join_segment_and_weather(segment_query):
    data_out = os.path.join(os.getcwd(),'joined.csv')

    wban_list = list(set([wban['WBAN'] for wban in list(leaderboard_collection.find(segment_query,{'_id':0,'WBAN':1}))])) #list of wban station ids
    wban_query = {'WBAN':{'$in':wban_list}}
    leaders = export_segment(segment_query)
    weather = export_weather(wban_query)

    concatenated = concatenate_files(leaders,weather,'concat.txt')
    consoleCmds = 'python .\\analyze\\mrjob_join.py'
    print consoleCmds

    with open(concatenated,'r') as concat:
        with open('output.txt','w') as output:
            p = subprocess.Popen(consoleCmds, stdin=concat, stdout=output)
            p.wait() # wait for the command to finish while both files are still open

    with open('joined.csv','w') as csv:
        with open('output.txt','r') as output:
            for line in output:
                try:
                    csv.write(ast.literal_eval(line.split('\t')[1]) + '\n')
                except (IndexError, ValueError, SyntaxError): # skip malformed lines
                    pass
    os.remove('output.txt')
    os.remove(concatenated)
    os.remove(leaders)
    os.remove(weather)
    return data_out
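The Popen-then-wait pattern is easy to get wrong (as above, where the files used to close before wait). A sketch of the same pipeline step with subprocess.run, which blocks while both redirected files are still open; the script path is kept from the example:

import subprocess

def run_mrjob_join(concatenated_path, output_path='output.txt'):
    with open(concatenated_path, 'r') as concat, open(output_path, 'w') as output:
        # run() waits for the child to exit before the with-block closes the files
        subprocess.run(['python', '.\\analyze\\mrjob_join.py'],
                       stdin=concat, stdout=output, check=True)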
Example no. 3
def _batch2(model):
	csv = open('lg_kdz.%s.csv' % (model), 'wb+')
	csv.write('model,region,country,chip_type,prod_type,buyer_name,swversion,live_date,firmware,\n')
	lg = LGMobile()
	for country in lg.ftp_country_info():
		ccode = country.country_code
		print ccode, model
		for sw in lg.tool_mode_country_check(ccode, model).itervalues():
			csv.write(sw.csv() + '\n')
	csv.close()
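Note that the file handle named csv shadows the standard csv module. A minimal Python 3 sketch of the same header-plus-rows pattern using csv.writer, which also quotes any field that contains a comma (the rows are made-up illustration data):

import csv

with open('lg_kdz.example.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['model', 'region', 'country', 'chip_type'])
    writer.writerow(['LG-D855', 'EU', 'DE', 'MSM8974'])  # hypothetical row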
Example no. 4
    def export(self, path):
        """Exports recipe ingredients to csv

        CSV is in the format "id,qty" with separate lines for each ingredient

        Args:
            path (pathlib.Path): Directory for the CSV file to be written to
        """
        csvpath = path / (self.id + ".csv")
        with csvpath.open("w") as csv:
            for i in self.ingredients:
                csv.write(i.id + "," + str(i.qty) + "\n")
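Since the ids or quantities could in principle contain commas, csv.writer is the safer tool; a sketch of the same method under the same self.id and self.ingredients assumptions:

    def export(self, path):
        """Export recipe ingredients as "id,qty" rows via csv.writer (sketch)."""
        import csv
        csvpath = path / (self.id + ".csv")
        with csvpath.open("w", newline="") as f:
            writer = csv.writer(f)
            for i in self.ingredients:
                writer.writerow([i.id, i.qty])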
Example no. 5
def saveEntropyCSV(infolder,outfile):
	filelist = os.listdir(infolder)
	if '.DS_Store' in filelist:
		filelist.remove('.DS_Store')
	csv = open(outfile, 'w') # 'w' creates the file if it does not exist
	for csvfile in filelist:
		P_i = makeP_i( infolder + '/' + csvfile )[0]
		S = entropy(P_i)
		F = csvfile.split('_')[0] # e.g. '0.044_0.038.csv'
		k = csvfile.split('_')[1][ :-4] #remove '.csv'
		csv.write( F + ',' + k + ',' + str(S) + '\n')
	csv.close()
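A sketch of the same loop with a context manager and without shadowing the csv module name; makeP_i and entropy are assumed helpers from the original:

import os

def save_entropy_csv(infolder, outfile):
    filelist = [f for f in os.listdir(infolder) if f != '.DS_Store']
    with open(outfile, 'w') as out:
        for csvfile in filelist:
            P_i = makeP_i(os.path.join(infolder, csvfile))[0]  # assumed helper
            S = entropy(P_i)                                   # assumed helper
            F, k = csvfile[:-4].split('_')[:2]  # 'F_k.csv' -> F, k
            out.write('%s,%s,%s\n' % (F, k, S))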
Example no. 6
def main(argv):
    if len(argv) == 1:
        msg = 'usage: python xlsx2vtb.py <xlsx_filename>\n'
        sys.stderr.write(msg)
        return 1

    filename = argv[1]
    workbook = WorkBook.fromfile(filename)
    csvs = workbook.csvlist()
    for csv in csvs:
        csv.write()
    vtbs = workbook.vtblist()
    for vtb in vtbs:
        vtb.write()
Example no. 7
def export_csv(modeladmin, request, queryset):
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=feedback.csv'
    writer = csv.writer(response, csv.excel)

    response.write(u'\ufeff'.encode('utf8'))
    writer.writerow([
        smart_str(u"ID"),
        smart_str(u"TITLE"),
        smart_str(u"DATE_POSTED"),
        smart_str(u"CUSTOMER"),
        smart_str(u"COMPANY"),
        smart_str(u"STATUS"),
        ])
    for obj in queryset:
        writer.writerow([
            smart_str(obj.pk),
            smart_str(obj.title),
            smart_str(obj.date_posted),
            smart_str(obj.customer),
            smart_str(obj.company),
            smart_str(obj.status),
            ])
    return response
export_csv.short_description = u"Export CSV"
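For the action to show up in the admin it still has to be attached to a ModelAdmin; a hedged sketch, where FeedbackAdmin and Feedback are assumed names:

from django.contrib import admin

class FeedbackAdmin(admin.ModelAdmin):
    actions = [export_csv]  # exposes "Export CSV" in the actions dropdown

# admin.site.register(Feedback, FeedbackAdmin)  # Feedback is an assumed model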
Example no. 8
 def generateCsvSetOfFiles(cls, top_folder):
     print "\nWALKING THE TREE", top_folder+",", "SEARCHING FOR PICTURES"
     csv_fullpath = DbMethods.temporaryCsvFile()
     csv = open(csv_fullpath, 'w')    
     for dirpath, dirnames, files in os.walk(top_folder):
         for name in files:
             if (name.lower().endswith('bmp')
             or  name.lower().endswith('gif')
             or  name.lower().endswith('jpg')
             or  name.lower().endswith('png')
             or  name.lower().endswith('tiff')):
                 row = dirpath+'`'+name+'\n'
                 csv.write(row)    
     csv.close()
     print "CSV FOLDER/FILE HAS BEEN GENERATED AT", csv.name 
     return csv_fullpath
Example no. 9
def freqWords(string,corpus,number):
    global pub,wordList
    wordList=[]
    stopset = set(stopwords.words('english'))
    words = WordPunctTokenizer().tokenize(string)
    wordsCleaned = [word.lower() for word in words if word.lower() not in stopset and len(word) > 2 ]
    for i in range(len(wordsCleaned)):
        wordList.append((corpus.tf_idf(wordsCleaned[i],string),wordsCleaned[i]))
    wordList = list(set(wordList))
    wordList = sorted(wordList,reverse=True)
    final = [word[1] for word in wordList[:number]]
    csv = open('db\\cyttron-keywords.csv','a') # escape the backslash in the Windows path
    if len(final) > 1:
        csv.write('"' + ','.join(final[:-1]) + ',' + final[-1] + '";')
    else:
        csv.write('"' + ''.join(final) + '";')
    csv.close()
    return final
Example no. 10
def getData(year, month, day, csv):
	global first
	newline = "<br />"
	url = urlify(year, month, day)
	response = urllib2.urlopen(url)
	line = response.readline() # the first line is read and discarded
	line = response.readline().replace(newline, "")
	if (first == 1):
		date = "year,month,day,"
		csv.write(date + line)
		first = 0
	while (line != ""):
		line = response.readline().replace(newline, "")
		if (line == ""):
			break
		date = "%d,%d,%d," % (year, month, day)
		csv.write(date + line)
Example no. 11
def siodoc(boias,dirout):

	'''
	Takes the address from which the buoy data is downloaded
	'''

	data = dt.datetime.strftime(dt.datetime.now(),'%Y%m%d%H')
	site = urllib2.urlopen("http://metocean.fugrogeos.com/marinha/Member/"+boias)
	print 'Baixando dado do SIODOC'

	#datefile = '%02d%02d%02d' %(y,m,d)
	filename = "SIODOC_"+data+".csv"
	
	#create .csv file
	csv = open(dirout+'SIODOC/'+filename,"w")	
	csv.write(site.read())
	csv.close()

	return
Example no. 12
def findAndCopy(csv, root, indicator, nextSign):
    value = None
    try:
        statisticsNumber = root.getElementsByTagName("statisticsNumber")[0].firstChild.nodeValue
        if int(statisticsNumber) > 1:
            value = 0
            for i in range(int(statisticsNumber)):
                value = value + int(root.getElementsByTagName(indicator)[i].firstChild.nodeValue)
        elif int(statisticsNumber) <= 1:
            value = root.getElementsByTagName(indicator)[0].firstChild.nodeValue
            
    except Exception:
        try:
            value = root.getElementsByTagName(indicator)[0].firstChild.nodeValue
        except Exception:
            value = "no data avalible"

    csv.write(str(value)) 
    print (indicator + ": " + str(value))  
    csv.write(nextSign)
    return
Example no. 13
        def merger():
            f1 = csv.reader(open('aapl_historical_test.csv', 'rb'))
            f2 = csv.reader(open('ibm_historical_test.csv', 'rb'))

            mydict = {}
            for row in f1:
                mydict[row[0]] = row[1:]

            for row in f2:
                mydict[row[0]].extend(row[1:]) # extend() mutates in place and returns None

            fout = csv.writer(open('merged.csv','w'))
            for k, v in mydict.items():
                fout.writerow([k] + v)
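A Python 3 sketch of the same merge with context managers and an explicit policy for keys that appear in only one file (file names kept from the example):

import csv

def merger():
    with open('aapl_historical_test.csv', newline='') as f1, \
         open('ibm_historical_test.csv', newline='') as f2:
        merged = {row[0]: row[1:] for row in csv.reader(f1)}
        for row in csv.reader(f2):
            merged.setdefault(row[0], []).extend(row[1:])  # keep IBM-only keys too
    with open('merged.csv', 'w', newline='') as out:
        writer = csv.writer(out)
        for key, values in merged.items():
            writer.writerow([key] + values)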
Example no. 14
def nGrams(string,corpus,number,clean=True):
    global wordList
    biList=[]
    triList=[]
    words = WordPunctTokenizer().tokenize(string)
    stopset = set(stopwords.words('english'))
    if clean:
        words = [word.lower() for word in words]
    word_filter = lambda w: len(w) < 2 or w.isdigit() # drop short and digit-only tokens
    
    bcf = BigramCollocationFinder.from_words(words)
    bcf.apply_word_filter(word_filter)
    biResult = bcf.nbest(BigramAssocMeasures.likelihood_ratio, number)

    tcf = TrigramCollocationFinder.from_words(words)
    tcf.apply_word_filter(word_filter)
    triResult = tcf.nbest(TrigramAssocMeasures.likelihood_ratio, number)

    for bi in biResult:
        biList.append(" ".join(bi))
    csv = open('db\\cyttron-keywords.csv','a')
    if len(biList) > 1:
        csv.write('"' + ','.join(biList[:-1]) + ',' + biList[-1] + '";')
    else:
        csv.write('"' + ''.join(biList) + '";')
    csv.close()
    
    for tri in triResult:
        triList.append(" ".join(tri))
    csv = open('db\\cyttron-keywords.csv','a')
    if len(triList) > 1:
        csv.write('"' + ','.join(triList[:-1]) + ',' + triList[-1] + '"\n')
    else:
        csv.write('"' + ''.join(triList) + '"\n')
    csv.close()
    print biList
    print triList
Example no. 15
    def getcontent(self, filename):
        contentfile = open('1.csv', 'wb')
        writer = csv.writer(contentfile)
        writer.writerow(['作者', '内容'])  # header row: author, content
        for url in geturls:
            req = urllib2.Request(url, headers=self.heads)
            res = urllib2.urlopen(req)
            html = res.read().decode('utf-8')
            soup = BeautifulSoup(html)
            blockcontents = soup.find_all(
                'div', class_="article block untagged mb15")
            for blockcontent in blockcontents:
                auther = blockcontent.find('div', class_="auther")
                content = blockcontent.find('div', class_="content")
                writer.writerow([auther, content])
        contentfile.close()
Example no. 16
 def run( self ):
     dealer_rule = self.dealer_rule_map[self.dealer_rule]()
     split_rule = self.split_rule_map[self.split_rule]()
     try:
         payout = ast.literal_eval( self.payout )
         assert len(payout) == 2
     except Exception as e:
         raise Exception('Invalid payout {0}'.format(self.payout))
     
     table = Table( decks=self.decks, limit=self.limit, 
     dealer=dealer_rule, split=split_rule, payout=payout)
     player_rule = self.player_rule_map[self.player_rule]()
     betting_rule = self.betting_rule_map[self.betting_rule]()
     player = Player( play=player_rule, betting=betting_rule, 
     rounds=self.rounds, stake=self.stake)
     simulate = Simulate(table, player, self.samples)
     with open( self.outputfile, 'w', newline='' ) as target:
         wtr = csv.writer(target)
         wtr.writerows(simulate)
Example no. 17
    req = urllib.request.Request(url)
    res = urllib.request.urlopen(req)
    html = res.read()
    return html


narou_root_url = 'http://ncode.syosetu.com/'
work_id = 'n9735cv'

url = narou_root_url + work_id
html = url_html(url)
soup = BeautifulSoup(html, 'html.parser')

csv_fname = 'narou_docs/narou.' + work_id + '.csv'
csv = open(csv_fname, 'w')
csv.write('name, since, user_note, text, url, when, access, card_note\n')

pretext_id = 0
who = soup.select('.novel_writername')[0].a.text
access = 'public'
note = 'corpus narou dev oz'
for sub in soup.select('.novel_sublist2'):
    url = 'http://ncode.syosetu.com' + sub.a.get('href')
    date = sub.select('.long_update')[0]\
        .get_text().split('\n')[1].split(' ')
    when = ''.join(date[0].split('/')) + 'T' + ''.join(
        date[1].split(':')) + '00+0900'
    work_fname = 'narou_docs/narou.' + '.'.join(
        url.split('/')[3:5]) + '.doc.txt'
    lines = open(work_fname).readlines()
    for line in lines:
Example no. 18
            items = i.find_all('tbody')
            rows = i.find_all('tr')
            for row in rows:
                cols = row.find_all('td')
                cols = [ele.text.strip() for ele in cols]
                data.append([ele for ele in cols if ele])

        student_name = soup.find('td', attrs={'style': 'padding-left:15px'})
        student_name = student_name.text.strip()

        print("Extracting results for: " + str(area_code) +
              str(college_code).upper() + str(year) + branch.upper() + usn)
        usn = "University Seat Number: " + "," + str(area_code) + str(
            college_code).upper() + str(year) + branch.upper() + usn + "\n"
        student = "Student name: " + "," + student_name[2:] + "\n"
        csv.write(usn)
        csv.write(student)

        thead = table.find('thead')
        trow = thead.find('tr')
        h = thead.find_all('th', attrs={'style': 'text-align:center;'})
        hvar = []
        for i in h:
            i = i.text.strip()
            hvar.append(i)

        hh = hvar[0] + "," + hvar[1] + "," + hvar[2] + "," + hvar[
            3] + "," + hvar[4] + "," + hvar[
                5] + "," + "Grades" + "," + "Grade Point" + "\n"
        csv.write(hh)
Example no. 19
def CAM_Writer(name, path, cam_times, gaps, indexes, base, header, Attitudes,
               Altitudes, Camera_Labels, if_reative, base_alt,
               if_gimbal):  # This function writes the output file
    download_dir = os.path.join(
        path, name + '.log'
    )  # where you want the file to be downloaded to
    csv = open(download_dir, "w")
    location_constructor = [
    ]  # What gets written to that juicy location csv file that Agisoft loves, with attitudes and all (delay included)
    # The Mission Planner log needs a header
    for a_row in header:
        for every_string in a_row:
            if every_string != a_row[-1]:
                csv.write(every_string + ',')
            else:
                csv.write(every_string)
        csv.write('\n')
    # Here the fun part begins
    i = 0
    for an_index in indexes:
        gap = m.fabs(cam_times[i] - int(base[an_index][1]))
        if m.fabs(gap - gaps[i]) < 0.001:
            if base[an_index][0] == 'GPS' or base[an_index][0] == ' GPS':
                base[an_index][0] = 'CAM'
                del base[an_index][2]
                del base[an_index][4]
                del base[an_index][4]
                base[an_index][7] = base[an_index][6]
                base[an_index].append(
                    '55'
                )  # Adds one more field to the GPS row, since it comes up one short
            if if_reative == 1:  # If the takeoff-site altitude is available, it is added to the barometric one to produce a very good altitude
                use_altitude = Altitudes[i] + base_alt
                base[an_index][6] = str(
                    use_altitude
                )  # Write the computed altitude to ALL the CAM altitude fields
                base[an_index][7] = str(use_altitude)
                base[an_index][8] = str(use_altitude)
            else:  # Otherwise the GPS altitude is used (which was in column 7 of the GPS row)
                base[an_index][7] = base[an_index][6]
                base[an_index][8] = base[an_index][6]
            if if_gimbal == 1:
                location_constructor.append([
                    Camera_Labels[i], base[an_index][4], base[an_index][5],
                    base[an_index][6],
                    str(Attitudes[i][2]), '0.0', '0'
                ])
            else:
                location_constructor.append([
                    Camera_Labels[i], base[an_index][4], base[an_index][5],
                    base[an_index][6],
                    str(Attitudes[i][2]),
                    str(Attitudes[i][1]),
                    str(Attitudes[i][0])
                ])
            if if_gimbal == 1:
                base[an_index][9] = '0'
                base[an_index][10] = '0.0'
                base[an_index][11] = str(Attitudes[i][2])
            else:
                base[an_index][9] = str(Attitudes[i][0])
                base[an_index][10] = str(Attitudes[i][1])
                base[an_index][11] = str(Attitudes[i][2])
            for every_string in base[an_index]:
                if every_string != base[an_index][-1]:
                    csv.write(every_string + ',')
                else:
                    csv.write(every_string)
            csv.write('\n')
        else:
            print('Error on index ' + str(an_index))
        i = i + 1
    download_dir = os.path.join(
        path, name + '_location.csv'
    )  # where you want the file to be downloaded to
    csv = open(download_dir, "w")
    for a_line in location_constructor:
        for every_string in a_line:
            if every_string != a_line[-1]:
                csv.write(every_string + ',')
            else:
                csv.write(every_string)
        csv.write('\n')
Example no. 20
import json
import csv


#Read a json file 
with open('employee.json', 'r') as f:
    datastore = json.load(f)
    print (datastore)

employee_data = datastore['employee_details']
#open a file for writing csv
write_file = open('parm.csv','w')

# create the csv writer object
csvconverter = csv.writer(write_file)

count = 0
for emp in employee_data:
    if count == 0:
        header = emp.keys()
        csvconverter.writerow(header)
        count += 1
    csvconverter.writerow(emp.values())

write_file.close()
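Because the JSON records are already dicts, csv.DictWriter can take the header and the rows directly; a sketch assuming the same employee.json layout:

import csv
import json

with open('employee.json') as f:
    employees = json.load(f)['employee_details']

# assumes employee_details is a non-empty list of flat dicts
with open('parm.csv', 'w', newline='') as out:
    writer = csv.DictWriter(out, fieldnames=list(employees[0].keys()))
    writer.writeheader()
    writer.writerows(employees)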
Example no. 21
def CSV(liste):
  nom_fichier = 'donnees.csv'
  csv = open(nom_fichier, "w")
  csv.write('"mot";'+'"effectif";'+'"rang";'+'"rang*effectif";'+'"longueur";'+"\n")
  counteur = 1
  for tupple in liste:
    csv.write('"'+tupple[0].encode("utf-8")+'"'+";")
    csv.write('"'+str(tupple[1])+'"'+";")
    csv.write('"'+str(counteur)+'"'+";")
    csv.write('"'+str(counteur*tupple[1])+'"'+";")
    csv.write('"'+str(len(tupple[0]))+'"'+";"+"\n")
    counteur += 1
  csv.close() # close the file so the rows are flushed
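The quoted, semicolon-separated layout above is exactly what csv.writer produces with delimiter=';' and QUOTE_ALL; a Python 3 sketch of the same table:

import csv

def export_word_stats(liste):
    with open('donnees.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f, delimiter=';', quoting=csv.QUOTE_ALL)
        writer.writerow(['mot', 'effectif', 'rang', 'rang*effectif', 'longueur'])
        for rang, (mot, effectif) in enumerate(liste, start=1):
            writer.writerow([mot, effectif, rang, rang * effectif, len(mot)])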
Example no. 22
print " Pierre Beland, 06-2013"
print " Statistics of history of contribution by user and team"
print "date range and bbox as input"
print "Objects (ie. nodes, ways, relations) created, modified and deleted"
print "Original script (Seb's stats script v0.4 ) written by Sebastien Pierrel produced only statistics for objects created"
print "========================================================================="
print "Input variables "
print "list of users by team : vectors users [1] to [6]"
print "date_from=" + str(date_from)
print "date_to=" + str(date_to)
print "bbox : min_lon=" + str(min_lon) + ", max_lon=" + str(max_lon) + ", min_lat=" + str(min_lat) + ", max_lat=" + str(max_lat)

print "Checking changesets for "
osmApi = OsmApi.OsmApi(app = "api.openstreetmap.fr",debug=True)
csv = open(nom_csv, 'wb')
csv.write("ekip, user, changeset, node_c,way_c,relation_c, node_m,way_m,relation_m, node_d,way_d,relation_d \n")
csv.flush()
nom_csv_team=nom_csv+'_team'
csv_team = open(nom_csv_team, 'wb')
csv_team.write("ekip, user, changeset, node_c,way_c,relation_c, node_m,way_m,relation_m, node_d,way_d,relation_d \n")
csv_team.flush()
print "trainee            number of changesets"
for ekip in range(1,8):
    stats_team= {"changeset":0,"node_c":0, "way_c":0, "relation_c":0,"node_m":0, "way_m":0, "relation_m":0,"node_d":0, "way_d":0, "relation_d":0}
    print "\n ekip " + str(ekip)
    for user in users[ekip]:
        stats = {"changeset":0,"node_c":0, "way_c":0, "relation_c":0,"node_m":0, "way_m":0, "relation_m":0,"node_d":0, "way_d":0, "relation_d":0}
        changesets = getChangesets(username=user)
        nb_changesets=len(changesets)
        # print string.rjust(`x`, 2), string.rjust(`x*x`, 3),
        #print str(user) +"\t\t" + str(nb_changesets)
Example no. 23
if verbose == True:
    print("Length of corrected DATA dictionary=", len(DATA))
    print("Length of corrected INT dictionary=", len(INT))
    print("Corrected time array:\n", corrected_time)

# <codecell>
### Saves the Time Profile Data to .csv in the run directory ###
if savedata == True:  # When saving is active:
    save = Base + Dye + "\\" + Concentration + "\\" + Run + "\\" + "timeprofiledata.csv"  # Path and name of file
    csv = open(save, "w")  # Opens the file to write
    for each, value in enumerate(INT):  # For the length of INT:
        x = corrected_time[each]  # First column is times (x_axis)
        y = INT[each] / max(
            INT)  # Second column is relative integral values (y_axis)
        row = str(x) + "," + str(y) + "\n"  # Sets up each row with delimiter
        csv.write(row)  # Writes each row to file
    if verbose == True:
        print("File saved as:", save)
    csv.close()

# <codecell>
### Plots spectroscope data ###
peak = max(datacut[Cut]["Intensity"])  # Determines the maxima of the spectrum

for i in range(len(DATA)):  # For each file:
    #for i in np.arange(0,9): # For a small test sample:
    textstr = "{}\nConcentration={}\nIntegration Time={}\nLaser Power={}\nFilter={} nm\n{}".format(
        Long_Dye, Long_Concentration, Integration_Time, Laser_Power, Filter,
        clocks[i])

    yfrac = 1 - ((peak - DATA[i]["Intensity"]) / peak)
Example no. 24
    except ValueError:
        print('您輸入了非數值之字元,請重新輸入數字代碼!')

end_date = dt.datetime.today().date()
start_date = end_date - dt.timedelta(days=dt_num - 1)

#####################################
## Open the browser and start the crawl
print('即將取得近 %d 天的貼文,欲取得的粉專共有 %d 個' % (dt_num, len(page_list)))
print('\n>> 請注意,程式執行完畢後會自動關閉 Chrome 瀏覽器,請勿手動關閉以免程式錯誤。\n若出現防火牆訊息,可點選關閉或接受。')
input('---請按下 Enter 鍵以開始爬蟲---')

csv = open('Facebook 粉專爬文_%s.csv' % end_date.strftime('%m%d'),
           'w',
           encoding='utf8')
csv.write('粉專名稱,編號,日期時間,內文,文章連結,按讚數,留言+分享數,\n')

print('>> 正在開啟瀏覽器...')
driver = webdriver.Chrome('./chromedriver.exe')
print('>> 開啟網頁中...')
driver.get('https://www.facebook.com')
print('>> 登入中...')
driver.find_element_by_id('email').send_keys(username)
driver.find_element_by_id('pass').send_keys(password)
driver.find_element_by_id('loginbutton').click()

brand = ''
brand_index = 0
for index in page_list:
    brand = index.split('/')[3]
    print('正在開啟網頁...')
Example no. 25
def video():
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-o",
                    "--output",
                    type=str,
                    default="barcodes.csv",
                    help="path to output CSV file containing barcodes")
    #    ap.add_argument("-o1", "--output2", type=str, default=files_name,
    #            help="path to output CSV file containing barcodes")
    args = vars(ap.parse_args())
    # initialize time and date and make filename friendly
    time_header = str(datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S'))

    # initialize the video stream and allow the camera sensor to warm up
    print("[ALERT] starting video stream...")
    print("Press 'Q' to exit")
    vs = VideoStream(src=0).start()
    # this is for a mobile solution
    #vs = VideoStream(usePiCamera=True).start()
    time.sleep(5.0)

    # open the output CSV file for writing and initialize the set of
    # barcodes found thus far
    csv = open(args["output"], "w")

    # time track variables. These are used to keep track of QR codes as they enter the screen
    found = []
    found_time = []
    found_status = []
    ctx = AuthenticationContext(url=settings['url'])
    # loop over the frames from the video stream
    while True:
        # grab the frame from the threaded video stream and resize it to
        # have a maximum width of 400 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=400)

        # find the barcodes in the frame and decode each of the barcodes
        barcodes = pyzbar.decode(frame)
        timestr = strftime("%m/%d/%Y %H:%M")
        # loop over the detected barcodes
        for barcode in barcodes:
            # extract the bounding box location of the barcode and draw
            # the bounding box surrounding the barcode on the image
            (x, y, w, h) = barcode.rect
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

            # the barcode data is a bytes object so if we want to draw it
            # on our output image we need to convert it to a string first
            barcodeData = barcode.data.decode("utf-8")
            barcodeType = barcode.type

            # draw the barcode data and barcode type on the image
            text = "{} ({})".format(barcodeData, barcodeType)
            cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 0, 255), 2)

            # if the barcode text is currently not in our CSV file, write
            # the timestamp + barcode to disk and update the set
            # if barcode data has never been seen, check the user in and record id, date, and time information
            if barcodeData not in found:
                csv.write("{},{},{},{}\n".format(system_id,
                                                 datetime.datetime.now(),
                                                 barcodeData, "IN"))
                csv.flush()
                contentstr = "{},{},{},{}\n".format(system_id, timestr,
                                                    barcodeData, "IN")

                create_list_item(ctx, contentstr)
                fname = "QRT" + "-" + system_id + "_" + time_header + ".csv"
                upload_file(ctx, contentstr, fname, bkcsvfolder)

                found.append(barcodeData)
                found_time.append(datetime.datetime.now())
                found_status.append("IN")
                sys.stdout.write('\a')
                sys.stdout.flush()
                print(barcodeData + " checking IN at " +
                      str(datetime.datetime.now()) + " at location: " +
                      system_id)

            # if barcode information is found...

            elif barcodeData in found:
                time_check = datetime.datetime.now() - found_time[found.index(
                    barcodeData)]
                status_check = found_status[found.index(barcodeData)]

                # if time exceeds wait period and user is checked in then check them out

                if time_check > t_value and status_check == "IN":
                    index_loc = found.index(barcodeData)
                    found_status[index_loc] = "OUT"
                    found_time[index_loc] = datetime.datetime.now()
                    csv.write("{},{},{},{},{}\n".format(
                        system_id, datetime.datetime.now(), barcodeData, "OUT",
                        time_check))
                    csv.flush()
                    contentstr = "{},{},{},{},{}\n".format(
                        system_id, timestr, barcodeData, "OUT", time_check)

                    create_list_item(ctx, contentstr)
                    fname = "QRT" + "-" + system_id + "_" + time_header + ".csv"
                    upload_file(ctx, contentstr, fname, bkcsvfolder)

                    sys.stdout.write('\a')
                    sys.stdout.flush()
                    print(barcodeData + " checking OUT at " +
                          str(datetime.datetime.now()) + " at location: " +
                          system_id + " for duration of " + str(time_check))
                # if found and check-in time is less than the specified wait time then wait
                elif time_check < t_value and status_check == "OUT":
                    pass
                # if found and time check exceeds specified wait time and user is checked out, delete ID and affiliated data from the list. This resets everything for said user and allows the user to check back in at a later time.
                elif time_check > t_value and status_check == "OUT":
                    index_loc = found.index(barcodeData)
                    del found_status[index_loc]
                    del found_time[index_loc]
                    del found[index_loc]
            else:
                print("Something happened... error")

        # show the output frame
        cv2.imshow("QR Toolbox", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # close the output CSV file do a bit of cleanup
    print("[ALERT] cleaning up... \n")
    csv.close()
    cv2.destroyAllWindows()
    vs.stop()
Example no. 26
with open('adr_tst.csv') as cs:
    rdr = csv.DictReader(cs)
    for row in rdr:
        st = row['street']
        hs = row['house']
        sear = {
            'macroRegionId': 107000000000,
            'regionId': 107401000000,
            'street': st,
            'house': hs
        }
        r = requests.get('http://rosreestr.ru/api/online/address/fir_objects',
                         params=sear)
        output = open("result.txt", "a")
        output.write(r.text)
        output.close()
        rw = rw + 1
        print('Seek for ' + st, hs)
        print(r.text)
        js_pars = json.loads(r.text)
        rjs = js_pars['objectId']
        js_out = open('js_out.csv', 'w')
        csvwriter = csv.writer(js_out)
        for jsd in rjs:
            if count == 0:
                header = jsd.keys()
                csvwriter.writerow(header)
                count += 1
            csvwriter.writerow(jsd.values())
        js_out.close()
print(rw)
Example no. 27
def write_vendor_usage(csv, filename, tryBool):
    if (tryBool):
        csv.write("%s,%s,%s,%s,%s,%s,%s,%s\n" %
                  (usage__buildingCode, usage__issuerAbbriviation,
                   usage__accountNumber, usage__charge, usage__meterNumber,
                   usage__date, usage__usageAmount, usage__usageType))
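With a csv.writer the eight fields stay aligned without the format string; a sketch where the usage__* globals are assumed from the surrounding script, and the first parameter is renamed out_file to keep the csv module importable:

import csv

def write_vendor_usage(out_file, filename, tryBool):
    if tryBool:
        writer = csv.writer(out_file)  # out_file is an already-open file object
        writer.writerow([usage__buildingCode, usage__issuerAbbriviation,
                         usage__accountNumber, usage__charge, usage__meterNumber,
                         usage__date, usage__usageAmount, usage__usageType])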
Example no. 28
with open('test5.csv') as csvfile:
    readC = csv.reader(csvfile, delimiter=',')
    totalsum = 0
    for avgclc in readC:
        totalsum = totalsum + int(avgclc[0])
    totalavg = totalsum / 60000
with open('test5.csv') as csvfile:
    readCSV = csv.reader(csvfile, delimiter=',')
    j = 0
    i = 0
    sum = 0
    avg = 0
    for row in readCSV:
        if j < 250:
            sum = sum + int(row[0])
            j = j + 1
        elif j == 250:
            j = 0
            avg = sum / 250
            if (avg > totalavg):
                print("Yes")
                binary.append("Yes")
            else:
                print("No")
                binary.append("No")
            sum = 0
        i = i + 1
csv = open("output5.csv", "w")
for bit in binary:
    csv.write(bit + "\n")
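A sketch of the same windowed comparison that reads the file once and keeps the csv module name free; note the original skips one row between windows, while this version uses clean back-to-back 250-row windows:

import csv

with open('test5.csv', newline='') as f:
    values = [int(row[0]) for row in csv.reader(f)]

totalavg = sum(values) / 60000  # denominator kept from the original
binary = []
for start in range(0, len(values) - 249, 250):
    window_avg = sum(values[start:start + 250]) / 250
    binary.append("Yes" if window_avg > totalavg else "No")

with open("output5.csv", "w") as out:
    out.write("\n".join(binary) + "\n")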
Example no. 29
com_li=[]
#text_file=open("company_names.txt","w")
#text_file.write("start:\n")
for each in com_tags:
    names=cop_for.match(each)
    if names!=None:
        #print str(names.group(1))
        com_li.append(str(names.group(1)))
print com_li


today=datetime.date.today()
starting=today-datetime.timedelta(days=30)

sdate=str(int(starting.strftime('%d')))
smonth=str(int(starting.strftime('%m'))-1)
syear=str(int(starting.strftime('%Y')))
    
edate=str(int(today.strftime('%d')))
emonth=str(int(today.strftime('%m'))-1)
eyear=str(int(today.strftime('%Y')))

date_url="&a="+smonth+"&b="+sdate+"&c="+syear+"&d="+emonth+"&e="+edate+"&f="+eyear+"&g=d&ignore=.csv"
print date_url
for each in com_li:
    main_url=company_url+"s="+each+date_url
    u = urllib2.urlopen(main_url)
    with open('Companies\\'+each+'.csv',"w") as csv:
        csv.write(u.read()) # the with-block closes the file automatically
Example no. 30
osmApi = OsmApi.OsmApi()
csv = open("stats.csv", "wb")
for user in users:
    stats = {"node": 0, "way": 0, "relation": 0}
    # print "Checking changesets for " + str(user)
    str("Checking changesets for " + str(user))
    changesets = getChangesets(username=user)
    for id in changesets:
        csstat = getChangesetStats(id)
        stats["node"] += csstat["node"]
        stats["way"] += csstat["way"]
        stats["relation"] += csstat["relation"]
    #        stats = updateStat(stats, getChangesetStats(id))
    #    print user + ", " + str(len(changesets)) + ", " + str(stats["node"]) + ", " + str(stats["way"]) + ", " + str(stats["relation"])
    csv.write(
        str(user)
        + ", "
        + str(len(changesets))
        + ", "
        + str(stats["node"])
        + ", "
        + str(stats["way"])
        + ", "
        + str(stats["relation"])
        + "\n"
    )
    csv.flush()
csv.close()
# print "Done."
"Done."
Example no. 31
def create_file():
    try:

        accesslog = open('/opt/wow/WOWHoneypot/log/access_log')
        lines = accesslog.readlines()
        accesslog.close()

        logcount = 0
        rawdata = "■アクセスlogのrawdataは以下となってます。\n\n"
        for line in lines:
            if line.find(yestarday.strftime("%Y-%m-%d")) >= 0 and line.find(
                    ExclusionIP) < 0:
                logcount += 1
                rawdata += "-=-=" + str(logcount) + "件目のlog=-=-\n\n"
                rawdata += line[:-1]
                bunkatsu = line.rsplit(" ", 1)
                decdata = (base64.decodestring(
                    bunkatsu[-1].encode("ascii")).decode("utf8"))
                rawdata += ("\n\n" + str(decdata) + "\n\n")

        ac_log = str()
        for line in lines:
            if line.find(yestarday.strftime("%Y-%m-%d")) >= 0 and line.find(
                    ExclusionIP) < 0:
                bunkatsu2 = line.rsplit(" ", 1)
                decdata2 = (base64.decodestring(
                    bunkatsu2[-1].encode("ascii")).decode("utf8"))
                ac_log += line.rsplit(" ", 1)[0] + decdata2

        all_iplist = []
        for line in lines:
            if line.find(yestarday.strftime("%Y-%m-%d")) >= 0 and line.find(
                    ExclusionIP) < 0:
                all_ips = re.search(
                    "(([1-9]?[0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}(([1-9]?[0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\s)",
                    line)
                all_iplist.append(all_ips.group())
                unique_iplist = list(dict.fromkeys(all_iplist))

        if os.path.exists("/opt/wow/WOWHoneypot/log/hunting.log"):
            with open('/opt/wow/WOWHoneypot/log/hunting.log',
                      "r") as huntinglog:
                lines2 = huntinglog.readlines()

            huntingdata = "■huntinglogのrawdataは以下となってます。\n\n"
            for line in lines2:
                if line.find(yestarday.strftime(
                        "%Y-%m-%d")) >= 0 and line.find(ExclusionIP) < 0:
                    huntingdata += line[:-1]
                    huntingdata += "\n\n"
        else:
            huntingdata = ("■hunting.logは生成されていません。\n\n")

        body = "■" + yestarday.strftime("%Y-%m-%d") + "のアクセス数は" + str(logcount) + "件でした。\n\n"+ "■送信元IPアドレスの数は\n"\
        + str(len(unique_iplist)) + "件です。\n\n"

        if not os.path.exists("./log/"):
            os.mkdir("./log/")

        csv = open("./log/access_" + yestarday.strftime("%Y-%m-%d") + ".log",
                   "w")
        csv.write(ac_log)
        csv.close()

        f = open("./log/honeypot_" + yestarday.strftime("%Y-%m-%d") + ".log",
                 "w")
        f.write(huntingdata + rawdata)
        f.close()

        return body

    except Exception as e:
        body = str(e)
        print(str(e))
        return body
Example no. 32
for line in fileinput.input(file_name, inplace=False):
	line = re.sub('(\\|)+(\d)+(\\|)+.+', "", line.rstrip())
	line = re.sub('\\||{|}|fb|style=\".*\"', "", line.rstrip())
	line = re.sub('\\[|\\]', "", line.rstrip())
	line = re.sub('FIFA World Cup(\d){4}(#\d\\*)*', "", line.rstrip())
	line = re.sub('#\d\\^', "", line.rstrip())
	line = re.sub('(^style=.+)|-|<sup>...</sup>|\\(|\\)', "", line.rstrip())
	line = re.sub(' , ', " ", line.rstrip())
	lines.append(line)


num_lines = len(lines)
i = 0

outputFile = open("output.csv", 'wb')
wr = csv.writer(outputFile)

while(not re.match('[A-Z]{3}', lines[i])):
	i += 1

csv = []
csv.append(["country", "year", "placing"])

while(i < num_lines and re.match('[A-Z]{3}', lines[i])):
	country = lines[i].rstrip()
	x = [1,2,3,4]
	for placing in x:
		l2 = lines[i+placing]
		if(not re.match('align=centersort dash', l2)):
			for year in l2.split(" "):
				if len(year) > 1:
Example no. 33
try:
    r = 1
    reader = csv.reader(f)
    for row in reader:
        if r == 1:
            header = row
        if r == 2:
            subheader = row
        r = r + 1
        if (r > 2):
            break

    # print header

    csv = open(TransposeFile, "w")
    csv.write('id,header,subheader,value\n')

    for row in reader:
        r = 0
        for column in row:
            if (r > 1):
                transrow = id + ',' + header[r] + ',' + subheader[r] + ',' + column + '\n'
                csv.write(transrow)
                print('%s,%s,%s,%s' % (id, header[r], subheader[r], column))
            elif r == 0:
                id = column
            r = r + 1

finally:
    f.close()
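The transpose itself reads more clearly with csv.writer and enumerate; a sketch under the same assumptions (the two header rows already consumed from the reader, and the same output path):

import csv

def transpose_rows(reader, header, subheader, out_path):
    with open(out_path, "w", newline="") as out:
        writer = csv.writer(out)
        writer.writerow(["id", "header", "subheader", "value"])
        for row in reader:
            row_id = row[0]  # first column carries the id; column 1 is skipped as in the original
            for col, value in enumerate(row[2:], start=2):
                writer.writerow([row_id, header[col], subheader[col], value])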
Example no. 34
#              = [ (-63.3, 'manual_0.pdb'), (...)]
all_cst_scores = [(float(row[0]),row[2]) for row in cr]
all_cst_scores.sort()
lowest_energy_filename = all_cst_scores[0][1]

rmsd_ref_pose = Pose()
pose_from_pdb( rmsd_ref_pose, os.path.basename(lowest_energy_filename) ) 

# csv.reader is a one-pass iterator, so the first loop exhausts it; re-open the file to read it again
cr = csv.reader(open('energies.sc','rb'))
for row in cr:

    cen_score_with_cst      = row[0]
    cen_score_without_cst   = row[1]
    filename                = row[2]

    this_pdb = os.path.join( this_directory, filename )

    pose = Pose()
    pose_from_pdb( pose, os.path.basename(filename) )

    # RMSD
    rmsd = CA_rmsd( rmsd_ref_pose, pose )

    with open('rmsds_energies.sc', 'a') as csv:
        linetowrite = str(rmsd)+','+cen_score_with_cst+','+cen_score_without_cst+','+filename
        csv.write( linetowrite + '\n' )



Example no. 35
import csv
import sys


def xor(val1, val2):
    if (len(val1) != len(val2)):
        print("Failure to xor due to unequal lengths in input.")
        sys.exit()
    xored = []
    for i in range(len(val1)):
        bit1 = int(val1[i])
        bit2 = int(val2[i])
        xorBit = int(bool(bit1) ^ bool(bit2))
        xored.append(xorBit)
    return ''.join(map(str, xored))


with open('output1.csv', 'r') as csv_file:
    csv_reader = csv.reader(csv_file)
    for row in csv_reader:
        count = 0
        row = str(row).split('\\t')
        r1 = row[0].replace("['", "")
        r2 = row[2].replace("']", "")
        xored = xor(r1, r2)
        for c in xored:
            if (c == '1'):
                count += 1
        with open('outputxor.csv', 'a') as csv:
            csv.write(r1 + '\t' + r2 + '\t' + xored + '\t' + str(count) + '\n')
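Re-opening outputxor.csv for every input row costs one open/close per line; a sketch that opens both files once, with xor being the function defined above:

with open('output1.csv', 'r') as src, open('outputxor.csv', 'w') as dst:
    for row in csv.reader(src):
        parts = str(row).split('\\t')
        r1 = parts[0].replace("['", "")
        r2 = parts[2].replace("']", "")
        xored = xor(r1, r2)
        dst.write(r1 + '\t' + r2 + '\t' + xored + '\t' + str(xored.count('1')) + '\n')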
Example no. 36
cnt2 = 0
with open('/home/pfb16181/NetBeansProjects/birch/data/datasets/clef171819.csv',
          'r') as read_obj:
    csv_reader = reader(read_obj)
    for idx, row in enumerate(csv_reader):
        try:
            line = row[0]
            docid = line.split('\t')[-3].split('_')[0]
            qid = line.split('\t')[-2]
            # if docid in unique_flat_LIST_OF_LISTs_OF_PMIDS_FOR_EACH_TOPIC and qid in CLEF_TOPIC_LIST:
            if qid in CLEF_TOPIC_LIST:
                modified_line = row[0]
                temp = modified_line.split('\t')
                temp[-1] = str(cnt2)
                list_lines_saved_for_1718TestCLEF.append(temp)
                cnt2 += 1
        except Exception:
            cnt += 1

print(
    'created list_lines_saved_for_1718TestCLEF.. with {} exceptions and size = {}. length of csv reader {}'
    .format(cnt, len(list_lines_saved_for_1718TestCLEF), idx))

# write results to output.
sep = "\t"
with open('/home/pfb16181/NetBeansProjects/birch/data/datasets/clef1718.csv',
          'w') as csv:
    for row in list_lines_saved_for_1718TestCLEF:
        csv.write(sep.join(row))
        csv.write("\n")
Example no. 37
def write_training_test_results(df_time, methods):
    download_dir = "/Users/raghav/Documents/Uni/oc-nn/trainTest_Time/usps_trainTest.csv"  # where you want the file to be downloaded to
    print "Writing file to ", download_dir
    csv = open(download_dir, "a")
    # map each method name to its df_time key prefix
    method_keys = {
        "OC-NN-Linear": "tf_OneClass_NN-Linear",
        "OC-NN-Sigmoid": "tf_OneClass_NN-Sigmoid",
        "CAE-OCSVM-Linear": "cae_ocsvm-linear",
        "CAE-OCSVM-RBF": "cae_ocsvm-rbf",
        "AE2-SVDD-Linear": "ae_svdd-linear",
        "AE2-SVDD-RBF": "ae_svdd-rbf",
        "OCSVM-Linear": "sklearn-OCSVM-Linear",
        "OCSVM-RBF": "sklearn-OCSVM-RBF",
        "RPCA_OCSVM": "rpca_ocsvm",
        "Isolation_Forest": "isolation-forest",
    }
    for method in methods:
        key = method_keys.get(method)
        if key is not None:
            row = method + "," + str(df_time[key + "-Train"]) + "," + str(df_time[key + "-Test"]) + "\n"
            csv.write(row)
    csv.close()
    return
Example no. 38
def write_pseg_from_lines(invoice, csv, buildingCode, invoiceNumber, date,
                          invAmount, accountNumber, issuerCode, filename,
                          failedInvoices):

    successfullyWroteCsv = False

    electricAndOthersFlag = False

    # Go through the Billing Summary table and pick out every charge in a less regexy way
    dstSum = 0  # check whether these lines add up; if not, just write a line that describes this
    if (hasattr(invoice, 'lines')):
        for line in invoice.lines:
            #print(line['charge_description'], " " ,line['charge_amount'])
            if (('charges' in line['charge_description']) |
                ('Charges' in line['charge_description'])):
                # print(line)
                if ('Electric' in line['charge_description']):
                    dstAmount = line['charge_amount'].replace('$', '')
                    if (line['charge_description'].endswith('-')):
                        dstAmount = '-' + dstAmount

                    dstSum += float(dstAmount.replace(',', ''))
                elif (('gas' in line['charge_description']) |
                      ('Gas' in line['charge_description'])):
                    dstAmount = line['charge_amount'].replace('$', '')
                    if (line['charge_description'].endswith('-')):
                        dstAmount = '-' + dstAmount

                    dstSum += float(dstAmount.replace(',', ''))
                elif (('unmetered' in line['charge_description']) |
                      ('Unmetered' in line['charge_description'])):
                    dstAmount = line['charge_amount'].replace('$', '')
                    if (line['charge_description'].endswith('-')):
                        dstAmount = '-' + dstAmount

                    dstSum += float(dstAmount.replace(',', ''))
                elif ('month' in line['charge_description']):
                    print('expected total')
                else:
                    dstAmount = line['charge_amount'].replace('$', '')
                    if (line['charge_description'].endswith('-')):
                        dstAmount = '-' + dstAmount

                    dstSum += float(dstAmount.replace(',', ''))
                    if (invAmount == invoice.amount_total_electric +
                            round(float(dstAmount.replace(',', '')), 2)):
                        electricAndOthersFlag = True
                    # print("Electric adds up")

        if ((dstSum != invAmount) & (round(dstSum, 2) != invAmount)):
            ## print("Printing in sum doesn't add up in Line by line for loop")
            failedInvoices.append(filename)
        else:
            csv.write("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" %
                      ('INVH', invoiceNumber, date, round(
                          invAmount, 2), accountNumber, "U", issuerCode, date,
                       date, "1", "YES", filename))
            dstSum = 0

            for line in invoice.lines:
                #print(line['charge_description'], " " ,line['charge_amount'])
                if (('charges' in line['charge_description']) |
                    ('Charges' in line['charge_description'])):
                    # print(line['charge_amount'].replace('$',''))
                    if ('Electric' in line['charge_description']):
                        dstAmount = line['charge_amount'].replace('$', '')
                        if (line['charge_description'].endswith('-')):
                            dstAmount = '-' + dstAmount
                        dstSum += float(dstAmount.replace(',', ''))
                        pseg_error_dst(invoice, csv, buildingCode, '5500-5000',
                                       date, dstAmount)
                    elif (('gas' in line['charge_description']) |
                          ('Gas' in line['charge_description'])):
                        dstAmount = line['charge_amount'].replace('$', '')
                        if (line['charge_description'].endswith('-')):
                            dstAmount = '-' + dstAmount
                        dstSum += float(dstAmount.replace(',', ''))
                        pseg_error_dst(invoice, csv, buildingCode, '5500-6000',
                                       date, dstAmount)
                    elif (('unmetered' in line['charge_description']) |
                          ('Unmetered' in line['charge_description'])):
                        dstAmount = line['charge_amount'].replace('$', '')
                        if (line['charge_description'].endswith('-')):
                            dstAmount = '-' + dstAmount
                        dstSum += float(dstAmount.replace(',', ''))
                        pseg_error_dst(invoice, csv, buildingCode, '5500-6000',
                                       date, dstAmount)
                    elif ('month' in line['charge_description']):
                        print('expected total')
                    else:
                        dstAmount = line['charge_amount'].replace('$', '')
                        if (line['charge_description'].endswith('-')):
                            dstAmount = '-' + dstAmount
                        dstSum += float(dstAmount.replace(',', ''))
                        print("FOUNDD THE NEGATIVE CHARGE " + dstAmount)
                        pseg_error_dst(invoice, csv, buildingCode, '5500-5000',
                                       date, dstAmount)

            add_to_total_expense(round(float(str(dstSum).replace(',', '')), 2))
            rename(invoice, filename, accountNumber, buildingCode,
                   invoice.issuer, invAmount, date)
            successfullyWroteCsv = True

    else:
        failedInvoices.append(filename)

    return successfullyWroteCsv
Example no. 39
def GetSnap():

	global total_snap_size;
	global total_old_snap_size;
	csv = open(snap_fileout, "w");
	columnTitleRow = "SnapshotId, StartTime, Base VolumeId, VolumeSize(GB), Tags\n";
	csv.write(columnTitleRow);

	print "Retrieving Snapshot info [Started]";
	snap_c = 0;
	for snapshot in ec2client.describe_snapshots(DryRun=dry_run,OwnerIds=[ownerid])['Snapshots']:
		row =[];
	 	
		if debug_run: print snapshot['SnapshotId'];
		if debug_run: print snapshot['StartTime'];
		if debug_run: print snapshot['VolumeId'];
		if debug_run: print snapshot['VolumeSize'];
		
		row.append(snapshot['SnapshotId']);
		row.append(snapshot['StartTime']);
		row.append(snapshot['VolumeId']);
		row.append(snapshot['VolumeSize']);

		total_snap_size += snapshot['VolumeSize'];
		
		timestamp = '{:%Y-%m-%d}'.format(snapshot['StartTime']);

		if re.match(timestampY, timestamp) is None:
			if debug_run: print "snap is old";
			total_old_snap_size += snapshot['VolumeSize']; 
 		

		if 'Tags' in snapshot.keys():
			Tag=snapshot['Tags'];
			if debug_run: print "Tags:- ",;
			for j in sorted(Tag):	
		  		if debug_run: print j['Key'] + " : "+ j['Value'],;
		  		row.append(j['Key'] + " : "+ j['Value']);
		  	if debug_run: print " ";	
		else:
		  	if debug_run: print "[This snapshot doesn't have tags]";
		  	row.append("[This snapshot doesn't have tags]");

		row.append("\n");
		csv.write(','.join(map(str, row)));
		snap_c +=1;

	print "Retrieving Snapshot info [Completed]";	
	total_snap ="Total "+str(snap_c)+" Snapshots and total Snapshots size on " + region +" is "+ str(total_snap_size)+" GB";
	total_old_snap ="Total Old Snapshots (Created a year before) size on " + region +" is "+ str(total_old_snap_size)+" GB";
	print "---------------------------------------------------------------------------------------"
	print total_snap;
	print total_old_snap;
	print "---------------------------------------------------------------------------------------"
	print "Please refer '"+snap_fileout+"' for more details\n";
	csv.write("-----------------------------------------------------------------------------------------------\n");
	csv.write(total_snap+"\n");
	csv.write(total_old_snap+"\n");
	csv.write("-----------------------------------------------------------------------------------------------\n");
	csv.write("*Amazon EBS snapshots are stored incrementally: only the blocks that have changed after your last snapshot are saved,\n");
	csv.write("and you are billed only for the changed blocks\n");
	csv.write("*When an EBS snapshot is copied new EBS snapshot volume ID shows as vol-ffffffff\n");

	csv.close();
	return;
Example no. 40
    def getSeasons(self, FirstDay, FirstSeason, LastDay,LastSeason, league):
        """

        crawling data from Website and saving
           - hometeam,
           - awayteam,
           - homegoals,
           - awaygoals,
           - date
        into a csv file.

        -----------
        Parameters:
        -----------

        FirstDay : int
            start day of the first season

        FirstSeason : int
            from this season on the data get crawled

        LastDay : int
            last day of the last season

        LastSeason : int
            till this season the data get crawled

        league : string
            selection of the league:
                input can only be "1. Bundesliga" , "2. Bundesliga", "3. Bundesliga", "1. Handball Bundesliga"

        -------
        Return:
        -------

        saving data into csv file

        """
        self.clear()
        csv = open("teamproject/BundesligaData.csv", "w")
        csv.write(
            "HomeTeam" +
            "," +
            "AwayTeam" +
            "," +
            "HomeGoals" +
            "," +
            "AwayGoals" +
            "," +
            "Date" +
            "," +
            "win"+
            "\n")

        if league == "1. Bundesliga" or league == "2. Bundesliga" or league == "3. Bundesliga" or league == "1. Handball Bundesliga":
            for i in range(FirstSeason, (LastSeason + 1)):
                counter = 0
                startday_counter = 0
                Game = {}
                Date = {}

                GameDay = {}
                HomeTeam = {}
                AwayTeam = {}
                GoalsHome = {}
                GoalsAway = {}

                win_team = {}

                if FirstSeason == LastSeason:
                    start_season_day = FirstDay
                    end_season_day = LastDay
                elif i == FirstSeason and FirstDay != 1:
                    start_season_day = FirstDay
                    end_season_day = 34
                elif i == LastSeason and LastDay != 34:
                    start_season_day = 1
                    end_season_day = LastDay
                else:
                    start_season_day = 1
                    end_season_day = 34

                if league == "1. Bundesliga":
                    game_data = json.loads(requests.get(f'http://www.openligadb.de/api/getmatchdata/bl1/{i}').text)
                elif league == "2. Bundesliga":
                    game_data = json.loads(requests.get(f'http://www.openligadb.de/api/getmatchdata/bl2/{i}').text)
                elif league == "3. Bundesliga":
                    game_data = json.loads(requests.get(f'http://www.openligadb.de/api/getmatchdata/bl3/{i}').text)
                elif league == "1. Handball Bundesliga":
                    game_data = json.loads(requests.get(f'http://www.openligadb.de/api/getmatchdata/hbl/{i}').text)


                for game in game_data:
                    startday_counter += 1
                    if (startday_counter / 9) + 1 > start_season_day and (startday_counter / 9) <= end_season_day:

                        Date[counter] = game['MatchDateTime']
                        Team1 = game['Team1']
                        HomeTeam[counter] = Team1['TeamName']

                        Team2 = game['Team2']
                        AwayTeam[counter] = Team2['TeamName']

                        Matchresults = game['MatchResults']

                        Result_half  = Matchresults[0]
                        TeamA_half = Result_half['PointsTeam1']
                        TeamB_half = Result_half['PointsTeam2']

                        if not len(Matchresults) == 1:
                            Result = Matchresults[1]
                            TeamA = Result['PointsTeam1']
                            TeamB = Result['PointsTeam2']
                        else:
                            TeamA = -1
                            TeamB = -1

                        if TeamA_half + TeamB_half > TeamA + TeamB:
                            GoalsHome[counter] = TeamA_half
                            GoalsAway[counter] = TeamB_half
                            if TeamA_half > TeamB_half:
                                win_team[counter] = "h"
                            elif TeamA_half < TeamB_half:
                                win_team[counter] = "a"
                            elif TeamA_half == TeamB_half:
                                win_team[counter] = "d"

                        else:
                            GoalsHome[counter] = TeamA
                            GoalsAway[counter] = TeamB
                            if TeamA_half > TeamB_half:
                                win_team[counter] = "h"
                            elif TeamA_half < TeamB_half:
                                win_team[counter] = "a"
                            elif TeamA_half == TeamB_half:
                                win_team[counter] = "d"

                        match = HomeTeam[counter] + "," + AwayTeam[counter] + "," + str(GoalsHome[counter]) + "," + str(GoalsAway[counter]) + "," + Date[counter] + "," + win_team[counter] + "\n"
                        csv.write(match)
                        counter += 1
        else:
            print('Wrong string for crawling a certain league')
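The hand-concatenated header and match rows could also go through csv.writer, which would quote team names containing commas; a sketch of just the writing side, with column names taken from the docstring:

import csv

def open_results_csv(path="teamproject/BundesligaData.csv"):
    f = open(path, "w", newline="")
    writer = csv.writer(f)
    writer.writerow(["HomeTeam", "AwayTeam", "HomeGoals", "AwayGoals", "Date", "win"])
    return f, writer

# inside the crawl loop, instead of concatenating `match` by hand:
# writer.writerow([HomeTeam[counter], AwayTeam[counter],
#                  GoalsHome[counter], GoalsAway[counter],
#                  Date[counter], win_team[counter]])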
Example no. 41
def GetAmi():

	global total_amivol_size;
	csv = open(ami_fileout, "w");
	
	columnTitleRow = "ImageId, CreationDate, State, BlockDeviceMappings 01:, BlockDeviceMappings 02, BlockDeviceMappings 03, Tags\n";
	csv.write(columnTitleRow);

	print "Retrieving AMI info [Started]";
	ami_c = 0;
	for ami in ec2client.describe_images(DryRun=dry_run,Owners=['self'])['Images']:
		#filter"ImageIds=['ami-7ae6541a']
		row =[];
	 	#print(volume)
		if debug_run: print "AMI: " +ami['ImageId'] + " Creation date: "+ str(ami['CreationDate']),;
		row.append(ami['ImageId']);
		row.append(ami['CreationDate']);
		row.append(ami['State']);


		if 'BlockDeviceMappings' in ami.keys():
			EBS=ami['BlockDeviceMappings'];
			if debug_run: print "EBSs:- ",;
			
			for j in EBS:	
				#if debug_run: print j;
				if "Ebs" in j:
					Ebs_d = j['Ebs'];
					Ebs_dn = j['DeviceName'];

					#print Ebs_d;

					if 'SnapshotId' in Ebs_d.keys():

						if debug_run: print Ebs_d['SnapshotId']+" : "+str(Ebs_d['VolumeSize']),;
						row.append(j['DeviceName']+":"+Ebs_d['SnapshotId'] + " :"+ str(Ebs_d['VolumeSize'])+"GB");
				
						total_amivol_size += Ebs_d['VolumeSize'];
					else:
						if debug_run: print "No Snapshot info available";
						row.append("No Snapshot info available");	

				else:
					if debug_run: print "This is ephemeral, not an EBS"
					row.append(j['DeviceName']+" : Ephemeral");

			if debug_run: print " ";
		else:
			if debug_run: print "[This AMI doesn't have BlockDeviceMappings]";
			row.append("No EBS");
		  	
		
		if 'Tags' in ami.keys():
			Tag=ami['Tags'];
			if debug_run: print "Tags:- ",;
			for j in sorted(Tag):
				if debug_run: print j['Key'] + " : "+ j['Value'],;
				row.append(j['Key'] + " : "+ j['Value']);
			if debug_run: print " ";
		else:
			if debug_run: print "[This AMI doesn't have tags]";
			row.append("[This AMI doesn't have tags]");
		  	

		if debug_run: print "Array out----------------------------------"  	
		row.append("\n");


		csv.write(','.join(map(str, row)));
		ami_c +=1;

	print "Retrieving AMI info [Completed]";	
	total_amivol ="Total "+str(ami_c)+" AMIs and total Volumes size attached to AMIs on " + region +" is "+ str(total_amivol_size)+" GB";
	print "---------------------------------------------------------------------------------------"
	print total_amivol;
	print "---------------------------------------------------------------------------------------"
	print "Please refer '"+ami_fileout+"' for more details\n";
	csv.write("-----------------------------------------------------------------------------------------------\n");
	csv.write(total_amivol+"\n");
	csv.write("-----------------------------------------------------------------------------------------------\n");

	csv.close();
	return;
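A side note on this report (and the volume report in Exemplo n.º 43): building rows with ','.join breaks as soon as a field value, such as a tag, itself contains a comma. The stdlib csv.writer quotes such fields automatically. A minimal sketch, with an illustrative file name and row:

import csv as csvlib  # aliased, since this script binds the name "csv" to a file handle

out = open("ami_report.csv", "w")  # illustrative output path
writer = csvlib.writer(out)
writer.writerow(["ImageId", "CreationDate", "State", "Tags"])
# the comma inside the tag field is quoted instead of splitting the column
writer.writerow(["ami-7ae6541a", "2016-01-01T00:00:00.000Z", "available", "env : prod, owner : ops"])
out.close()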
Exemplo n.º 42
0
def awsCall(barcodeData, idR, c, URL, URLV):
    #Send AWS
    z = datetime.strftime("%A")
    arrival_time = str(time.strftime("%H:%M"))
    Day = z.lower()
    qr = str(barcode)

    # defining a params dict for the parameters to be sent to the API
    PARAMS = {
        'qrCode': barcodeData,
        'arrival_time': arrival_time,
        'Day': Day,
        'residential_id': idR,
        'cpu_serial': c
    }
    print(PARAMS)
    # sending get request and saving the response as response object
    conexion = False
    try:
        with eventlet.Timeout(10):
            r = requests.get(url=URL, params=PARAMS)
    except:
        logging.info('Error al verificar el codigo')
        time.sleep(4)
        return

    # extracting data in json format
    try:
        message = r.json()
        print(message)
        QRstatus = message['result']
        logging.info('%s', QRstatus)

        tipolector = configLec.get('garitappiot', 'tipo')
        funcionlector = configLec.get('garitappiot', 'funcion')

        if funcionlector == 'Entrada' and tipolector == 'CONRF':
            if QRstatus == "Valido": fotofile = message['photo']
            if QRstatus == "Invalido": fotofile = "no valido"
            cortesia = "Bienvenido"
            print(QRstatus, fotofile)
        if funcionlector == 'Salida' and tipolector == 'CONRF':
            if QRstatus == "Valido": fotofile = message['photo']
            if QRstatus == "Invalido": fotofile = "no valido"
            cortesia = "Buen viaje"
            print(QRstatus, fotofile)
        if funcionlector == 'Entrada' and tipolector == 'NORF':
            fotofile = "norfacial"
            cortesia = "Bienvenido"
        if funcionlector == 'Salida' and tipolector == 'NORF':
            fotofile = "norfacial"
            cortesia = "Buen viaje"

        print(QRstatus, cortesia, fotofile)
        if QRstatus == "Invalido":
            led.on()
            time.sleep(2)
            led.off()
            time.sleep(3)
        if QRstatus == "Valido" and tipolector == 'CONRF':
            for x in range(3):
                logging.info('Comenzando reconocimiento facial')
                os.system(
                    'sudo wget http://' + IP +
                    ':9000/?action=snapshot -O /home/pi/Documents/QRscan/cara.jpg'
                )
                img = Image.open("/home/pi/Documents/QRscan/cara.jpg")
                img.save("/home/pi/Documents/QRscan/patron.jpg",
                         dpi=(640, 480))
                targetFile = '/home/pi/Documents/QRscan/patron.jpg'
                sourceFile = fotofile
                coincidence = 0
                similarity = '0'  # fallback when no face match is returned
                client = boto3.client('rekognition')
                imageTarget = open(targetFile, 'rb')
                try:
                    response = client.compare_faces(
                        SimilarityThreshold=70,
                        SourceImage={
                            'S3Object': {
                                'Bucket': 'garitapp.guest.id.pictures',
                                'Name': sourceFile
                            }
                        },
                        TargetImage={'Bytes': imageTarget.read()})
                    for faceMatch in response['FaceMatches']:
                        similarity = str(faceMatch['Similarity'])
                        coincidence = float(similarity)
                    print(coincidence)
                    logging.info('Similitud de un %s', similarity)
                    imageTarget.close()
                    if coincidence >= 85:
                        GPIO.setup(pluma, GPIO.OUT)
                        GPIO.output(pluma, GPIO.LOW)
                        time.sleep(1)
                        GPIO.output(pluma, GPIO.HIGH)
                        GPIO.setup(pluma, GPIO.IN)
                        logging.info('Acceso concedido')
                        csv.write("{},{},{},{}\n".format(
                            datetime, barcodeData, "Valido", coincidence))
                        csv.flush()
                        #validate if it is simple invitation
                        if barcodeData.split("_")[0] == "001":
                            # defining a params dict for the parameters to be sent to the API
                            PARAMS = {
                                'qrCode': barcodeData,
                                'arrival_time': arrival_time,
                                'Day': Day,
                                'residential_id': idR,
                                'cpu_serial': c
                            }
                            # sending get request and saving the response as response object
                            while True:
                                try:
                                    with eventlet.Timeout(10):
                                        r = requests.get(url=URLV,
                                                         params=PARAMS)
                                    break
                                except:
                                    time.sleep(1)  # brief pause before retrying
                    else:
                        logging.info('Acceso denegado')
                        csv.write("{},{},{}\n".format(datetime, barcodeData,
                                                      "Invalido"))
                        csv.flush()
                        time.sleep(3)
                    break
                except:
                    logging.info('Usuario no detectado')
                    csv.write("{},{},{}\n".format(datetime, barcodeData,
                                                  "Invalido"))
                    csv.flush()
                    time.sleep(5)
        if QRstatus == "Valido" and tipolector == 'NORF':
            GPIO.setup(pluma, GPIO.OUT)
            GPIO.output(pluma, GPIO.LOW)
            time.sleep(1)
            GPIO.output(pluma, GPIO.HIGH)
            GPIO.setup(pluma, GPIO.IN)
            logging.info('Acceso concedido')
            csv.write("{},{},{}\n".format(datetime, barcodeData, "Valido"))
            csv.flush()
    except:
        logging.info('Error leyendo datos')
        time.sleep(5)
    pass
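Incidentally, requests can enforce the ten-second limit itself through its timeout parameter, so the eventlet.Timeout wrapper is not strictly necessary. A hedged sketch of the same bounded retry (the helper name is made up; url and params as in the example):

import time
import requests

def get_with_retries(url, params, attempts=3, timeout=10):
    # hypothetical helper: retry the GET a bounded number of times
    # rather than looping forever on failure
    for attempt in range(attempts):
        try:
            return requests.get(url=url, params=params, timeout=timeout)
        except requests.RequestException:
            time.sleep(2 ** attempt)  # simple exponential backoff
    return None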
Exemplo n.º 43
0
def GetVolumes():

	global total_unattached;
	csv = open(vol_fileout, "w");
	
	columnTitleRow = "VolumeId, Size(GB), VolumeType, State, Attached Instnace, Device, Tags\n";
	csv.write(columnTitleRow);

	print "Retrieving Volume info [Started]";
	vol_c = 0;
	for volume in ec2client.describe_volumes(DryRun=dry_run)['Volumes']:
		row =[];
	 	#print(volume)
		if debug_run: print "Vol: " +volume['VolumeId'] + " Size: "+ str(volume['Size']) + "GB",;
		row.append(volume['VolumeId']);
		row.append(volume['Size']);
		row.append(volume['VolumeType']);
		

		if volume['Attachments']:
			Attachment=volume['Attachments'];
			for i in Attachment:	
		  		if debug_run: print i['State'] + " to "+ i['InstanceId'] +" as "+ i['Device'],;
		  		row.append(i['State']);
		  		row.append(i['InstanceId']);
		  		row.append(i['Device']);

		else:
			if debug_run: print "[This volume not attached to any instance]",;
			row.append("[This volume not attached to any instance]");
			total_unattached += volume['Size'];

		
		if 'Tags' in volume.keys():
			Tag=volume['Tags'];
			if debug_run: print "Tags:- ",;

			for j in sorted(Tag):
				if debug_run: print j['Key'] + " : "+ j['Value'],;
				row.append(j['Key'] + " : "+ j['Value']);
			if debug_run: print " ";
		else:
			if debug_run: print "[This volume doesn't have tags]";
			row.append("[This volume doesn't have tags]");

		if debug_run: print "Array out----------------------------------"  	
		row.append("\n");


		csv.write(','.join(map(str, row)));
		vol_c +=1;

	print "Retrieving Volume info [Completed]";	
	total_vol ="Total "+str(vol_c)+" Volumes and total unattached Volumes size on " + region +" is "+ str(total_unattached)+" GB";
	print "---------------------------------------------------------------------------------------"
	print total_vol;
	print "---------------------------------------------------------------------------------------"
	print "Please refer '"+vol_fileout+"' for more details\n";
	csv.write("-----------------------------------------------------------------------------------------------\n");
	csv.write(total_vol+"\n");
	csv.write("-----------------------------------------------------------------------------------------------\n");

	csv.close();
	return;
Exemplo n.º 44
0
def write_file(file, filename):
    # the "with" block closes the file on exit
    with open("csv_upload_directory/%s.csv" % filename, 'w') as csv:
        for d in file:
            csv.write(d)
            csv.write('\n')
Exemplo n.º 45
0
# Ob times are always CDT
ts1 = mx.DateTime.strptime(d['TIMESTAMP'], '%Y-%m-%d %H:%M:%S')
gts1 = ts1 + mx.DateTime.RelativeDateTime(hours=5)

iem = access.Ob( 'RSAI4', "OT")
iem.setObTimeGMT( gts1 )
drct = d['WindDir']
iem.data['drct'] = drct
sknt = float(d['WS_mph_S_WVT']) / 1.15  # mph -> knots
iem.data['sknt'] = sknt
gust = float(d['WS_mph_Max']) / 1.15  # mph -> knots
iem.data['gust'] = gust
iem.updateDatabase( cursor=icursor )

csv.write("%s,%s,%s,%.1f,%.1f\n" % ('RSAI4', 
            gts1.strftime("%Y/%m/%d %H:%M:%S"),
      drct, sknt, gust) )


# Red Rock
try:
    req = urllib2.Request("ftp://%s:%[email protected]/Red Rock_Table3Min_current.dat" % (secret.CTRE_FTPUSER,
                                                        secret.CTRE_FTPPASS))
    data = urllib2.urlopen(req, timeout=30).readlines()
except:
    if now.minute % 15 == 0:
        print 'Download CTRE Bridge Data Failed!!!'
    sys.exit(0)

if len(data) < 2:
    sys.exit(0)
Exemplo n.º 46
0
def lemur2csv(lemurfile, csvfile):
	lemur = loadlemur(lemurfile)
	csv = open(csvfile, 'w')
	for e, g in lemur.items():
		# tab-separated pair, one entry per line (assumes g has no trailing newline)
		csv.write(e + "\t" + g + "\n")
	csv.close()
Exemplo n.º 47
0
def write_to_csv(invoice, csv, filename, usageCSV, failedInvoices,
                 creditInvoices):
    # print(filename)
    wroteInvBool = False

    try:
        print(invoice.issuer, hasattr(invoice, 'issuer'))
        if invoice.issuer == 'null':
            failedInvoices.append(filename)
            return

        #cross invoice object
        fullDate = invoice.date
        date = str(fullDate.month) + '/' + str(fullDate.day) + '/' + str(
            fullDate.year)
        buildingCode = building_code(
            invoice)  #grab building code - Takes invoice as input
        invoiceNumber = invoice.invoice_number
        invAmount = invoice.amount
        accountNumber = get_account_number(invoice)
        issuerCode = get_issuer_code(invoice)
        # print(issuerCode)
        chargeCode = get_charge_code(invoice)
        meterReading = get_meter_amount(invoice, "")

        #set globals
        usage__charge = invAmount  # this will be reset for PSEG gas or electric in PSEG section
        usage__meterNumber = '0'
        usage__usageAmount = '0'
        usage__date = date
        usage__glCode = chargeCode
        usage__issuerAbbriviation = issuerCode
        usage__accountNumber = accountNumber
        usage__buildingCode = buildingCode

        if (hasattr(invoice, "credit_bool")):
            newName = str(buildingCode) + " " + invoice.issuer + " " + str(
                accountNumber) + " " + "$" + str(
                    invAmount) + " " + date.replace('/', "-") + ".pdf"
            creditInvoices.append([newName, filename])
            rename(invoice, filename, accountNumber, buildingCode,
                   invoice.issuer, "(" + str(invAmount) + ")", date)
            return

        if (invoice.issuer == "Comcast"):
            invoiceNumber = date.replace("/", '') + accountNumber[12:]

        if (invoice.issuer != "PSE&G"):
            # print("Are we here")
            csv.write("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" %
                      ('INVH', invoiceNumber, date, round(
                          invAmount, 2), accountNumber, "U", issuerCode, date,
                       date, "1", "YES", filename))

            #DST Line
            csv.write("%s,%s,%s,%s,%s\n" % ("DIST", buildingCode, chargeCode,
                                            meterReading, round(invAmount, 2)))

            add_to_total_expense(invAmount)
            wroteInvBool = True
            rename(invoice, filename, accountNumber, buildingCode,
                   invoice.issuer, invAmount, date)

            if (invoice.issuer == 'American Water'):
                if (hasattr(invoice, 'total_gallons')):
                    usage__usageAmount = invoice.total_gallons

        else:
            ## THIS IS JUST FOR PSEG ####
            psegInfo = check_pseg_info(invoice)
            if (psegInfo == -1):
                csv.write(
                    "%s,%s,%s\n" %
                    ("Could not parse pdf", filename, ', , , , , , , , Error'))
            gas = electric = electricSupply = gasSupply = other = unmetered = 0
            psegCharges = [
                'gas_charge', 'amount_total_electric', 'other_charges'
            ]
            addsUp = 0
            for charge in psegCharges:
                if (hasattr(invoice, charge)):
                    addsUp = addsUp + float(invoice.__getattribute__(charge))
            if abs(addsUp - invAmount) > 0.005:  # float totals rarely compare exactly equal
                wroteProperly = write_pseg_from_lines(
                    invoice, csv, buildingCode, invoiceNumber, date, invAmount,
                    accountNumber, issuerCode, filename, failedInvoices)
                if (wroteProperly):
                    wroteInvBool = True
            else:
                ######### if DIST lines add up to invoice lines #############
                csv.write(
                    "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" %
                    ('INVH', invoiceNumber, date, invAmount, accountNumber,
                     "U", issuerCode, date, date, "1", "YES", filename))

                for charge in psegCharges:
                    # print(charge)
                    if (hasattr(invoice, charge)):
                        # print('has attribute')
                        #DST Line
                        chargeCode = get_charge_code(
                            str(charge))  #returns the GL CODE
                        meterAmount = get_meter_amount(invoice, charge)
                        chargeAmount = get_charge_amount(invoice, charge)
                        csv.write("%s,%s,%s,%s,%s\n" %
                                  ("DIST", buildingCode, chargeCode,
                                   meterAmount, chargeAmount))
                add_to_total_expense(round(invAmount, 2))
                rename(invoice, filename, accountNumber, buildingCode,
                       invoice.issuer, invAmount, date)
                wroteInvBool = True

                ## Running Total for header BTCH Lines ##

        # write_vendor_usage(invoice, filename, True)

    except Exception as error:
        print(error)
        wroteInvBool = False
        failedInvoices.append(filename)
        # write_vendor_usage(invoice, filename, False)
        pass

    return wroteInvBool
Exemplo n.º 48
0
		articles.update(pickle.load(inputfile))

	break
	
print len(articles)

for article in articles:
	date = articles[article][0]
	# collapse "YYYY/MM" into "YYYYMM" by cutting the slash out
	date = date[:date.rfind("/")] + date[date.find("/")+1:]
	if len(date) < 3:
		continue
	date = int(date)
	if date < 198000:
		print date
		continue
	if date not in dates:
		dates[date] = 1
	else:
		dates[date] += 1

print len(dates)

with open("dates.csv", "w") as csv:
	for x in dates:
		w = "%s,%s\n"%(x,dates[x])
		csv.write(w)



# plt.bar(dates.keys(), dates.values())
# plt.show()
Exemplo n.º 49
0
def lemur2csv(lemurfile, csvfile):
    lemur = loadlemur(lemurfile)
    csv = open(csvfile, 'w')
    for e, g in lemur.items():
        # tab-separated pair, one entry per line (assumes g has no trailing newline)
        csv.write(e + "\t" + g + "\n")
    csv.close()
Exemplo n.º 50
0
def _generate_blank(self):
    csv = open(self.csv, "w")
    csv.write("TYPE,START POINT,END POINT,TURN COMMENTS,TURN CUE,"
              "ROAD,ROAD COMMENTS,LANES,SHOULDER,SPEED,TRAFFIC\n")
    csv.close()
Exemplo n.º 51
0
import csv
import sys
import re

filename = sys.argv[1]
results = []
with open(filename) as csvfile:
    reader = csv.reader(csvfile)  # each row comes back as a list of strings
    for row in reader: # each row is a list
        if len(row) > 0:
            results.append(row)
results = "module.exports = " + str(results)
results = re.sub('\', ', '\',', results)
results = re.sub(', ', ',', results)
csv = open('data/' + filename, "w") 
csv.write(results)
print(results)
Exemplo n.º 52
0
#!/usr/bin/env python3

import csv

# Simplest example of reading a CSV file
with open('some.csv', newline='') as f:
    reader = csv.reader(f)
    for row in reader:
        print(row)

# Reading a file with an alternate format:
with open('passwd', newline='') as f:
    reader = csv.reader(f, delimiter=':', quoting=csv.QUOTE_NONE)
    for row in reader:
        print(row)

# Simplest writing example (someiterable defined here so the snippet runs)
someiterable = [['spam', 1], ['eggs', 2]]
with open('some.csv', mode='w', newline='') as f:
    writer = csv.writer(f)
    writer.writerows(someiterable)

# Better interface? (a hypothetical API -- csv.read/csv.write don't exist)

with open('some.csv') as f:
    for line in f:
        print(csv.read(line))

with open('some.csv', mode='w') as f:
    csv.write(f, someiterable)  # would take the file plus an iterable of rows
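For what it's worth, the line-oriented interface imagined above can be approximated with two small helpers over the real csv.reader/csv.writer (the helper names are invented for this sketch):

import csv
import io

def read_line(line, **fmtparams):
    # parse one CSV-formatted string into a list of fields
    return next(csv.reader([line], **fmtparams))

def write_line(fields, **fmtparams):
    # render a list of fields as a single CSV line, without the trailing newline
    buf = io.StringIO()
    csv.writer(buf, **fmtparams).writerow(fields)
    return buf.getvalue().rstrip("\r\n")

print(read_line("a,b,c"))        # ['a', 'b', 'c']
print(write_line(["a", "b,c"]))  # a,"b,c"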

Exemplo n.º 53
0
import csv

dic = {"John": "*****@*****.**", "Mary": "*****@*****.**"}  #dictionary

download_dir = "exampleCsv.csv"  #where you want the file to be downloaded to

csv = open(download_dir, "w")
#"w" indicates that you're writing strings to the file

columnTitleRow = "name, email\n"
csv.write(columnTitleRow)

for name, email in dic.items():
    row = name + "," + email + "\n"
    csv.write(row)

csv.close()
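The same dump can be written with csv.DictWriter, which emits the header row and handles quoting. A sketch reusing dic and download_dir from above; the module is re-imported under an alias because this snippet rebinds the name csv to a file handle:

import csv as csvlib

with open(download_dir, "w", newline="") as f:
    writer = csvlib.DictWriter(f, fieldnames=["name", "email"])
    writer.writeheader()  # writes "name,email"
    for name, email in dic.items():
        writer.writerow({"name": name, "email": email})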
Exemplo n.º 54
0
def createCSV(fname, nodes):
    csv = open(fname, 'wb')
    for node in nodes.values():
        for k, v in node.links.items():
            csv.write(str(v) + '\n')  # each link serialized on its own line
    csv.close()
Exemplo n.º 55
0
import os,csv,json

with open('people.json') as f:
	jsonfile = json.load(f)

with open('student-dev-ports.csv','w') as csvfile:
	streamwriter = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
	streamwriter.writerow(['Student Name','Dev Port'])
	for r in jsonfile:
		streamwriter.writerow([r['name'],r['dev']])
Exemplo n.º 56
0
        if '.xls' in args.input_path:
            dframe = pd.read_excel(args.input_path)
        elif '.csv' in args.input_path:
            dframe = pd.read_csv(args.input_path)
        else:
            print("Unknown input file type")
            sys.exit(1)
        docLst = text_preprocess(dframe, args)
    else:
        docLst = load_text_pre(args)

    # load / build the count vector (term-frequency matrix)
    tf, tf_vectorizer = get_count_vector(docLst, args)

    # train the LDA model
    lda_model = train_lda(tf, args)

    # save and print the topic-word matrix
    print("#########Topic-Word Distribution#########")
    tf_vectorizer._validate_vocabulary()
    tf_feature_names = tf_vectorizer.get_feature_names()
    print_top_words(lda_model, tf_feature_names, args.n_top_words)

    # save doc_topic_distr
    doc_topic_dist = lda_model.transform(tf)
    with open(args.doc_topic_path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['Index', 'TopicDistribution'])
        for idx, dist in enumerate(doc_topic_dist):
            # note: due to a limitation in sklearn's LDA, the topic-word
            # matrix produced by this function is not normalized
            writer.writerow([idx + 1] + dist.tolist())
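As the comment above notes, sklearn's LatentDirichletAllocation keeps components_ unnormalized. If proper topic-word probabilities are needed, each row can be divided by its sum; a one-line sketch against the fitted lda_model from this example:

# each row of components_ becomes a probability distribution over the vocabulary
topic_word = lda_model.components_ / lda_model.components_.sum(axis=1)[:, None]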
Exemplo n.º 57
0
import csv
from bs4 import BeautifulSoup


## 2. Open our XML
archivo = open('59CHRTMZ01R-Alignment.xml','r')
texto = archivo.read()
archivo.close()
#print(texto)

## 3. Parse the XML with BeautifulSoup
soup = BeautifulSoup(texto, 'html.parser')  # html.parser lowercases tags, matching the searches below

## 4. Apply the methods we already know: .findAll(), find(), .string
hits = soup.findAll('hit')

output = csv.writer(open('BLAST.csv','w',newline=''))
output.writerow(['Hit','Evalue'])
for x in hits:
    hsps = x.findAll('hit_hsps')
    for y in hsps:
        print(y.findAll('hsp_num'))

    hit_id = x.find('hit_id').string
    evalue = float(x.find('hsp_evalue').string)
    seq = x.find('hsp_hseq').string
    defi = x.find('hit_def').string
    print(hit_id,defi,evalue,seq)
    output.writerow([hit_id,evalue])
Exemplo n.º 58
0
    # collecting final group list and group-wise list of assigned clusters
    return finalGroupList, groupWiseMatchList


# checking group overlap in cluster matching across courses
# defining global variables
for n in range(7, 40):
    courseCount = 3
    groupCount = n
    groupList = list(range(1, groupCount + 1))
    fileName = './Group Overlap Data/avgdiff_data_' + str(n) + '.csv'
    csv = open(fileName, 'w')

    for k in range(100000):
        finalList, groupWiseMatchList = finalgrouplist(
            courseCount, groupList, clusterstructure(courseCount, groupCount))
        diffLenSum = 0
        for i in range(groupCount):
            flatList = [
                x for sublist in groupWiseMatchList[i] for x in sublist
            ]
            listLen = len(flatList)
            diffLen = listLen - len(set(flatList)) - 2
            diffLenSum += diffLen

        avgDiff = diffLenSum / groupCount
        csv.write(str(avgDiff) + '\n')

    csv.close()  # release this file before moving on to the next n
    print('Script done for: ', n)
Exemplo n.º 59
0
    count += 1

    filtercol = ''
    for i in range(0, L):
        filtercol = filtercol + str(m[i]) + ','

    fftcol = ''
    for i in range(0, lfft):
        fftcol = fftcol + str(Y[i]) + ','
    filtercol = filtercol + str(classify) + "\n"

    an = ''
    for i in range(0, count):
        if i == (count - 1):
            an = an + str(data_test[i])
        else:
            an = an + str(data_test[i]) + ','

    fftcol = fftcol + str(classify) + "," + an + "\n"

    #write filter data
    file = open('filter_data.csv', "a")
    file.write(filtercol)
    file.close()

    #write fft data
    file = open('fft_data.csv', "a")
    file.write(fftcol)
    file.close()
print('Writing complete !!')