def uploaded():
    """Render a preview of the CSV file recorded in the session.

    Returns the rendered 'csvpreview.html' template on success, an error
    template when the session has no filename, or a plain-string error for
    file problems.
    """
    error = None
    # Check the raw session value BEFORE escaping: escape(None) returns the
    # Markup string 'None', so the original None-check could never trigger.
    raw_name = session.get('csvname')
    if raw_name is None:
        error = "Session error: filename is empty!"
        return render_template('csvpreview.html', error=error)
    filename = escape(raw_name)
    # check for valid csv
    try:
        csv = CSVHandler(UPLOAD_FOLDER + "/" + filename)
    except IOError:
        return "File not found!"
    except Exception:
        # sys.exc_info()[0] is an exception *class*; str() it so the string
        # concatenation cannot itself raise TypeError.
        return "Unexpected error: " + str(sys.exc_info()[0])
    try:
        data = csv.head(20)
        # get ontology terms
        allTerms = getOntologyClasses()
        # app.logger.debug(">>>"+str(csv.NF))
        # generate output
        msg = "File (" + filename + ") uploaded. Congratulations!"
        # render template
        r = render_template('csvpreview.html', msg=msg, data=data,
                            terms=allTerms, ncols=csv.NF)
    finally:
        # close the CSV reader even if templating fails
        csv.close()
    return r
def extractKeywords(selection, corpus, nr):
    """Reset the keyword CSV, then append keyword rows for every entry.

    Opening with 'w' truncates the file; freqWords/freqNouns/nGrams each
    re-open it in append mode and write their own fields.
    """
    out = open('db\cyttron-keywords.csv', 'w')
    cyttronCorpus = TextCollection(corpus)
    for entry in selection:
        currentEntry = entry.lower()
        freqWords(currentEntry, cyttronCorpus, nr)
        freqNouns(currentEntry, cyttronCorpus, nr)
        nGrams(currentEntry, cyttronCorpus, nr, clean=True)
    out.close()
def _batch2(model): csv = open('lg_kdz.%s.csv' % (model), 'wb+') csv.write('model,region,country,chip_type,prod_type,buyer_name,swversion,live_date,firmware,\n') lg = LGMobile() for country in lg.ftp_country_info(): ccode = country.country_code print ccode, model for sw in lg.tool_mode_country_check(ccode, model).itervalues(): csv.write(sw.csv() + '\n') csv.close()
def saveEntropyCSV(infolder, outfile):
    """Compute the entropy of every CSV in *infolder* and write 'F,k,S' rows to *outfile*.

    F and k are parsed from each filename of the form '<F>_<k>.csv'.
    """
    filelist = os.listdir(infolder)
    if '.DS_Store' in filelist:
        filelist.remove('.DS_Store')
    # Mode 'w' both creates the file (replacing the subprocess `touch` +
    # 'r+' dance) and truncates it, so stale rows left over from a previous,
    # longer run can no longer survive at the end of the file.
    with open(outfile, 'w') as out:
        for csvfile in filelist:
            P_i = makeP_i(infolder + '/' + csvfile)[0]
            S = entropy(P_i)
            F = csvfile.split('_')[0]       # e.g. '0.044' from '0.044_0.038.csv'
            k = csvfile.split('_')[1][:-4]  # remove '.csv'
            out.write(F + ',' + k + ',' + str(S) + '\n')
def generateCsvSetOfFiles(cls, top_folder): print "\nWALKING THE TREE", top_folder+",", "SEARCHING FOR PICTURES" csv_fullpath = DbMethods.temporaryCsvFile() csv = open(csv_fullpath, 'w') for dirpath, dirnames, files in os.walk(top_folder): for name in files: if (name.lower().endswith('bmp') or name.lower().endswith('gif') or name.lower().endswith('jpg') or name.lower().endswith('png') or name.lower().endswith('tiff')): row = dirpath+'`'+name+'\n' csv.write(row) csv.close() print "CSV FOLDER/FILE HAS BEEN GENERATED AT", csv.name return csv_fullpath
def nGrams(string,corpus,number,clean=True): global wordList biList=[] triList=[] words = WordPunctTokenizer().tokenize(string) stopset = set(stopwords.words('english')) if clean == True: words = [word.lower() for word in words] if clean == False: words = [word.lower() for word in words] filter = lambda words: len(words) < 2 or words.isdigit() bcf = BigramCollocationFinder.from_words(words) bcf.apply_word_filter(filter) biResult = bcf.nbest(BigramAssocMeasures.likelihood_ratio, number) tcf = TrigramCollocationFinder.from_words(words) tcf.apply_word_filter(filter) triResult = tcf.nbest(TrigramAssocMeasures.likelihood_ratio, number) for i in range(len(biResult)): if len(biResult) > 0: biPrint = " ".join(biResult[i]) biList.append(biPrint) else: biList=[] csv = open('db\cyttron-keywords.csv','a') if len(biList) > 1: csv.write('"' + ','.join(biList[:-1]) + ',' + biList[-1] + '";') else: csv.write('"' + ''.join(biList) + '";') csv.close() for i in range(len(triResult)): if len(triResult) > 0: triPrint = " ".join(triResult[i]) triList.append(triPrint) else: triList=[] csv = open('db\cyttron-keywords.csv','a') if len(triList) > 1: csv.write('"' + ','.join(triList[:-1]) + ',' + triList[-1] + '"\n') else: csv.write('"' + ''.join(triList) + '"\n') csv.close() print biList print triList
def freqWords(string, corpus, number):
    """Append the top-*number* tf-idf ranked words of *string* (scored
    against *corpus*) to the keyword CSV and return them."""
    global pub, wordList
    stopset = set(stopwords.words('english'))
    words = WordPunctTokenizer().tokenize(string)
    # lower-case, drop stopwords and words of length <= 2
    cleaned = [word.lower() for word in words
               if word.lower() not in stopset and len(word) > 2]
    # (score, word) pairs; set() removes duplicates before ranking descending
    wordList = sorted(set((corpus.tf_idf(w, string), w) for w in cleaned),
                      reverse=True)
    final = [pair[1] for pair in wordList[:number]]
    out = open('db\cyttron-keywords.csv', 'a')
    # ','.join is equivalent to the original two-branch join for any length
    out.write('"' + ','.join(final) + '";')
    out.close()
    return final
def siodoc(boias,dirout): ''' Entra com o endereco de onde baixa o dado da boia ''' data = dt.datetime.strftime(dt.datetime.now(),'%Y%m%d%H') site = urllib2.urlopen("http://metocean.fugrogeos.com/marinha/Member/"+boiassiodoc) print 'Baixando dado do SIODOC' #datefile = '%02d%02d%02d' %(y,m,d) filename = "SIODOC_"+data+".csv" #create .csv file csv = open(dirout+'SIODOC/'+filename,"w") csv.write(site.read()) csv.close() return
def import_data(directory): if not check_integrity(directory): print "directory cannot be proccessed: {}".format(directory) directories_not_processes.append(directory) return files_in_dir = os.listdir(directory) files = {ACCIDENTS: find_and_open_csv(directory, "AccData.csv", files_in_dir), URBAN_INTERSECTION: find_and_open_csv(directory, "IntersectUrban.csv", files_in_dir), NON_URBAN_INTERSECTION: find_and_open_csv(directory, "IntersectNonUrban.csv", files_in_dir), STREETS: find_and_open_csv(directory, "DicStreets.csv", files_in_dir), DICTIONARY: find_and_open_csv(directory, "Dictionary.csv", files_in_dir) } for accident in import_accident(files): yield accident for csv in files.values(): csv.close()
count += 1
# filter row: m[0..L-1] comma-terminated, then the class label
filtercol = ''.join(str(m[i]) + ',' for i in range(L)) + str(classify) + "\n"
# test-data tail: comma-separated, no trailing comma
an = ','.join(str(data_test[i]) for i in range(count))
# fft row: Y[0..lfft-1] comma-terminated, the class label, then the tail
fftcol = ''.join(str(Y[i]) + ',' for i in range(lfft)) + str(classify) + "," + an + "\n"
# append filter data
out = open('filter_data.csv', "a")
out.write(filtercol)
out.close()
# append fft data
out = open('fft_data.csv', "a")
out.write(fftcol)
out.close()
print('Writing complete !!')
def lemur2csv(lemurfile, csvfile):
    """Write each (e, g) pair from *lemurfile* to *csvfile* as 'e<TAB>g'.

    NOTE(review): as in the original, no separator is written between
    records — output relies on each `g` carrying its own terminator;
    confirm against loadlemur's data.
    """
    lemur = loadlemur(lemurfile)
    # context manager guarantees the handle is closed even if a write fails
    with open(csvfile, 'w') as out:
        for e, g in lemur.items():
            out.write(e + "\t" + g)
def scan():
    """Scan a single barcode from the webcam, append it to the output CSV,
    then look up and return the matching flight/gate information.

    Side effects: writes the barcode CSV, writes info_file.txt, and calls
    clean_db()/link(). Relies on module-level helpers (read_query_output,
    gate_no, gate_infor, clean_db, link) defined elsewhere in this file.
    """
    # construct the argument parser and parse the arguments
    start = time.time()
    ap = argparse.ArgumentParser()
    ap.add_argument("-o", "--output", type=str, default="barcodes.csv",
                    help="path to output CSV file containing barcodes")
    args = vars(ap.parse_args())
    # initialize the video stream and allow the camera sensor to warm up
    # print("[INFO] starting video stream...")
    vs = VideoStream(src=0, resolution=(1280, 720)).start()
    # vs = VideoStream(usePiCamera=True,resolution=(960, 720),framerate=30).start()
    time.sleep(0.5)
    # open the output CSV file for writing and initialize the set of
    # barcodes found thus far
    csv = open(args["output"], "w")
    found = set()
    scanning = True
    # loop over the frames from the video stream
    while scanning:
        # grab the frame from the threaded video stream and resize it to
        # have a maximum width of 720 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=720)
        # find the barcodes in the frame and decode each of the barcodes
        barcodes = pyzbar.decode(frame)
        # loop over the detected barcodes
        for barcode in barcodes:
            # extract the bounding box location of the barcode and draw
            # the bounding box surrounding the barcode on the image
            (x, y, w, h) = barcode.rect
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # the barcode data is a bytes object so if we want to draw it
            # on our output image we need to convert it to a string first
            barcodeData = barcode.data.decode("utf-8")
            barcodeType = barcode.type
            # draw the barcode data and barcode type on the image
            text = "{} ({})".format(barcodeData, barcodeType)
            cv2.putText(frame, text, (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            # if the barcode text is currently not in our CSV file, write
            # the timestamp + barcode to disk and update the set
            if barcodeData not in found:
                #csv.write("{},{}\n".format(datetime.datetime.now(),
                #barcodeData))
                csv.write("{}\n".format(barcodeData))
                csv.flush()
                found.add(barcodeData)
                # first successful read ends the scanning loop
                scanning = False
                break
        # show the output frame
        cv2.imshow("Barcode Scanner", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    csv.close()
    cv2.destroyAllWindows()
    vs.stop()
    # spin until the scan loop above has flagged completion, then read the
    # decoded value back from the query output
    while True:
        if scanning != True:
            name = read_query_output()
            break
    # resolve flight and gate details for the scanned name
    flight_id = gate_no(name[0])
    infor = gate_infor(flight_id[0])
    gate = infor[0]
    status = infor[1]
    board = infor[2]
    departure = infor[3]
    dest = infor[4]
    #print ("Gate number is {}".format(gate[0]))
    #print ("Location on x_coord is {}".format(x[0]))
    #print ("Location on y_coord is {}".format(y[0]))
    # persist the details for downstream consumers
    info_file = open("info_file.txt", "w+")
    info_file.write(name[0] + '\n')
    info_file.write(flight_id[0] + '\n')
    info_file.write(str(gate[0]) + '\n')
    info_file.write(status[0] + '\n')
    info_file.write(str(board[0]) + '\n')
    info_file.write(str(departure[0]) + '\n')
    info_file.write(dest[0] + '\n')
    info_file.close()
    clean_db()
    link(flight_id[0])
    return ({
        'gate_no': gate,
        'flight_id': flight_id,
        'status': status,
        'boarding_time': board,
        'depature_time': departure,
        'destination': dest
    })
def GetEc2():
    # Collect per-instance CloudWatch CPU statistics (via GetCpu) for every
    # EC2 reservation in the region and append them, plus instance tags, to
    # the ec2_fileout CSV report. Prints a summary of under-utilized
    # instances at the end. Python 2 code; relies on module globals:
    # ec2client, dry_run, debug_run, daysTocheck, total_ec2_cpu_thresh,
    # cpu_v, region, ec2_fileout.
    csv = open(ec2_fileout, "w")
    columnTitleRow = "Instance, Start_time, End_time, MaxCPU(%), MinCPU(%), AvgCPU(%), DataPointsSize, LowUsage EC2 count, Tags\n"
    csv.write(columnTitleRow)
    print "Retrieving EC2/CPU info for " + str(daysTocheck) + " days [Started]"
    vol_c = 0
    #progress spinner for stdout
    spinner = itertools.cycle(['-', '/', '|', '\\'])
    for ec2 in ec2client.describe_instances(DryRun=dry_run)['Reservations']:
        #row =[];
        #print(ec2);
        #initiate progress spinner for stdout
        sys.stdout.write(spinner.next())
        sys.stdout.flush()
        if ec2['Instances']:
            instance = ec2['Instances']
            reser_id = ec2['ReservationId']
            for i in instance:
                if debug_run:
                    print "|" + reser_id + " : " + i['InstanceId'] + " : " + i[
                        'InstanceType'] + " : " + str(
                            i['LaunchTime']) + " : " + str(i['State']),
                #Getting clouldwatch details
                csv_arr = GetCpu(i['InstanceId'])
                if 'Tags' in i.keys():
                    Tag = i['Tags']
                    if debug_run:
                        print "Tags:- ",
                    for j in Tag:
                        if debug_run:
                            print j['Key'] + " : " + j['Value'],
                        csv_arr.append(j['Key'] + " : " + j['Value'])
                    if debug_run:
                        print ",",
                else:
                    if debug_run:
                        print "[This Instance doesn't have tags]"
                    csv_arr.append("[This Instance doesn't have tags]")
        else:
            print "[This Reservations not attached to any instance]"
        # NOTE(review): csv_arr is only (re)assigned inside the Instances
        # branch above. A reservation without instances appends to the
        # previous iteration's row (NameError on the very first one), and
        # for multi-instance reservations only the LAST instance's row is
        # written here. Confirm intent before relying on this report.
        csv_arr.append("\n")
        csv.write(','.join(map(str, csv_arr)))
        if debug_run:
            break
    #Closing progress spinner for stdout
    sys.stdout.write('\b')
    print "Retrieving EC2/CPU info for " + str(
        daysTocheck) + " days [Completed]"
    total_ec2 = "Total " + str(
        total_ec2_cpu_thresh
    ) + " EC2 instances are under utilizing CPUs (under " + str(
        cpu_v) + "% avg usage) on " + region
    print "---------------------------------------------------------------------------------------"
    print total_ec2
    print "---------------------------------------------------------------------------------------"
    print "Please refer '" + ec2_fileout + "' for more details\n"
    # summary footer appended to the CSV report
    csv.write(
        "-----------------------------------------------------------------------------------------------\n"
    )
    csv.write(total_ec2 + "\n")
    csv.write(
        "-----------------------------------------------------------------------------------------------\n"
    )
    csv.close()
def _generate_blank(self):
    """Overwrite self.csv with a fresh cue-sheet containing only the header row."""
    # `with` guarantees the handle is closed even if the write raises
    with open(self.csv, "w") as out:
        out.write("TYPE,START POINT,END POINT,TURN COMMENTS,TURN CUE,"
                  "ROAD,ROAD COMMENTS,LANES,SHOULDER,SPEED,TRAFFIC\n")
def main():
    """Socket server that records Leap Motion hand data to animDataLocal.csv.

    Accepts one client connection, then streams per-frame wrist and finger
    rows until a key is pressed (msvcrt.kbhit). Python 2 / Windows code.
    """
    #create socket
    serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    #get local machine name
    host = socket.gethostname()
    port = 9999
    #bind the port
    serversocket.bind((host, port))
    #queue up requests
    serversocket.listen(5)
    #establish connection
    clientsocket, addr = serversocket.accept()
    #open motion builder (disabled)
    #try:
    #    subprocess.Popen('C:\\Program Files\\Autodesk\\MotionBuilder 2016\\bin\\x64\\motionbuilder.exe')
    #except:
    #    print 'Motion Builder Failed To Open'
    #else:
    #    print 'Motion Builder Opened Successfully'
    #create csv next to this script
    dir = os.path.dirname(os.path.realpath(__file__))
    csv = open(str(dir.replace("\\", "/")) + "/animDataLocal.csv", 'w+b')
    listener = SampleListener()
    controller = Leap.Controller()
    LFrame = Leap.Frame()
    controller.add_listener(listener)
    connected = listener.on_connect(controller)
    #on_frame returns the frame plus wrist and per-finger bone data for both hands
    (oldFrame, leftWristPosition, leftWristRotation, rightWristPosition,
     rightWristRotation, IndexLeftData, ThumbLeftData, MiddleLeftData,
     RingLeftData, PinkyLeftData, IndexRightData, ThumbRightData,
     MiddleRightData, RingRightData, PinkyRightData) = listener.on_frame(controller)
    frameCount = 0
    while True:
        if not msvcrt.kbhit():
            #only write when a new frame has arrived
            if oldFrame != listener.on_frame(controller)[0]:
                if (str(leftWristPosition) != '0'):  #if we have legit values continue
                    _write_wrist(csv, '"leftWrist"', frameCount,
                                 leftWristPosition, leftWristRotation)
                    _write_finger(csv, '"Left Thumb "', ThumbLeftData, frameCount)
                    _write_finger(csv, '"Left Index "', IndexLeftData, frameCount)
                    _write_finger(csv, '"Left Middle "', MiddleLeftData, frameCount)
                    _write_finger(csv, '"Left Ring "', RingLeftData, frameCount)
                    _write_finger(csv, '"Left Pinky "', PinkyLeftData, frameCount)
                if (str(rightWristPosition) != '0'):  #if we have legit values continue
                    _write_wrist(csv, '"rightWrist"', frameCount,
                                 rightWristPosition, rightWristRotation)
                    _write_finger(csv, '"Right Thumb "', ThumbRightData, frameCount)
                    _write_finger(csv, '"Right Index "', IndexRightData, frameCount)
                    _write_finger(csv, '"Right Middle "', MiddleRightData, frameCount)
                    _write_finger(csv, '"Right Ring "', RingRightData, frameCount)
                    _write_finger(csv, '"Right Pinky "', PinkyRightData, frameCount)
                #refresh all tracking data for the next frame
                (oldFrame, leftWristPosition, leftWristRotation,
                 rightWristPosition, rightWristRotation, IndexLeftData,
                 ThumbLeftData, MiddleLeftData, RingLeftData, PinkyLeftData,
                 IndexRightData, ThumbRightData, MiddleRightData,
                 RingRightData, PinkyRightData) = listener.on_frame(controller)
                frameCount += 1
        else:
            break
    connected = listener.on_disconnect(controller)
    csv.close()


def _write_wrist(csv, label, frameCount, pos, rot):
    """Write one wrist row: zeros when untracked, otherwise
    'label,frame,x,y,z,rotY,rotX,rotZ,'."""
    if (pos[0] == 0) and (pos[1] == 0) and (pos[2] == 0):
        #write 0's if we cant find the wrist
        csv.write(label + "," + str(frameCount) + "," + "0" + "," + "0" +
                  "," + "0" + "," + "0" + "," + "0" + "," + "0" + "\n")
    else:
        #write wrist location and rotations
        pos = re.sub('[()]', '', str(pos))
        pos = re.sub(r'\s+', '', str(pos))
        csv.write(label + "," + str(frameCount) + "," + str(pos) + "," +
                  str(360 - rot[1]) + "," + str(rot[0]) + "," +
                  str(rot[2]) + "," + "\n")


def _write_finger(csv, label, data, frameCount):
    """Write the four bone rows of one finger: 'label<bone>:,frame,x,y,z'."""
    for i in range(0, 4):
        csv.write(label + str(data[i][0]) + ":" + "," + str(frameCount) +
                  "," + str(data[i][1][0]) + "," + str(data[i][1][1]) +
                  "," + str(data[i][1][2]) + "\n")
def BtnCnv(self):
    """Convert the selected spreadsheet into the store-upload CSV format
    under NEWDIR and open the result folder on success."""
    # get path of the selected file from the UI
    pathXLS = self.lbPath.text()
    resPath, resFilename = os.path.split(pathXLS)
    current_dir = os.getcwd()
    resultPath = Path(os.path.abspath(os.path.join(current_dir, NEWDIR)))
    if len(pathXLS) == 0:
        QMessageBox.warning(self, "Warning", "Please select CSV file first!",
                            QMessageBox.Ok)
    else:
        FileName = resFilename
        resPathFile = os.path.abspath(
            os.path.join(current_dir, NEWDIR, FileName))
        resultPath = Path(os.path.abspath(os.path.join(current_dir, NEWDIR)))
        # always make sure the output directory exists, then start from a
        # clean file (the original only created the directory when the file
        # was absent, and left the handle open if a write raised)
        os.makedirs(os.path.dirname(resPathFile), exist_ok=True)
        if os.path.exists(resPathFile):
            os.remove(resPathFile)
        with open(resPathFile, 'w+') as out:
            # write first header
            out.write(HEAD_CODE_STORE + DELIM + HEAD_PO_NO + DELIM +
                      HEAD_BARCODE + DELIM + HEAD_UOM + DELIM + HEAD_QTY +
                      DELIM + HEAD_MODAL)
            out.write("\n")
            a = CODE_STORE
            b = self.getPONO(pathXLS)
            c = self.getBRC(pathXLS)
            d = self.getUOM(pathXLS)
            e = self.getQTY(pathXLS)
            f = self.getMODAL(pathXLS)
            for resA, resB, resC, resD, resE, resF in zip(
                    itertools.repeat(a, len(c)), b, c, d, e, f):
                resD = resD.split('/', 1)[0]    # remove '/' on UOM
                resE = str(resE).split('.')[0]  # remove decimal on QTY
                out.write(resA + DELIM + resB + DELIM + resC + DELIM +
                          resD + DELIM + resE + DELIM + resF)
                out.write("\n")
        reply = QMessageBox.information(self, "Information", "Success!",
                                        QMessageBox.Ok)
        if reply == QMessageBox.Ok:
            os.startfile(str(resultPath))
def write_file(file, filename):
    """Write each item of *file* on its own line to
    csv_upload_directory/<filename>.csv."""
    # the `with` block closes the handle; the original also called
    # csv.close() inside the block, which was redundant
    with open("csv_upload_directory/%s.csv" % filename, 'w') as out:
        for d in file:
            out.write(d)
            out.write('\n')
def findData(data_file, log_file):
    """Scrape glider telemetry from a surfacing log, merge in waypoint data
    from /root/gliderState.xml, append one row to slocum.csv, and write a
    GEOJSON point of the current position to slocum.geojson."""
    print("scraping data...")
    data = open(data_file, 'r')
    glider_name = ""
    # timestamp is the text between the last '_' and the extension of the log filename
    time = log_file.split(".")[0].split("_")[-1]
    mission = ""
    lat = ""
    lon = ""
    battery_charge = ""
    battery_voltage = ""
    wp_lat = ""
    wp_lon = ""
    coulomb_amphr_total = ""
    leakdetect = ""
    leakdetect_forward = ""
    #log_file = ""
    # fixed-prefix matching against the known log line formats; fields not
    # present in the log keep their "" defaults
    for line in data:
        if line[:13] == "Vehicle Name:":
            glider_name = line.split(": ")[1].rstrip()
        if line[:11] == "MissionName":
            mission = line.split(":")[1].split(" ")[0]
        if line[:13] == "GPS Location:":
            lat = line.split(" ")[2].rstrip()
            lon = line.split("N")[1].split("E")[0].strip()
        #if line[:13] == " m_lat(lat)":
        #    lat = line.split("(lat) ")[1].rstrip()
        #if line[:13] == " m_lon(lon)":
        #    lon = line.split("(lon) ")[1].rstrip()
        if line[:46] == " sensor:m_lithium_battery_relative_charge(%)":
            battery_charge = line.split("=")[1].split(" ")[0]
        if line[:24] == " sensor:c_wpt_lat(lat)":
            wp_lat = line.split("=")[1].split(" ")[0]
        if line[:24] == " sensor:c_wpt_lon(lon)":
            wp_lon = line.split("=")[1].split(" ")[0]
        if line[:40] == " sensor:m_coulomb_amphr_total(amp-hrs)":
            coulomb_amphr_total = line.split("=")[1].split(" ")[0]
        if line[:37] == " sensor:m_leakdetect_voltage(volts)":
            leakdetect = line.split("=")[1].split(" ")[0]
        if line[:45] == " sensor:m_leakdetect_voltage_forward(volts)":
            leakdetect_forward = line.split("=")[1].split(" ")[0]
        if line[:26] == " sensor:m_battery(volts)":
            battery_voltage = line.split("=")[1].split(" ")[0]
        if line[:24] == " sensor:m_vacuum(inHg)":
            vacuum = line.split("=")[1].split(" ")[0]
    log_file_end = log_file + "\n"
    # Get info from the XML
    tree = etree.parse("/root/gliderState.xml")
    #print("XML data:")
    p = tree.xpath('/gliderState/writeTrack/dataParameters/dataParameter')
    for param in p:
        if (param.find("name").text == "Next waypoint coordinates"):
            wpLat = param.find("value").text.split(',')[0].strip("(")
            wpLon = param.find("value").text.split(',')[1].strip(")")
            #print(wpLat)
            #print(wpLon)
        if (param.find("name").text == "Next waypoint range"):
            wpDistance = param.find("value").text
        if (param.find("name").text == "Next waypoint bearing"):
            wpHeading = param.find("value").text
    # Generate CSV
    # NOTE(review): wpLat/wpLon/wpDistance/wpHeading are only bound when the
    # XML contains the matching parameter names — a missing entry raises
    # NameError below; confirm gliderState.xml always provides them.
    row = ",".join(
        (glider_name, mission, time, lat, lon, battery_voltage,
         battery_charge, wp_lat, wp_lon, coulomb_amphr_total, leakdetect,
         leakdetect_forward, wpLat, wpLon, wpDistance, wpHeading,
         log_file_end))
    print(row)
    csv = open("slocum.csv", 'a')
    csv.write(row)
    csv.close()
    removeDupes("slocum.csv")
    # Generate some GEOJSON data
    # Create GEOJSON point of current glider location:
    pointMarker = Point(
        (convertISO2DecimalDegrees(lon), convertISO2DecimalDegrees(lat)))
    with open('slocum.geojson', 'w') as f:
        json.dump(pointMarker, f)
def _grade(marks, weight):
    """Map a VTU mark to (letter grade, grade points = weight * scale).

    Raises ValueError for non-numeric marks, which the caller's per-USN
    try/except treats as 'Not Found'.
    """
    marks = int(marks)
    if marks >= 90:
        return "S+", weight * 10
    elif marks >= 80:
        return "S", weight * 9
    elif marks >= 70:
        return "A", weight * 8
    elif marks >= 60:
        return "B", weight * 7
    elif marks >= 50:
        return "C", weight * 6
    elif marks >= 45:
        return "D", weight * 5
    elif marks >= 40:
        return "E", weight * 4
    return "F", 0


def show_entry_fields():
    """Scrape VTU CBCS results for every USN in the class and append one
    grade sheet per student (plus SGPA) to <region><college><year><branch>.csv.

    Reads the Tk entry widgets ar/col/sem/br/lim defined at module level.
    """
    area_code = str(ar.get())
    college_code = str(col.get())
    semester = str(sem.get())
    branch = str(br.get())
    limit = int(lim.get())
    # admission year inferred from the current semester
    if semester == "2":
        year = 16
    elif semester == "4":
        year = 15
    elif semester == "6":
        year = 14
    else:
        year = 13
    download_dir = area_code + college_code.upper() + str(
        year) + branch.upper() + ".csv"
    csv = open(download_dir, "a")
    tot_sum = 0
    for usn in range(1, limit + 1):
        print("Extracting results...")
        try:
            # zero-pad the serial part of the USN to three digits
            # BUG FIX: the original used `usn < 99`, leaving USN 99 unpadded
            if usn < 10:
                usn = '00' + str(usn)
            elif usn < 100:
                usn = '0' + str(usn)
            else:
                usn = str(usn)
            results_page = ('http://results.vtu.ac.in/cbcs_17/result_page.php?usn=' +
                            str(area_code) + college_code + str(year) + branch + usn)
            page = urllib.request.urlopen(results_page)
            soup = BeautifulSoup(page, 'html.parser')
            # collect the non-empty cells of every result row
            data = []
            table = soup.find('table', attrs={"class": "table table-bordered"})
            for body in table.find_all('tbody'):
                for row in body.find_all('tr'):
                    cols = [ele.text.strip() for ele in row.find_all('td')]
                    data.append([ele for ele in cols if ele])
            student_name = soup.find(
                'td', attrs={'style': 'padding-left:15px'}).text.strip()
            usn = "University Seat Number: " + "," + str(area_code) + str(
                college_code).upper() + str(year) + branch.upper() + usn + "\n"
            student = "Student name: " + "," + student_name[2:] + "\n"
            csv.write(usn)
            csv.write(student)
            # header row taken from the result table, plus two extra columns
            thead = table.find('thead')
            h = thead.find_all('th', attrs={'style': 'text-align:center;'})
            hvar = [cell.text.strip() for cell in h]
            hh = (hvar[0] + "," + hvar[1] + "," + hvar[2] + "," + hvar[3] +
                  "," + hvar[4] + "," + hvar[5] + "," + "Grades" + "," +
                  "Grade Point" + "\n")
            csv.write(hh)
            for i in data:
                var1, var2, var3, var4, var5, var6 = i[0], i[1], i[2], i[3], i[4], i[5]
                # lab subjects ('L' in the course code) carry 2 credits, others 4
                if (len(var1) == 7 or len(var1) == 8) and (
                        str(var1)[4] == "L" or str(var1[5]) == "L"):
                    weight = 2
                else:
                    weight = 4
                gvar, cvar = _grade(var5, weight)
                tot_sum += cvar
                j = (var1 + "," + var2 + "," + var3 + "," + var4 + "," +
                     var5 + "," + var6 + "," + gvar + "," + str(cvar) + "\n")
                csv.write(j)
            # NOTE: 280 is the hard-coded total-credit denominator from the
            # original implementation
            sgpa = str(float(tot_sum / 280) * 10)
            a = (" " + "," + " " + "," + " " + "," + " " + "," + " " + "," +
                 " " + "," + "SGPA" + "," + sgpa[0:4] + "\n")
            csv.write(a)
            tot_sum = 0
            csv.write("\n\n\n\n\n\n\n\n\n\n")
        except:
            # best-effort: skip USNs whose result page is missing/malformed
            print("Not Found")
            continue
    print("DONE")
    csv.close()
def clearCsv():
    """Truncate the module-level `filename` CSV (creating it if missing)."""
    with open(filename, "w+"):
        pass
def process_corpus():
    """Compute style statistics for <corpus_name>.txt, write them to
    user.csv, and return the pickled model's author prediction followed by
    the formatted statistics."""
    #corpus_contents = ' '.join(sys.argv[1:])
    inputfile = corpus_name + ".txt"
    corpus_contents = open(inputfile, 'r').read()
    totalwords = []
    totalsent = []
    totaltags = []
    sentcount = 0
    # tokenize into sentences
    sentences = nltk.sent_tokenize(corpus_contents)
    # tokenize into words and create part of speech tags
    for sentence in sentences:
        totalsent.append(sentence)
        sentcount = sentcount + 1
        words = nltk.word_tokenize(sentence)
        for word in words:
            totalwords.append(word)
        tagged = nltk.pos_tag(words)
        for tag in tagged:
            totaltags.append(tag)
    # calculate word and sentence average
    waverage = sum(len(word) for word in totalwords) / len(totalwords)
    wtrunc = '%.3f' % (waverage)
    saverage = len(totalwords) / sentcount
    strunc = '%.3f' % (saverage)
    # add up total number of pos tags
    NNtags = []
    VBDtags = []
    JJtags = []
    RBtags = []
    punctags = []
    for x in totaltags:
        if x[1] == 'NN':
            NNtags.append(x)
        elif x[1] == 'VBD':
            VBDtags.append(x)
        elif x[1] == 'JJ':
            JJtags.append(x)
        elif x[1] == 'RB':
            RBtags.append(x)
        elif x[1] in (".", ",", ";", "-"):
            # BUG FIX: the original tested `x[1] == "." or "," or ";" or "-"`,
            # which is always true, so EVERY remaining tag was counted as
            # punctuation. NOTE: punctratio changes accordingly — file.pkl
            # should be regenerated with the corrected feature.
            punctags.append(x)
    # calculate part of speech ratios
    punctratio = len(punctags) / len(totalwords)
    NNratio = len(NNtags) / len(totalwords)
    VBDratio = len(VBDtags) / len(totalwords)
    JJratio = len(JJtags) / len(totalwords)
    RBratio = len(RBtags) / len(totalwords)
    # create csv for machine learning model
    open('user.csv', 'w').close()  #erase file
    download_dir = "user.csv"
    csv = open(download_dir, "a")
    columnTitleRow = "author,avgword,avgsent,punctratio,nnratio,vbdratio,rbratio,jjratio\n"
    csv.write(columnTitleRow)
    row = "user," + str(waverage) + "," + str(saverage) + "," + str(
        punctratio) + "," + str(NNratio) + "," + str(VBDratio) + "," + str(
            RBratio) + "," + str(JJratio) + "\n"
    csv.write(row)
    csv.close()
    # use already generated pickle file to predict author
    test_file = "user.csv"
    df1 = pd.read_csv(test_file, header=0)
    test_data = df1.iloc[:, 1:]
    model2 = joblib.load("file.pkl")
    preds2 = model2.predict(test_data)
    # truncate to 3 decimal places and add %
    NNratio = NNratio * 100
    ntrunc = '%.3f' % (NNratio)
    VBDratio = VBDratio * 100
    vtrunc = '%.3f' % (VBDratio)
    JJratio = JJratio * 100
    jtrunc = '%.3f' % (JJratio)
    RBratio = RBratio * 100
    rtrunc = '%.3f' % (RBratio)
    # put author guess and stats into an array
    response = []
    response.append(preds2[0])
    response.append(str(wtrunc))
    response.append(str(strunc))
    response.append(str(ntrunc) + "%")
    response.append(str(vtrunc) + "%")
    response.append(str(jtrunc) + "%")
    response.append(str(rtrunc) + "%")
    print(response)
    return (response)
def video():
    """Continuously scan QR codes from the webcam and log check-in/out
    events to the output CSV and a SharePoint list.

    A code seen for the first time is checked IN; seen again after `t_value`
    it is checked OUT; seen again `t_value` after that, its record is purged
    so the same user can check back in later. Relies on module globals:
    settings, system_id, t_value, ctx, bkcsvfolder, create_list_item,
    upload_file.
    """
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-o", "--output", type=str, default="barcodes.csv",
                    help="path to output CSV file containing barcodes")
    args = vars(ap.parse_args())
    # filename-friendly timestamp used for the uploaded CSV name
    time_header = str(datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S'))
    # initialize the video stream and allow the camera sensor to warm up
    print("[ALERT] starting video stream...")
    print("Press 'Q' to exit")
    vs = VideoStream(src=0).start()
    # vs = VideoStream(usePiCamera=True).start()  # mobile/Pi alternative
    time.sleep(5.0)
    # open the output CSV file for writing
    csv = open(args["output"], "w")
    # parallel lists: each known code, its last event time, and its status
    found = []
    found_time = []
    found_status = []
    ctxAuth = AuthenticationContext(url=settings['url'])
    # loop over the frames from the video stream
    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        # find and decode the barcodes in the frame
        barcodes = pyzbar.decode(frame)
        timestr = strftime("%m/%d/%Y %H:%M")
        for barcode in barcodes:
            # draw the bounding box around the detected code
            (x, y, w, h) = barcode.rect
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # decode the payload to str for drawing/logging
            barcodeData = barcode.data.decode("utf-8")
            barcodeType = barcode.type
            text = "{} ({})".format(barcodeData, barcodeType)
            cv2.putText(frame, text, (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            if barcodeData not in found:
                # first sighting: check the user IN
                csv.write("{},{},{},{}\n".format(
                    system_id, datetime.datetime.now(), barcodeData, "IN"))
                csv.flush()
                contentstr = "{},{},{},{}\n".format(
                    system_id, timestr, barcodeData, "IN")
                create_list_item(ctx, contentstr)
                fname = "QRT" + "-" + system_id + "_" + time_header + ".csv"
                upload_file(ctx, contentstr, fname, bkcsvfolder)
                found.append(barcodeData)
                found_time.append(datetime.datetime.now())
                found_status.append("IN")
                sys.stdout.write('\a')
                sys.stdout.flush()
                print(barcodeData + " checking IN at " +
                      str(datetime.datetime.now()) + " at location: " +
                      system_id)
            else:
                # BUG FIX: look up this barcode's own index every time. The
                # original only set index_loc in the IN->OUT branch, so the
                # purge branch below deleted at a stale index from a previous
                # transition (wrong user's record, or NameError).
                index_loc = found.index(barcodeData)
                time_check = datetime.datetime.now() - found_time[index_loc]
                status_check = found_status[index_loc]
                if time_check > t_value and status_check == "IN":
                    # wait period elapsed while checked in -> check OUT
                    found_status[index_loc] = "OUT"
                    found_time[index_loc] = datetime.datetime.now()
                    csv.write("{},{},{},{},{}\n".format(
                        system_id, datetime.datetime.now(), barcodeData,
                        "OUT", time_check))
                    csv.flush()
                    contentstr = "{},{},{},{},{}\n".format(
                        system_id, timestr, barcodeData, "OUT", time_check)
                    create_list_item(ctx, contentstr)
                    fname = "QRT" + "-" + system_id + "_" + time_header + ".csv"
                    upload_file(ctx, contentstr, fname, bkcsvfolder)
                    sys.stdout.write('\a')
                    sys.stdout.flush()
                    print(barcodeData + " checking OUT at " +
                          str(datetime.datetime.now()) + " at location: " +
                          system_id + " for duration of " + str(time_check))
                elif time_check < t_value and status_check == "OUT":
                    # recently checked out: ignore until the wait period passes
                    pass
                elif time_check > t_value and status_check == "OUT":
                    # expired check-out: purge so the user can check in again
                    del found_status[index_loc]
                    del found_time[index_loc]
                    del found[index_loc]
                else:
                    print("Something happened... error")
        # show the output frame
        cv2.imshow("QR Toolbox", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # close the output CSV file do a bit of cleanup
    print("[ALERT] cleaning up... \n")
    csv.close()
    cv2.destroyAllWindows()
    vs.stop()
#use port 9999 port = 9999 # connection to hostname on the port. s.connect((host, port)) print 'Connected to Leap Motion Server...Data will stream from host after recording is done' #create csv in current path dir = os.path.dirname(os.path.realpath(__file__)) csv = open(str(dir.replace("\\", "/")) + "/animDataClient.csv", 'w+b') #Receive no more than 1024 bytes data = s.recv(1024) while (data): #handling the file transfer print "Receiving Leap Motion Data..." csv.write(data) data = s.recv(1024) print "Press a key to finish" try: sys.stdin.readline() except KeyboardInterrupt: pass finally: print "Closing" csv.close() s.close()
def GetSnap():
    """Dump EBS snapshot inventory for the account to a CSV report.

    Writes one row per snapshot (id, start time, source volume, size, tags)
    to the module-level `snap_fileout` path, accumulates the total and
    "old" (created before the current year pattern `timestampY`) snapshot
    sizes into module globals, and appends a human-readable summary footer.

    NOTE(review): depends on module globals `ec2client`, `dry_run`,
    `ownerid`, `debug_run`, `timestampY`, `region`, `snap_fileout` — verify
    they are initialized before calling.
    """
    global total_snap_size;
    global total_old_snap_size;
    # Open the report file fresh (truncates any previous run's output).
    csv = open(snap_fileout, "w");
    columnTitleRow = "SnapshotId, StartTime, Base VolumeId, VolumeSize(GB), Tags\n";
    csv.write(columnTitleRow);
    print "Retrieving Snapshot info [Started]";
    snap_c = 0;
    # One API call; iterate every snapshot owned by `ownerid`.
    for snapshot in ec2client.describe_snapshots(DryRun=dry_run,OwnerIds=[ownerid])['Snapshots']:
        row =[];
        if debug_run: print snapshot['SnapshotId'];
        if debug_run: print snapshot['StartTime'];
        if debug_run: print snapshot['VolumeId'];
        if debug_run: print snapshot['VolumeSize'];
        row.append(snapshot['SnapshotId']);
        row.append(snapshot['StartTime']);
        row.append(snapshot['VolumeId']);
        row.append(snapshot['VolumeSize']);
        total_snap_size += snapshot['VolumeSize'];
        # Format the start time as YYYY-MM-DD and compare against the
        # `timestampY` pattern; non-matching snapshots count as "old".
        timestamp = '{:%Y-%m-%d}'.format(snapshot['StartTime']);
        if re.match(timestampY, timestamp) is None:
            if debug_run: print "snap is old";
            total_old_snap_size += snapshot['VolumeSize'];
        # Append each tag as an extra "Key : Value" cell, sorted for stable output.
        if 'Tags' in snapshot.keys():
            Tag=snapshot['Tags'];
            if debug_run: print "Tags:- ",;
            for j in sorted(Tag):
                if debug_run: print j['Key'] + " : "+ j['Value'],;
                row.append(j['Key'] + " : "+ j['Value']);
            if debug_run: print " ";
        else:
            if debug_run: print "[This snapshot doesn't have tags]";
            row.append("[This snapshot doesn't have tags]");
        # Trailing "\n" element makes the join terminate the CSV line.
        row.append("\n");
        csv.write(','.join(map(str, row)));
        snap_c +=1;
    print "Retrieving Snapshot info [Completed]";
    total_snap ="Total "+str(snap_c)+" Snapshots and total Snapshots size on " + region +" is "+ str(total_snap_size)+" GB";
    total_old_snap ="Total Old Snapshots (Created a year before) size on " + region +" is "+ str(total_old_snap_size)+" GB";
    print "---------------------------------------------------------------------------------------"
    print total_snap;
    print total_old_snap;
    print "---------------------------------------------------------------------------------------"
    print "Please refer '"+snap_fileout+"' for more details\n";
    # Summary footer mirrored into the report file.
    csv.write("-----------------------------------------------------------------------------------------------\n");
    csv.write(total_snap+"\n");
    csv.write(total_old_snap+"\n");
    csv.write("-----------------------------------------------------------------------------------------------\n");
    csv.write("*Amazon EBS snapshots are stored incrementally: only the blocks that have changed after your last snapshot are saved,\n");
    csv.write("and you are billed only for the changed blocks\n");
    csv.write("*When an EBS snapshot is copied new EBS snapshot volume ID shows as vol-ffffffff\n");
    csv.close();
    return;
def print_images(folder, dumpFileName):
    """Append clinical/acquisition metadata from the JSON files in *folder*
    to the CSV file *dumpFileName*.

    For each ``*.json`` file, the "meta" section is flattened using the
    module-level ``clinical_keys`` and ``acqusition_keys`` lists (missing
    keys become a single space), and the "name" entry supplies the
    folder/file identification columns. A header row is written before the
    first data row only.

    Fixes over the previous version:
    - the output file was re-opened on every loop iteration but closed only
      once at the end (leaked handles; NameError when *folder* had no JSON
      files) — it is now opened exactly once via a context manager;
    - each ``open(jsonPtr)`` handle was never closed — now also managed;
    - unused locals (`images` hard-coded glob, `IsBenign`) removed;
    - bare ``except:`` narrowed to ``except Exception`` so it no longer
      swallows KeyboardInterrupt/SystemExit.
    """
    meta_json = glob.glob(folder + '/*.json')
    temp_key = ""
    temp_value = ""
    temp_key_name = ""
    temp_value_name = ""
    with_header = True
    # Open the dump file once, in append mode, for the whole run.
    with open(dumpFileName, 'a') as csv_out:
        for jsonPtr in meta_json:
            with open(jsonPtr) as fp:
                data_from_json = json.load(fp)  # gets Dict
            for key1, value1 in data_from_json.iteritems():
                if key1 == "meta":
                    temp_key = ""
                    temp_value = ""
                    for key2, value2 in value1.iteritems():
                        if key2 == "clinical":
                            for key3 in clinical_keys:
                                temp_key = temp_key + "," + str(key3)
                                try:
                                    temp_value = temp_value + "," + str(value1['clinical'][key3])
                                except Exception:
                                    # Missing/odd-typed key -> blank cell.
                                    temp_value = temp_value + "," + str(' ')
                        if key2 == "acquisition":
                            for key3 in acqusition_keys:
                                temp_key = temp_key + "," + str(key3)
                                try:
                                    temp_value = temp_value + "," + str(value1['acquisition'][key3])
                                except Exception:
                                    temp_value = temp_value + "," + str(' ')
                if key1 == "name":
                    # Add folder name
                    temp_key_name = str('Folder') + ","
                    temp_value_name = str(folder) + ","
                    # Add file name
                    temp_key_name += str(key1)
                    temp_value_name += str(value1)
            if with_header:
                # First record: emit the header row, then the data row.
                csv_out.write(temp_key_name + "," + temp_key + '\n')
                csv_out.write(temp_value_name + "," + temp_value + '\n')
                with_header = False
            else:
                csv_out.write(temp_value_name + "," + temp_value + '\n')
def GetAmi():
    """Dump AMI inventory for the account ('self'-owned images) to a CSV report.

    Writes one row per AMI (id, creation date, state, per-device block
    mappings, tags) to the module-level `ami_fileout` path, accumulates the
    total EBS volume size referenced by AMIs into a module global, and
    appends a human-readable summary footer.

    NOTE(review): depends on module globals `ec2client`, `dry_run`,
    `debug_run`, `region`, `ami_fileout` — verify they are initialized
    before calling.
    """
    global total_amivol_size;
    # Open the report file fresh (truncates any previous run's output).
    csv = open(ami_fileout, "w");
    columnTitleRow = "ImageId, CreationDate, State, BlockDeviceMappings 01:, BlockDeviceMappings 02, BlockDeviceMappings 03, Tags\n";
    csv.write(columnTitleRow);
    print "Retrieving AMI info [Started]";
    ami_c = 0;
    for ami in ec2client.describe_images(DryRun=dry_run,Owners=['self'])['Images']:
        #filter"ImageIds=['ami-7ae6541a']
        row =[];
        #print(volume)
        if debug_run: print "AMI: " +ami['ImageId'] + " Creation date: "+ str(ami['CreationDate']),;
        row.append(ami['ImageId']);
        row.append(ami['CreationDate']);
        row.append(ami['State']);
        # One cell per block-device mapping: EBS-backed devices record
        # "device:snapshot :sizeGB"; ephemeral devices are labeled as such.
        if 'BlockDeviceMappings' in ami.keys():
            EBS=ami['BlockDeviceMappings'];
            if debug_run: print "EBSs:- ",;
            for j in EBS:
                #if debug_run: print j;
                if "Ebs" in j:
                    Ebs_d = j['Ebs'];
                    Ebs_dn = j['DeviceName'];
                    #print Ebs_d;
                    if 'SnapshotId' in Ebs_d.keys():
                        if debug_run: print Ebs_d['SnapshotId']+" : "+str(Ebs_d['VolumeSize']),;
                        row.append(j['DeviceName']+":"+Ebs_d['SnapshotId'] + " :"+ str(Ebs_d['VolumeSize'])+"GB");
                        # Accumulate total EBS storage referenced by AMIs.
                        total_amivol_size += Ebs_d['VolumeSize'];
                    else:
                        if debug_run: print "No Snapshot info available";
                        row.append("No Snapshot info available");
                else:
                    if debug_run: print "This is ephemeral not a EBS"
                    row.append(j['DeviceName']+" : Ephemeral");
            if debug_run: print " ";
        else:
            if debug_run: print "[This AMI doesn't have BlockDeviceMappings]";
            row.append("No EBS");
        # Append each tag as an extra "Key : Value" cell, sorted for stable output.
        if 'Tags' in ami.keys():
            Tag=ami['Tags'];
            if debug_run: print "Tags:- ",;
            for j in sorted(Tag):
                if debug_run: print j['Key'] + " : "+ j['Value'],;
                row.append(j['Key'] + " : "+ j['Value']);
            if debug_run: print " ";
        else:
            if debug_run: print "[This AMI doesn't have tags]";
            row.append("[This AMI doesn't have tags]");
        if debug_run: print "Array out----------------------------------"
        # Trailing "\n" element makes the join terminate the CSV line.
        row.append("\n");
        csv.write(','.join(map(str, row)));
        ami_c +=1;
    print "Retrieving AMI info [Completed]";
    total_amivol ="Total "+str(ami_c)+" AMIs and total Volumes size attached to AMIs on " + region +" is "+ str(total_amivol_size)+" GB";
    print "---------------------------------------------------------------------------------------"
    print total_amivol;
    print "---------------------------------------------------------------------------------------"
    print "Please refer '"+ami_fileout+"' for more details\n";
    # Summary footer mirrored into the report file.
    csv.write("-----------------------------------------------------------------------------------------------\n");
    csv.write(total_amivol+"\n");
    csv.write("-----------------------------------------------------------------------------------------------\n");
    csv.close();
    return;
def lemur2csv(lemurfile, csvfile):
    """Convert a lemur mapping file to a tab-separated text file.

    Loads *lemurfile* via the module-level ``loadlemur`` helper (presumably a
    dict-like mapping — TODO confirm) and writes one ``key<TAB>value`` entry
    per item to *csvfile*. No newline is added; values are assumed to carry
    their own line endings — TODO confirm against loadlemur's output.

    Fix: the output file handle (previously a variable shadowing the ``csv``
    module) was not closed if a write raised; a ``with`` block now guarantees
    closure on every path.
    """
    lemur = loadlemur(lemurfile)
    with open(csvfile, 'w') as out:
        for e, g in lemur.items():
            out.write(e + "\t" + g)
def GetVolumes():
    """Dump EBS volume inventory for the region to a CSV report.

    Writes one row per volume (id, size, type, attachment state/instance/
    device, tags) to the module-level `vol_fileout` path, accumulates the
    total size of unattached volumes into a module global, and appends a
    human-readable summary footer.

    NOTE(review): depends on module globals `ec2client`, `dry_run`,
    `debug_run`, `region`, `vol_fileout` — verify they are initialized
    before calling.
    """
    global total_unattached;
    # Open the report file fresh (truncates any previous run's output).
    csv = open(vol_fileout, "w");
    columnTitleRow = "VolumeId, Size(GB), VolumeType, State, Attached Instnace, Device, Tags\n";
    csv.write(columnTitleRow);
    print "Retrieving Volume info [Started]";
    vol_c = 0;
    for volume in ec2client.describe_volumes(DryRun=dry_run)['Volumes']:
        row =[];
        #print(volume)
        if debug_run: print "Vol: " +volume['VolumeId'] + " Size: "+ str(volume['Size']) + "GB",;
        row.append(volume['VolumeId']);
        row.append(volume['Size']);
        row.append(volume['VolumeType']);
        # Attached volumes get state/instance/device cells per attachment;
        # unattached ones get a marker cell and count toward the total.
        if volume['Attachments']:
            Attachment=volume['Attachments'];
            for i in Attachment:
                if debug_run: print i['State'] + " to "+ i['InstanceId'] +" as "+ i['Device'],;
                row.append(i['State']);
                row.append(i['InstanceId']);
                row.append(i['Device']);
        else:
            if debug_run: print "[This volume not attached to any instance]",;
            row.append("[This volume not attached to any instance]");
            total_unattached += volume['Size'];
        # Append each tag as an extra "Key : Value" cell, sorted for stable output.
        if 'Tags' in volume.keys():
            Tag=volume['Tags'];
            if debug_run: print "Tags:- ",;
            for j in sorted(Tag):
                if debug_run: print j['Key'] + " : "+ j['Value'],;
                row.append(j['Key'] + " : "+ j['Value']);
            if debug_run: print " ";
        else:
            if debug_run: print "[This volume doesn't have tags]";
            row.append("[This volume doesn't have tags]");
        if debug_run: print "Array out----------------------------------"
        # Trailing "\n" element makes the join terminate the CSV line.
        row.append("\n");
        csv.write(','.join(map(str, row)));
        vol_c +=1;
    print "Retrieving Volume info [Completed]";
    total_vol ="Total "+str(vol_c)+" Volumes and total unattached Volumes size on " + region +" is "+ str(total_unattached)+" GB";
    print "---------------------------------------------------------------------------------------"
    print total_vol;
    print "---------------------------------------------------------------------------------------"
    print "Please refer '"+vol_fileout+"' for more details\n";
    # Summary footer mirrored into the report file.
    csv.write("-----------------------------------------------------------------------------------------------\n");
    csv.write(total_vol+"\n");
    csv.write("-----------------------------------------------------------------------------------------------\n");
    csv.close();
    return;
csstat= getChangesetStats(id) #print changeset stats["changeset"] += 1 stats["node_c"] += csstat["node_c"] stats["way_c"] += csstat["way_c"] stats["relation_c"] += csstat["relation_c"] stats["node_m"] += csstat["node_m"] stats["way_m"] += csstat["way_m"] stats["relation_m"] += csstat["relation_m"] stats["node_d"] += csstat["node_d"] stats["way_d"] += csstat["way_d"] stats["relation_d"] += csstat["relation_d"] # stats = updateStat(stats, getChangesetStats(id)) # print user + ", " + str(len(changesets)) + ", " + str(stats["node"]) + ", " + str(stats["way"]) + ", " + str(stats["relation"]) stats_team["changeset"] += stats["changeset"] stats_team["node_c"] += stats["node_c"] stats_team["way_c"] += stats["way_c"] stats_team["relation_c"] += stats["relation_c"] stats_team["node_m"] += stats["node_m"] stats_team["way_m"] += stats["way_m"] stats_team["relation_m"] += stats["relation_m"] stats_team["node_d"] += stats["node_d"] stats_team["way_d"] += stats["way_d"] stats_team["relation_d"] += stats["relation_d"] csv.write(str(ekip) +", " +str(user) +", " + str(len(changesets)) + ", " + str(stats["node_c"]) + ", " + str(stats["way_c"]) + ", " + str(stats["relation_c"])+ ", " + str(stats["node_m"]) + ", " + str(stats["way_m"]) + ", " + str(stats["relation_m"])+ ", " + str(stats["node_d"]) + ", " + str(stats["way_d"]) + ", " + str(stats["relation_d"]) + "\n") csv.flush() # end of team - print in summary file by team csv_team.write(str(ekip) +", " + str(date_from[1:10])+" - " + str(date_to[1:10])+", " + str(stats_team["changeset"]) + ", " + str(stats_team["node_c"]) + ", " + str(stats_team["way_c"]) + ", " + str(stats_team["relation_c"])+ ", " + str(stats_team["node_m"]) + ", " + str(stats_team["way_m"]) + ", " + str(stats_team["relation_m"])+ ", " + str(stats_team["node_d"]) + ", " + str(stats_team["way_d"]) + ", " + str(stats_team["relation_d"]) + "\n") csv_team.flush() csv.close() print "Done."
def file_scan():
    """Scan barcodes from the default webcam and log known products to a CSV.

    Opens the camera, decodes barcodes with pyzbar in a live loop, draws the
    bounding box and decoded text on the preview window, and for each barcode
    seen for the first time writes ``barcode,BinLabel`` to the output CSV
    (``-o``/``--output``, default ``barcodes.csv``). Press ``q`` to quit.

    Fixes over the previous version:
    - the output file handle shadowed the imported ``csv`` module and was
      leaked on any exception — now opened via ``with`` (closed on all paths);
    - the hard-coded ``elif int(...)`` chain is a lookup table, and a
      non-numeric barcode no longer crashes the scanner (it is skipped);
    - unused local ``count`` and unused local imports removed.
    """
    from imutils.video import VideoStream
    from pyzbar import pyzbar
    import argparse
    import time
    import cv2

    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-o", "--output", type=str, default="barcodes.csv",
                    help="path to output CSV file containing barcodes")
    args = vars(ap.parse_args())

    # Known product codes mapped to their bin label.
    bins = {
        5012345678900: "Bin1",
        811204012344: "Bin2",
        8691014000012: "Bin3",
        3245456345344: "Bin2",
    }

    # initialize the video stream and allow the camera sensor to warm up
    print("[BILGI] Video kamera calisiyor...")
    vs = VideoStream(src=0).start()
    time.sleep(2.0)

    # open the output CSV file for writing and track barcodes already logged
    found = set()
    with open(args["output"], "w") as out:
        # loop over the frames from the video stream
        while True:
            frame = vs.read()
            # find the barcodes in the frame and decode each of the barcodes
            barcodes = pyzbar.decode(frame)
            for barcode in barcodes:
                # draw the bounding box surrounding the barcode on the image
                (x, y, w, h) = barcode.rect
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                # barcode data is bytes; convert to str before drawing it
                barcodeData = barcode.data.decode("utf-8")
                barcodeType = barcode.type
                text = "{} ({})".format(barcodeData, barcodeType)
                cv2.putText(frame, text, (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                # if the barcode text is not yet in our CSV file, write it
                if barcodeData not in found:
                    try:
                        code = int(barcodeData)
                    except ValueError:
                        # Non-numeric barcode: not a known product; skip.
                        continue
                    if code in bins:
                        if code == 3245456345344:
                            print("I found the second")
                        out.write("{},{}\n".format(barcodeData, bins[code]))
                        out.flush()
                        found.add(barcodeData)
            # show the output frame
            cv2.imshow("Barcode Scanner", frame)
            key = cv2.waitKey(1) & 0xFF
            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break
        # do a bit of cleanup; the CSV closes when the with-block exits
        print("[BILGI] Islem sonlandi...")
        cv2.destroyAllWindows()
        vs.stop()