def get_pid_list(path):
    """
    extract pids from a file created by ncarlibadmin/report/lab_pub_reporter.py
    :return: list of pid values
    """
    csv = CsvFile()
    csv.read(path)
    print '{} records read'.format(len(csv.data))
    pid_list = map(lambda x: x['pid'], csv.data)
    return pid_list
def display_flight(self):
    # Clear contents of displayField ("1.0" is the start index of a Text widget).
    self.text_csv_2.delete("1.0", "end")
    self.filename = filedialog.askopenfilename(
        title="Select A File",
        filetypes=(("csv files", "*.csv"), ("all files", "*.*")))
    opened = False
    if self.filename[-4:] == ".csv":
        try:
            csv = open(self.filename)
            self.output = csv.read()
            opened = True
        except (IOError, OSError):
            # Opening a file raises an I/O error, not a SyntaxError.
            if isinstance(self.filename, str):
                self.output = "The file '" + self.filename + \
                    "' is not of the correct type. Please enter only .csv files."
            else:
                self.output = "No drone .csv file selected"
    elif isinstance(self.filename, str):
        self.output = "The file '" + self.filename + \
            "' is not of the correct type. Please enter only .csv files."
    else:
        self.output = "No drone .csv file selected"
    if opened:
        self.parent.mapFrame.map.set_drone_data(self.filename)
        self.output = "Drone data loaded"
    self.text_csv_2.insert("end", self.output)
def test_second_step_optional_columns_data_ok(self):
    """Test validation of optional columns data"""
    request = self.layer["request"]
    request.form = {
        "form.buttons.import": u"Importer",
        "form.widgets.column_0": u"parent_identifier",
        "form.widgets.column_1": u"identifier",
        "form.widgets.column_2": u"title",
        "form.widgets.column_3": u"informations",
        "form.widgets.decimal_import": u"False",
        "form.widgets.allow_empty": u"False",
    }
    annotations = IAnnotations(self.container)
    annotation = annotations[importform.ANNOTATION_KEY] = PersistentDict()
    annotation["has_header"] = False
    annotation["separator"] = u";"
    csv = StringIO()
    lines = [
        ["", "key1", "Key 1", "infos"],
        ["key1", "key1.1", "Key 1.1", ""],
        ["key1.1", "key1.1.1", "Key 1.1.1", ""],
    ]
    for line in lines:
        csv.write(";".join(line) + "\n")
    csv.seek(0)
    annotation["source"] = NamedBlobFile(
        data=csv.read(),
        contentType=u"text/csv",
        filename=u"test.csv",
    )
    form = importform.ImportFormSecondStep(self.container, request)
    form.updateFieldsFromSchemata()
    form.updateWidgets()
    data, errors = form.extractData()
    self.assertEqual(0, len(errors))
def test_write_element_into_csv_stripable_constraint(self):
    self.builder.parameters = {'property': u'P580,P582'}
    uuid.uuid4 = Mock(return_value="much unique such value")
    self.builder.write_element_into_csv(1929394, " funneh constraintname ")
    self.csv_file.close()
    with open("testData/test_constraints.csv", "rb") as csv:
        assert csv.read() == 'much unique such value,1929394,funneh constraintname,"{""property"": ""P580,P582""}"\r\n'
def members(request):
    if request.method == "POST":
        csv = request.FILES["csv-file"]
        lines = csv.read().split("\n")
        for line in lines[1:]:
            if len(line.strip()) > 0:
                add_update_member_from_farmigo_csv_entry(line)
    # query member data for page
    members = Member.objects.filter(season__name=CURRENT_SEASON).annotate(
        shift_count=Count("memberworkshift")
    ).extra(select={
        "weekly_veggie_count": SHARE_COUNT_QUERY % (VEGETABLES, WEEKLY),
        "biweekly_veggie_count": SHARE_COUNT_QUERY % (VEGETABLES, BIWEEKLY),
        "weekly_fruit_count": SHARE_COUNT_QUERY % (FRUIT, WEEKLY),
        "biweekly_fruit_count": SHARE_COUNT_QUERY % (FRUIT, BIWEEKLY),
        "weekly_egg_count": SHARE_COUNT_QUERY % (EGGS, WEEKLY),
        "biweekly_egg_count": SHARE_COUNT_QUERY % (EGGS, BIWEEKLY),
        "weekly_flower_count": SHARE_COUNT_QUERY % (FLOWERS, WEEKLY),
        "biweekly_flower_count": SHARE_COUNT_QUERY % (FLOWERS, BIWEEKLY),
        "cheese_count": SHARE_COUNT_QUERY % (CHEESE, NOT_APPLICABLE),
        "meat_count": SHARE_COUNT_QUERY % (MEAT, NOT_APPLICABLE),
        "pickles_count": SHARE_COUNT_QUERY % (PICKLES_AND_PRESERVES, NOT_APPLICABLE),
        "bread_count": SHARE_COUNT_QUERY % (BREAD, NOT_APPLICABLE),
        "plant_count": SHARE_COUNT_QUERY % (PLANTS, NOT_APPLICABLE),
    }).order_by("day", "first_name", "last_name")
    return render_to_response("admin_members.html",
                              RequestContext(request, {"members": members}))
import csv

friend = []  # module-level list assumed by the snippet

def load_friends():
    with open('friend.csv', 'rb') as friend_data:
        reader = csv.reader(friend_data)  # the csv module has no read(); use csv.reader
        for row in reader:
            # Don't rebind the vchat class name; use a separate variable.
            contact = vchat(row[0], row[1], row[2], row[3])
            friend.append(contact)
def test_second_step_import_encoding_form(self):
    """Test importing csv data with special chars in header and content"""
    form = importform.ImportFormSecondStep(self.container, self.layer["request"])
    annotations = IAnnotations(self.container)
    annotation = annotations[importform.ANNOTATION_KEY] = PersistentDict()
    annotation["has_header"] = True
    annotation["separator"] = u";"
    csv = StringIO()
    lines = [
        [u"猫".encode("utf8"), u"èè".encode("utf8"), u"ùù".encode("utf8")],
        ["", u"kéy1".encode("utf8"), u"Kèy 1".encode("utf8")],
        [u"kéy1".encode("utf8"), u"kéy1.1".encode("utf8"), u"猫".encode("utf8")],
    ]
    for line in lines:
        csv.write(";".join(line) + "\n")
    csv.seek(0)
    annotation["source"] = NamedBlobFile(
        data=csv.read(),
        contentType=u"text/csv",
        filename=u"test.csv",
    )
    form.update()
    exception = None
    try:
        render = form.render()
    except UnicodeDecodeError as e:
        exception = e
    self.assertIsNone(exception)
    self.assertTrue(u"Column {0}".format(u"猫") in render)
def members(request):
    if request.method == "POST":
        if "upload-button" in request.POST:
            csv = request.FILES["csv-file"]
            lines = csv.read().split("\n")
            for line in lines[1:]:
                if len(line.strip()) > 0:
                    add_update_member_from_farmigo_csv_entry(line)
        elif "assign-week-button" in request.POST:
            __assign_weeks()
    # query member data for page
    members = Member.objects.filter(season__name=CURRENT_SEASON).extra(select={
        "weekly_veggie_count": SHARE_COUNT_QUERY % (VEGETABLES, WEEKLY),
        "biweekly_veggie_count": SHARE_COUNT_QUERY % (VEGETABLES, BIWEEKLY),
        "weekly_fruit_count": SHARE_COUNT_QUERY % (FRUIT, WEEKLY),
        "biweekly_fruit_count": SHARE_COUNT_QUERY % (FRUIT, BIWEEKLY),
        "weekly_egg_count": SHARE_COUNT_QUERY % (EGGS, WEEKLY),
        "biweekly_egg_count": SHARE_COUNT_QUERY % (EGGS, BIWEEKLY),
        "weekly_flower_count": SHARE_COUNT_QUERY % (FLOWERS, WEEKLY),
        "biweekly_flower_count": SHARE_COUNT_QUERY % (FLOWERS, BIWEEKLY),
        "beer_count": SHARE_COUNT_QUERY % (BEER, NOT_APPLICABLE),
        "cheese_count": SHARE_COUNT_QUERY % (CHEESE, NOT_APPLICABLE),
        "meat_count": SHARE_COUNT_QUERY % (MEAT, NOT_APPLICABLE),
        "bread_count": SHARE_COUNT_QUERY % (BREAD, NOT_APPLICABLE),
        "plant_count": SHARE_COUNT_QUERY % (PLANTS, NOT_APPLICABLE),
    }).order_by("day", "first_name", "last_name")
    return render_to_response("admin_members.html",
                              RequestContext(request, {"members": members}))
def test_import_census_admin_correct(self):
    csv = open("census/testing_files/csv_admin.csv", 'rb')
    data = SimpleUploadedFile(content=csv.read(), name=csv.name,
                              content_type='multipart/form-data')
    u = User(username='******', password='******')
    u.save()
    rf = RequestFactory()
    content_type = 'multipart/form-data'
    headers = {
        'HTTP_CONTENT_TYPE': content_type,
        'HTTP_CONTENT_DISPOSITION': 'attachment; filename=csv_admin.csv'
    }
    request = rf.post('upload.html', {'file': data}, **headers)
    request.user = u
    response = admin.CensusAdmin.import_from_csv(self, request)
    self.assertEqual(response.status_code, 200)
    for row1, row2 in zip(request.FILES['file'], data):
        cadena1 = row1.decode('utf-8')
        cadena2 = row2.decode('utf-8')
        self.assertEqual(cadena1, cadena2)
    csv.close()
def test_second_step_basic_encoding(self):
    """Ensure that form can be displayed even with special characters"""
    form = importform.ImportFormSecondStep(self.container, self.layer["request"])
    annotations = IAnnotations(self.container)
    annotation = annotations[importform.ANNOTATION_KEY] = PersistentDict()
    annotation["has_header"] = False
    annotation["separator"] = u";"
    csv = StringIO()
    lines = [
        ["null", "key1", "key1.1", "Key 1.1", "informations"],
        [
            "null",
            "",
            u"key1 éà$€".encode("utf8"),
            u"Key 1 éà$€".encode("utf8"),
            u"informations éà$€".encode("utf8"),
        ],
    ]
    for line in lines:
        csv.write(";".join(line) + "\n")
    csv.seek(0)
    annotation["source"] = NamedBlobFile(
        data=csv.read(),
        contentType=u"text/csv",
        filename=u"test.csv",
    )
    exception = None
    try:
        form.update()
    except UnicodeDecodeError as e:
        exception = e
    self.assertIsNone(exception)
def run_sql(name, params):
    with open(rpath("postgres_results.csv"), "a") as result_csv:
        sql = rpath(name + ".sql")
        log.info("Running SQL query %s." % name)
        try:
            with open(sql, "r") as f:
                sql_query = f.read()
            # Substitute positional parameters :1, :2, ... into the query text.
            for i, (_, param_value) in enumerate(params):
                sql_query = re.sub(":%d(?![0-9]+)" % (i + 1), param_value, sql_query)
            with open("sql", "w") as f:
                f.write(sql_query)
            out_fn = "%s.csv" % name
            with open(out_fn, "w") as out:
                p = Popen(["psql", "-t", "-A", "-F", "|", DB], stdout=out, stdin=PIPE)
                p.communicate(input=("\\timing \n " + sql_query).encode())
                p.wait()
            with open(out_fn, "r") as csv:
                lines = csv.read().split("\n")
            # With \timing on, the last non-empty line is "Time: <n> ms";
            # lines[-1] is the empty string after the trailing newline.
            time = lines[-2]
            with open(out_fn, "w") as csv:
                # Drop the "Timing is on." banner and the timing line,
                # keeping only the result rows.
                csv.write("\n".join(lines[1:-2]))
            result_csv.write("%s,%s\n" % (name, time.split(" ")[1]))
            result_csv.flush()
        except Exception:
            log.exception("Failed to run SQL query %s." % name)
        log.info("Done running SQL query %s." % name)
def test_second_step_basic_delimiter(self):
    """Test edge case related to csv delimiter"""
    form = importform.ImportFormSecondStep(self.container, self.layer["request"])
    annotations = IAnnotations(self.container)
    annotation = annotations[importform.ANNOTATION_KEY] = PersistentDict()
    annotation["has_header"] = False
    annotation["separator"] = u","
    csv = StringIO()
    lines = [
        ["", "key1", "Key 1"],
        ["key1", "key1.1", '"Key 1,1"'],
        ["key1.1", "key1.1.1", '"Key 1.1.1"'],
    ]
    for line in lines:
        csv.write(",".join(line) + "\n")
    csv.seek(0)
    annotation["source"] = NamedBlobFile(
        data=csv.read(),
        contentType=u"text/csv",
        filename=u"test.csv",
    )
    exception = None
    try:
        form.update()
    except Exception as e:
        exception = e
    self.assertIsNone(exception)
def test_second_step_import_single_column(self):
    """Test importing csv data"""
    form = importform.ImportFormSecondStep(self.container, self.layer["request"])
    annotations = IAnnotations(self.container)
    annotation = annotations[importform.ANNOTATION_KEY] = PersistentDict()
    annotation["has_header"] = False
    annotation["separator"] = u";"
    csv = StringIO()
    lines = [
        ["", "key1", "Key 1"],
        ["", "key2", "Key 2"],
    ]
    for line in lines:
        csv.write(";".join(line) + "\n")
    csv.seek(0)
    annotation["source"] = NamedBlobFile(
        data=csv.read(),
        contentType=u"text/csv",
        filename=u"test.csv",
    )
    data = {
        "column_0": None,
        "column_1": "identifier",
        "column_2": None,
        "decimal_import": False,
        "allow_empty": False,
    }
    form._import(data)
    self.assertEqual(2, len(self.container))
    self.assertEqual(["key1", "key2"],
                     sorted([e.identifier for e in self.container.values()]))
    self.assertEqual(["key1", "key2"],
                     sorted([e.title for e in self.container.values()]))
import csv

def read_csv(f):
    if f is not None:
        with open(f, 'r') as file:
            reader = csv.reader(file)  # the csv module has no read(); use csv.reader
            data = [row for row in reader]
        return data
    else:
        raise ValueError("No file path given")
def read_and_write():
    api = FullContactClient()
    csv = CSVHelper()
    data = []
    emails = csv.read("input.csv", "email")
    for email in emails:
        dict_of_data = api.load_data_for(email)
        data.append(dict_of_data)
    csv.write("output.csv", data, ['email', 'name', 'linkedin'])
import csv
import random

def loadDataset(filename, split, trainingSet=[], testSet=[]):
    with open(filename, 'rb') as csvfile:
        lines = csv.reader(csvfile)  # the csv module has no read(); use csv.reader
        dataset = list(lines)
    for x in range(len(dataset) - 1):
        for y in range(4):
            dataset[x][y] = float(dataset[x][y])
        # random.random is a function and must be called; append the whole
        # row to one of the two sets.
        if random.random() < split:
            trainingSet.append(dataset[x])
        else:
            testSet.append(dataset[x])
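# A minimal usage sketch for loadDataset above -- 'iris.data' is a
# hypothetical filename for an iris-style csv with four numeric columns:
trainingSet = []
testSet = []
loadDataset('iris.data', 0.66, trainingSet, testSet)
print 'Train: ' + repr(len(trainingSet))
print 'Test: ' + repr(len(testSet))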
def populateWeatherCSV():
    year = 2000
    while year < 2015:
        url = getURL(urlp1, airport, year, urlp2)
        csv = urllib2.urlopen(url)
        filename = "kansas" + str(year) + ".csv"  # CHANGE THIS BEFORE EACH QUERY
        output = open(directory + filename, 'wb')
        output.write(csv.read())
        output.close()
        updateDB(directory + filename)
        year += 1
import csv
import numpy as np

def loadTrainData():
    l = []
    with open('train.csv') as file:
        lines = csv.reader(file)  # the csv module has no read(); use csv.reader
        for line in lines:
            l.append(line)
    l.remove(l[0])  # drop the header row
    l = np.array(l)
    label = l[:, 0]
    data = l[:, 1:]
    # toInt (defined elsewhere) presumably converts the string arrays to ints.
    return toInt(label), toInt(data)
def bake_chart_data(dataset_folder: str,
                    chart: dict,
                    output_file: str = "chart.json",
                    write: bool = True):
    out_dictionary = {
        "description": chart["description"],
        "legend": chart["legend"],
        "metadata": chart["metadata"],
        "data": [],
        "before": None,
        "section": chart["section"] if "section" in chart else "No section"
    }
    calculation_method = chart["calculate"]
    files = [
        file for file in os.listdir(dataset_folder)
        if file.lower().endswith(".csv")
    ]
    for file in files:
        with open(os.path.join(dataset_folder, file), "r") as csv:
            dataset = csv.read()
        out_dictionary["data"].append(
            transform_dataset(dataset, calculation_method, 99999.0))
    if "before" in chart:
        prev_chart = chart["before"]
        prev_chart["legend"] = chart["legend"]
        prev_chart["description"] = chart["description"]
        prev_chart["calculate"] = calculation_method
        prev_chart = bake_chart_data(os.path.join(dataset_folder, "before"),
                                     prev_chart, write=False)
        similarity = compare_graphs(out_dictionary, prev_chart)
        prev_chart.pop("legend", None)
        prev_chart.pop("description", None)
        prev_chart.pop("before", None)
        out_dictionary["before"] = prev_chart
        out_dictionary["similarity"] = similarity
    if write:
        with open(output_file, "w") as output:
            json.dump(out_dictionary, output)
    else:
        return out_dictionary
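# A minimal usage sketch for bake_chart_data; the dict keys below mirror the
# ones the function reads ("description", "legend", "metadata", "calculate",
# plus the optional "section"/"before"), while the folder name and the
# values themselves are purely illustrative.
example_chart = {
    "description": "Average value per dataset",
    "legend": ["value"],
    "metadata": {},
    "calculate": "mean",
    "section": "Examples",
}
bake_chart_data("datasets", example_chart, output_file="chart.json")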
import csv
import random

def train_val_split(ann_file, train_perc=0.7):
    """Split a training annotation file; also usable for generating
    small-sample datasets. Outputs a training set and a validation set.
    The object being split is ann_file itself, i.e. ann_file is decomposed
    into train_ann and val_ann.

    Args:
        ann_file (str): csv file path
        train_perc (float): fraction of lines assigned to the training set

    Returns:
        train_ann_file (csv)
        val_ann_file (csv)
    """
    with open(ann_file) as f:
        lines = list(csv.reader(f))  # the csv module has no read(); use csv.reader
    # Shuffle and split; the '.train'/'.val' output names are illustrative.
    random.shuffle(lines)
    n_train = int(len(lines) * train_perc)
    with open(ann_file + '.train', 'w', newline='') as f:
        csv.writer(f).writerows(lines[:n_train])
    with open(ann_file + '.val', 'w', newline='') as f:
        csv.writer(f).writerows(lines[n_train:])
import csv
import sys

def main():
    if len(sys.argv) != 3:
        print('Usage: python dna.py database.csv sequence.txt')
        quit()

    # Open the DNA database
    with open(sys.argv[1], mode='r') as dna_database:
        database = list(csv.DictReader(dna_database))

    # Open the suspect's DNA sequence (plain text, so read the file directly)
    with open(sys.argv[2], mode='r') as dna_sequence:
        sequence = dna_sequence.read()
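# A hedged sketch of the step this snippet stops short of: for each STR in
# the database header, count the longest run of consecutive repeats in the
# loaded sequence (the helper name is illustrative, not from the original).
def longest_run(sequence, str_pattern):
    # Longest number of back-to-back repeats of str_pattern in sequence.
    longest = 0
    step = len(str_pattern)
    for i in range(len(sequence)):
        count = 0
        while sequence[i + count * step:i + (count + 1) * step] == str_pattern:
            count += 1
        longest = max(longest, count)
    return longest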
def main(): IN = open("IN.csv", "r") IN_read = read(IN) for id, place_name, admin_name, latitude, longitude, accuracy in IN_read: db.execute( "INSERT INTO geo_data VALUES (:id,:place_name,:admin_name,:latitude,:longitude,:accuracy)", { "id": id, "place_name": place_name, "admin_name": admin_name, "latitude": latitude, "longitude": longitude, "accuracy": accuracy }) db.commit()
def test_second_step_import_encoding(self):
    """Test importing csv data with special chars in header and content"""
    form = importform.ImportFormSecondStep(self.container, self.layer["request"])
    annotations = IAnnotations(self.container)
    annotation = annotations[importform.ANNOTATION_KEY] = PersistentDict()
    annotation["has_header"] = True
    annotation["separator"] = u";"
    csv = StringIO()
    lines = [
        [u"猫".encode("utf8"), u"èè".encode("utf8"), u"ùù".encode("utf8")],
        ["", u"kéy1".encode("utf8"), u"Kèy 1".encode("utf8")],
        [u"kéy1".encode("utf8"), u"kéy1.1".encode("utf8"), u"猫".encode("utf8")],
    ]
    for line in lines:
        csv.write(";".join(line) + "\n")
    csv.seek(0)
    annotation["source"] = NamedBlobFile(
        data=csv.read(),
        contentType=u"text/csv",
        filename=u"test.csv",
    )
    data = {
        "column_0": "parent_identifier",
        "column_1": "identifier",
        "column_2": "title",
        "decimal_import": False,
        "allow_empty": False,
    }
    form._import(data)
    self.assertEqual(1, len(self.container))
    self.assertEqual([u"kéy1"], [e.identifier for e in self.container.values()])
    key1 = self.container.get_by("identifier", u"kéy1")
    self.assertEqual(1, len(key1))
    self.assertEqual([u"kéy1.1"], [e.identifier for e in key1.values()])
    key1_1 = key1.get_by("identifier", u"kéy1.1")
    self.assertEqual(u"猫", key1_1.title)
def test_second_step_columns_data_format_nok(self):
    """Test validation of columns data format"""
    request = self.layer["request"]
    request.form = {
        "form.buttons.import": u"Importer",
        "form.widgets.column_0": u"identifier",
        "form.widgets.column_1": u"title",
        "form.widgets.decimal_import": u"selected",
        "form.widgets.allow_empty": u"False",
    }
    annotations = IAnnotations(self.container)
    annotation = annotations[importform.ANNOTATION_KEY] = PersistentDict()
    annotation["has_header"] = False
    annotation["separator"] = u";"
    csv = StringIO()
    lines = [
        ["-1", "key1"],
        [".10", "key2"],
        ["-1.1", "key3"],
        ["-1 11", "Key4"],
    ]
    for line in lines:
        csv.write(";".join(line) + "\n")
    csv.seek(0)
    annotation["source"] = NamedBlobFile(
        data=csv.read(),
        contentType=u"text/csv",
        filename=u"test.csv",
    )
    form = importform.ImportFormSecondStep(self.container, request)
    form.updateFieldsFromSchemata()
    form.updateWidgets()
    data, errors = form.extractData()
    self.assertEqual(1, len(errors))
    self.assertEqual(
        "Bad format values: Line 4, col 1: '-1 11'",
        translate(errors[0].error.message),
    )
def generar_tabla_clientes():
    csv = open("./DB/clientes.csv", "r", encoding="utf-8")  # Open the csv file
    doc = csv.read()  # Returns the file contents as one string
    csv.close()  # Release the file for reading and writing
    lines = doc.split("\n")  # Split the string on the separator ("\n" is a line break)
    # Fill the variables that will be passed to the html template.
    fields = lines[0].split(",")  # Split the first line on commas to extract the field headers
    clients = lines[1:]  # List of the records from the second line onwards
    for idx, client in enumerate(clients):
        # Iterate over each record: the index goes in idx, the content in client.
        clients[idx] = clients[idx].split(',')  # Split each record string into a list on commas
    return render_template(
        'clientes.html',
        fields=fields,
        clients=clients,
        client_count=len(clients))  # Render the html template with fields and clients
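# Note: splitting records on ',' breaks if any field contains a quoted comma.
# A sketch of the same table build using the stdlib csv module (same file and
# template as above; the function name is illustrative), which handles
# quoting correctly:
import csv

def generar_tabla_clientes_v2():
    with open("./DB/clientes.csv", "r", encoding="utf-8", newline="") as f:
        rows = list(csv.reader(f))
    fields, clients = rows[0], rows[1:]
    return render_template('clientes.html', fields=fields, clients=clients,
                           client_count=len(clients))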
def load(self, fName):
    # Handle CSV file
    if fName.endswith(".csv"):
        self.channels = 60
        self.samples = 0
        self.deltaT = 0.001
        # One empty list per channel ([[]] * n would alias a single list).
        self.data = [[] for _ in range(self.channels)]
        # Assemble the columns, each column is one channel
        with open(fName) as infile:
            reader = csv.reader(infile, delimiter=",", quoting=csv.QUOTE_NONE)
            for row in reader:
                for ii in range(0, self.channels):
                    self.data[ii].append(float(row[ii]))
                # Count rows
                self.samples += 1
    else:
        # Load a labview file
        ll = labviewloader.LabViewLoader()
        ll.load(fName)
        # Load the parameters from the file
        self.channels = int(ll.getHeaderValue("Channels", 1))
        self.samples = int(ll.getHeaderValue("Samples", 1))
        self.deltaT = float(ll.getHeaderValue("Delta_X", 1))
        # collect all the data as floats
        self.data = []
        for channel in range(0, self.channels):
            temp = ll.getDataCol(channel)
            accumulator = []
            for value in temp:
                accumulator.append(float(str(value)))
            self.data.append(accumulator)
import matplotlib.pyplot as plt
import matplotlib.style as style
import numpy as np
import inspect
import csv

# Don't shadow the csv module with the file handle.
with open('../data/pts.csv') as f:
    plots = csv.reader(f)
    # print x, y
def generate_report(request_or_username, file_format='pdf', report_id=None,
                    area_id=None, return_as_file=False):
    '''
    Calls the generation functions (specified in the configuration) and
    returns the resulting document.
    '''
    if type(request_or_username) == str:
        username = request_or_username
        request = None
    else:
        username = request_or_username.user.get_full_name()
        request = request_or_username
    report_template = ReportTemplate.objects.get(pk=report_id)
    report_module = __import__(report_template.generation_module, fromlist='something')
    filename = "{0}_{1}.{2}".format(report_template.slug, area_id, file_format)
    # todo: improve interaction with return, saving archive, etc.
    if file_format == 'pdf':
        pdf = get_pdf_report(report_template, username, area_id, request=request)
        if return_as_file:
            return pdf
        else:
            return HttpResponse(pdf.getvalue(), mimetype='application/pdf')
    elif file_format == 'html':
        report = getattr(report_module, report_template.generation_function)(
            username, file_format='html', report_id=report_id, area_id=area_id,
            request=request)
        return HttpResponse(report, mimetype='text/html')
    elif file_format in ('rtf', 'docx', 'doc'):
        # These three branches only differ in the response mimetype.
        report = getattr(report_module, report_template.generation_function)(
            username, file_format=file_format, report_id=report_id,
            area_id=area_id, request=request)
        if return_as_file:
            return report
        if file_format == 'rtf':
            mimetype = 'application/rtf'
        else:
            mimetype = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
        response = HttpResponse(report, mimetype=mimetype)
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
        return response
    elif file_format == 'csv':
        grid = getattr(report_module, report_template.generation_function)(
            username, file_format='grid', report_id=report_id, area_id=area_id)
        csv = create_csv_from_grid(grid)
        if return_as_file:
            return csv
        else:
            response = HttpResponse(csv.read(), mimetype='application/csv')
            response['Content-Disposition'] = 'attachment; filename="%s.csv"' % 'rapport'
            return response
    elif file_format == 'xls':
        grid = getattr(report_module, report_template.generation_function)(
            username, file_format='grid', report_id=report_id, area_id=area_id)
        xls = create_xls_from_grid(grid)
        if return_as_file:
            return xls
        else:
            response = HttpResponse(xls.read(), mimetype='application/xls')
            response['Content-Disposition'] = 'attachment; filename="%s.xls"' % 'rapport'
            return response
    else:
        return HttpResponse('file format not supported', mimetype='text/html')
def main():
    # Create socket
    serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Get local machine name
    host = socket.gethostname()
    port = 9999
    # Bind the port
    serversocket.bind((host, port))
    # Queue up requests
    serversocket.listen(5)
    # Create Leap controller and listener
    listener = SampleListener()
    controller = Leap.Controller()
    controller.add_listener(listener)
    # True if the Leap is connected
    connected = listener.on_connect(controller)
    # Set last frame
    (oldFrame, leftWristPosition, leftWristRotation, rightWristPosition,
     rightWristRotation, IndexLeftData, ThumbLeftData, MiddleLeftData,
     RingLeftData, PinkyLeftData, IndexRightData, ThumbRightData,
     MiddleRightData, RingRightData, PinkyRightData) = listener.on_frame(controller)
    # Establish connection
    clientsocket, addr = serversocket.accept()
    # Create csv
    dir = os.path.dirname(os.path.realpath(__file__))
    csv = open(str(dir.replace("\\", "/")) + "/animDataServer.csv", 'w+b')
    # Set some variables for counting and checking
    frameCount = 0

    def write_hand(hand, fingers):
        # One row per bone (4 bones) for each finger of the given hand;
        # produces the same strings as the original per-finger loops.
        for name, fingerData in fingers:
            for i in range(0, 4):
                csv.write('"' + hand + ' ' + name + ' "' + str(fingerData[i][0]) + ":" + "," +
                          str(frameCount) + "," + str(fingerData[i][1][0]) + "," +
                          str(fingerData[i][1][1]) + "," + str(fingerData[i][1][2]) + "\n")

    # Parse frame data to frame number and wrist position and send to the client pc
    while True:
        if not msvcrt.kbhit():
            if oldFrame != listener.on_frame(controller)[0]:
                if str(leftWristPosition) != '0':  # if we have legit values, continue
                    if leftWristPosition[0] == 0 and leftWristPosition[1] == 0 and leftWristPosition[2] == 0:
                        # Write 0's if we can't find the wrist
                        csv.write('"leftWrist",' + str(frameCount) + ',0,0,0,0,0,0\n')
                    else:
                        leftWristPosition = re.sub('[()]', '', str(leftWristPosition))
                        leftWristPosition = re.sub(r'\s+', '', str(leftWristPosition))
                        csv.write('"leftWrist",' + str(frameCount) + ',' + str(leftWristPosition) + ',' +
                                  str(360 - leftWristRotation[1]) + ',' + str(leftWristRotation[0]) + ',' +
                                  str(leftWristRotation[2]) + ',\n')
                        write_hand("Left", [("Thumb", ThumbLeftData), ("Index", IndexLeftData),
                                            ("Middle", MiddleLeftData), ("Ring", RingLeftData),
                                            ("Pinky", PinkyLeftData)])
                if str(rightWristPosition) != '0':  # if we have legit values, continue
                    if rightWristPosition[0] == 0 and rightWristPosition[1] == 0 and rightWristPosition[2] == 0:
                        csv.write('"rightWrist",' + str(frameCount) + ',0,0,0,0,0,0\n')
                    else:
                        # Write wrist location and rotations and bone rotations
                        rightWristPosition = re.sub('[()]', '', str(rightWristPosition))
                        rightWristPosition = re.sub(r'\s+', '', str(rightWristPosition))
                        csv.write('"rightWrist",' + str(frameCount) + ',' + str(rightWristPosition) + ',' +
                                  str(360 - rightWristRotation[1]) + ',' + str(rightWristRotation[0]) + ',' +
                                  str(rightWristRotation[2]) + ',\n')
                        write_hand("Right", [("Thumb", ThumbRightData), ("Index", IndexRightData),
                                             ("Middle", MiddleRightData), ("Ring", RingRightData),
                                             ("Pinky", PinkyRightData)])
            (oldFrame, leftWristPosition, leftWristRotation, rightWristPosition,
             rightWristRotation, IndexLeftData, ThumbLeftData, MiddleLeftData,
             RingLeftData, PinkyLeftData, IndexRightData, ThumbRightData,
             MiddleRightData, RingRightData, PinkyRightData) = listener.on_frame(controller)
            frameCount += 1
        else:
            print listener.on_disconnect(controller)
            break
    # Handle data transfer
    csv.seek(0)
    data = csv.read(1024)
    while data:
        print 'Sending Leap Motion data...'
        clientsocket.send(data)
        data = csv.read(1024)
    csv.close()
    print "Sent Data and Closing"
    os.remove(str(dir.replace("\\", "/")) + "/animDataServer.csv")
    clientsocket.shutdown(socket.SHUT_WR)
for struc_comp in poses:
    if os.path.isfile(struc_comp):
        # Compare the number of heavy atoms of each structure to avoid an
        # error in the rmsd calculation.
        mol_ref = next(pybel.readfile('sdf', struc_ref))
        mol_comp = next(pybel.readfile('sdf', struc_comp))
        atom_ref = len([atom for atom in mol_ref.atoms if atom.atomicnum != 1])
        atom_comp = len([atom for atom in mol_comp.atoms if atom.atomicnum != 1])
        if atom_ref == atom_comp:
            # RMSD calculation between two active poses from the same ligand
            try:
                calculrmsd = (os.path.join(os.environ['SCHRODINGER'], 'run') +
                              " rmsd.py -m " + struc_ref + ' ' + struc_comp +
                              " -c %s.csv" % struc_comp)
                subprocess.call(calculrmsd.split())
                # If the RMSD value is lower than 1.5 A, we consider that they
                # are the same pose and one copy is removed; if the rmsd value
                # is higher, both copies are kept.
                csv = open(struc_comp + '.csv', 'rb')
                csvfile = csv.read()
                csv.close()
                rmsd = float(csvfile.split('","')[9])
                print hetid, rmsd
                if rmsd <= 1.5:
                    print 'They are the SAME pose'
                    os.remove(struc_comp)
                else:
                    print 'They are DIFFERENT pose'
            except Exception, error:
                print hetid, error
            os.remove(struc_comp + '.csv')
        else:
            print 'They DO NOT have the same number of atoms: %s vs %s' % (atom_ref, atom_comp)
t.sleep(0.5)
# -----------------------------------------------------------------------------
# Read a list of gene names by tier
tiers = ["tier1", "tier2", "tier3", "nontiered"]
# -----------------------------------------------------------------------------
# First for loop to easily open and read through tier files.
for tier in tiers:
    tier_file = open(tier + "genes.csv")
    tier_reader = csv.reader(tier_file)  # the csv module has no read(); use csv.reader

    # Let me know where I am right before I start the loop.
    print("\n" + "The current working directory is " + os.getcwd() + (2 * "\n"))
    t.sleep(.5)

    Gene_count = 0
    # ---------------------------------------------------------------------------
    # 2nd loop. It creates gene directories for output.
    for Gene in tier_reader:
        Gene_count = Gene_count + 1
if __name__ == '__main__':
    # Get the input file name
    argparser = argparse.ArgumentParser("Load and graph a labview LVM file")
    argparser.add_argument("file_path", type=str, nargs=1, help="The file to load.")
    args = argparser.parse_args()
    allspikes = []
    # Handle CSV files too
    if args.file_path[0].endswith(".csv"):
        channels = 60
        samples = 30000
        # One empty list per channel ([[]] * n would alias a single list).
        data = [[] for _ in range(channels)]
        # Assemble the columns, each column is one channel
        with open(args.file_path[0]) as infile:
            reader = csv.reader(infile, delimiter=",", quoting=csv.QUOTE_NONE)
            for row in reader:
                for ii in range(0, channels):
                    data[ii].append(row[ii])
        # Process into spikes
        for channel in range(0, channels):
            chData = data[channel]
            spikes = chn2spike(data, channel, deltaT)
            allspikes.extend(spikes)
    else:
        # Load the file
        ll = labviewloader.LabViewLoader()
        ll.load(args.file_path[0])
        # for each channel, calculate the spike timings
        channels = int(ll.getHeaderValue("Channels", 1))
#!/usr/bin/env python3
import csv

# Simplest example of reading a CSV file
with open('some.csv', newline='') as f:
    reader = csv.reader(f)
    for row in reader:
        print(row)

# Reading a file with an alternate format:
with open('passwd', newline='') as f:
    reader = csv.reader(f, delimiter=':', quoting=csv.QUOTE_NONE)
    for row in reader:
        print(row)

# Simplest writing example
with open('some.csv', mode='w', newline='') as f:
    writer = csv.writer(f)
    writer.writerows(someiterable)

# Better interface? (hypothetical sketch -- the csv module has no
# module-level read()/write() functions)
with open('some.csv') as f:
    for line in f:
        print(csv.read(line))

with open('some.csv', mode='w') as f:
    csv.write(line for line in f)
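# The hypothetical read()/write() sketched above don't exist; the closest
# built-in higher-level interface is DictReader/DictWriter, which map each
# row to a dict keyed by the header ('name'/'age' are illustrative fields):
with open('some.csv', newline='') as f:
    for row in csv.DictReader(f):
        print(row)

with open('some.csv', mode='w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=['name', 'age'])
    writer.writeheader()
    writer.writerows([{'name': 'Ada', 'age': 36}])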
import math
import csv

with open('data.csv', newline='') as f:
    reader = csv.reader(f)  # the csv module has no read(); use csv.reader
    file_data = list(reader)

data = file_data[0]

def mean(data):
    n = len(data)
    total = 0
    for x in data:
        total += int(x)
    return total / n

squared_list = []
for number in data:
    a = int(number) - mean(data)
    a = a ** 2
    squared_list.append(a)

total = 0  # don't shadow the builtin sum
for i in squared_list:
    total = total + i

# dividing the sum by the total values
result = total / (len(data) - 1)

# getting the deviation by taking square root of the result
std_deviation = math.sqrt(result)
print(std_deviation)
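# Cross-check for the manual loop above (requires Python 3.4+):
# statistics.stdev uses the same sample formula with the n - 1 denominator.
import statistics
print(statistics.stdev(int(x) for x in data))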
import scraperwiki, urllib2, lxml.html, unicodedata, csv

# url='https://tools.usps.com/go/ZipLookupResultsAction!input.action?resultMode=0&companyName=&address1=1483+trotters+cv&address2=&city=atlanta&state=ga&urbanCode=&postalCode=&zip='
data = scraperwiki.scrape("https://sites.google.com/site/mlhdatadump/addresslookup/AddrTest.csv")
reader = csv.reader(data.splitlines())  # the csv module has no read(); use csv.reader
for row in reader:
    # Assumption: the address, city and state are the first three columns.
    LookupAddr, LookupCity, LookupState = row[0], row[1], row[2]
    url = ('https://tools.usps.com/go/ZipLookupResultsAction!input.action?resultMode=0&companyName=&address1=' +
           LookupAddr + '&address2=&city=' + LookupCity + '&state=' + LookupState +
           '&urbanCode=&postalCode=&zip=')
    html = urllib2.urlopen(url).read()
    print html
    root = lxml.html.fromstring(html).get_element_by_id('result-list')
    for cdata in root.cssselect("[class='data']"):
        data = {}
        for addressbit in cdata.cssselect("[class='std-addrss'] span"):
            bitname = addressbit.attrib['class']
            bittext = addressbit.text_content()
            bitname = unicodedata.normalize('NFKD', unicode(bitname)).encode('ascii', 'ignore').replace('/', '-')
            data[bitname] = bittext
        for hiddenbit in cdata.cssselect("dl[class='details'] dt"):
            bitname = hiddenbit.text_content()
            bittext = hiddenbit.getnext().text_content()
            bitname = unicodedata.normalize('NFKD', unicode(bitname)).encode('ascii', 'ignore').replace('/', '-')
            data[bitname] = bittext
        print data
        scraperwiki.sqlite.save(table_name='address', data=data, unique_keys=[])
import csv

# Wrap the file-reading code in a method.
class CsvFileManager2:
    @classmethod
    def read(cls):
        path = r'C:\Users\51Testing\PycharmProjects\selenium7th\data\test_data.csv'
        file = open(path, 'r')
        # Read the opened csv file through the csv library to get the data set.
        data_table = csv.reader(file)
        # The for loop puts each row of the data set (data_table) in item,
        # so it runs once per row.
        for item in data_table:
            print(item)

# To try this method out:
if __name__ == '__main__':
    # With @classmethod the method can be called on the class directly,
    # without instantiating an object first.
    CsvFileManager2.read()
from random import shuffle
from csv import reader as read

csv_file = open("Verbs.csv", "rb")
reader = read(csv_file)
verbs = {}
sep = "\xc2\xa0or\xc2\xa0"

for row in reader:
    infinitive, s_past, p_participle = row
    if sep in s_past:
        s_past = s_past.split(sep)
    if sep in p_participle:
        p_participle = p_participle.split(sep)
    verbs[infinitive] = {"infinitive": infinitive,
                         "past simple": s_past,
                         "past participle": p_participle}

verbs_list = verbs.keys()
incorrect_verbs = []

def display_options(options, data):
    while True:
        print "What would you like to do?:\n"
        for option in options:
            option_number = options.index(option) + 1
            options_text = str(option_number) + ") " + option["text"]
            print options_text
        option = raw_input("\n")
import csv

def create_ftse100():
    ftsefile = open('ftse100.csv', 'r')  # the filename must be a string
    ftsereader = csv.reader(ftsefile, delimiter=' ')  # the csv module has no read(); use csv.reader
import util, csv
# NOTE: csv here must be a project-local helper module, not the stdlib csv,
# which has no read()/index() functions.

# load region_id, name, state from .MID
# load region_id, Tot_P_M, Tot_P_F, Tot_P_P from B01
# load region_id, Median_age_persons, Median_rent_weekly, Median_Tot_prsnl_inc_weekly, Median_Tot_hhd_inc_weekly, Median_rent_weekly, Average_household_size from B02
region_headers, region_rows = csv.read('data', 'UCL_2011_AUST.MID',
                                       headers=['region_id', 'name', 'state_id', 'state', 'area'])
regions = csv.index(region_rows, region_headers, 'region_id', ['name', 'state', 'area'])

b01_headers, b01_rows = csv.read('data', '2011Census_B01_AUST_UCL_short.csv')
b01 = csv.index(b01_rows, b01_headers, 'region_id', ['Tot_P_M', 'Tot_P_F', 'Tot_P_P'])

b02_headers, b02_rows = csv.read('data', '2011Census_B02_AUST_UCL_short.csv')
b02 = csv.index(b02_rows, b02_headers, 'region_id',
                ['Median_Tot_prsnl_inc_weekly', 'Median_Tot_hhd_inc_weekly',
                 'Median_rent_weekly_', 'Average_household_size'])

# combine into one csv
rows = [['region_id', 'name', 'state', 'area', 'pop_total', 'pop_male', 'pop_female',
         'med_weekly_personal_income', 'med_weekly_household_income', 'med_weekly_rent',
         'avg_household_size']]
state_abbrev = {
    'Queensland': 'qld',
    'New South Wales': 'nsw',
    'Australian Capital Territory': 'act',
    'South Australia': 'sa',
    'Western Australia': 'wa',
    'Victoria': 'vic',
    'Northern Territory': 'nt',
    'Tasmania': 'tas',
    'Other Territories': 'ot'
}
for region_id, r in regions.items():
import util, csv, re
# combine ABS data with lat/lng from gazetteer
# (csv here is the same project-local helper module as above)

gaz_headers, gaz_rows = csv.read('output', 'gazetteer.csv')
gaz = csv.index(gaz_rows, gaz_headers, 'name')
census_headers, census_rows = csv.read('output', 'census.csv')

def lookup(name, state):
    key = re.sub(r' *\([^\)]*\)', '', name.lower())
    key = re.sub(r' - .*', '', key)  # 2-part names, will use coordinates for first town
    if key not in gaz:
        # raise Exception("Couldn't find {} in {}".format(name, state))
        return None
    matches = gaz[key]
    if type(matches) is not list:
        matches = [matches]
    matches = [m for m in matches if m['state'] == state.upper()]
    if len(matches) == 0:
        # raise Exception("Couldn't find {} in {}".format(name, state))
        return None
    pref = ['LOCB', 'URBN', 'PRSH', 'POPL', 'SUB', 'HMSD', 'LOCU']
    for p in pref:
        x = [m for m in matches if m['feat_code'] == p]
        if len(x) > 0:
            return x[0]
import csv

class CSVReader:
    """
    csv files reader
    """

    def __init__(self):
        """
        initialize new csv reader
        """

    def read(self, Filename):
        """
        read a csv file
        :param Filename: file name
        :return: x, y - points of axis
        """
        x = []
        y = []
        with open(Filename, newline='') as file:
            # The file is ';'-separated, so pass the delimiter to csv.reader
            # instead of splitting the stringified row afterwards.
            for row in csv.reader(file, delimiter=';'):
                print(row)
                x.append(row[0])
                print(x)
                y.append(row[1])
                print(y)
        return x, y

print(csv.__file__)
csv_reader = CSVReader()  # don't shadow the csv module with the instance
csv_reader.read('points_sheet.csv')
import csv

def store_data(self, data_file):
    data = []
    with open(data_file + self.dh.UNCOMPRESSED_FILE_EXT, 'r') as f:
        data = list(csv.reader(f))  # the csv module has no read(); use csv.reader
    for row in data:
        self.db.sp_db_update(self.dh.UPDATE_QUERIES[data_file])