def load_csv(out_filename, encoding='latin1'):
    """
    The GUI CSV loading function.

    Considers:
     - extension in determining how to load a file (e.g. commas or not)
     - header line of file for information regarding data types

    Parameters
    ----------
    out_filename : str
        path to a .csv/.dat/.txt results file
    encoding : str; default='latin1'
        text encoding used to open the file

    Returns
    -------
    A : np.ndarray
        the loaded data table
    fmt_dict : dict
        maps a header name to its format info (built by _load_format_header)
    names : List[str]
        the column names from the header line

    Raises
    ------
    NotImplementedError
        if the file extension is unsupported
    RuntimeError
        if the file body cannot be parsed
    """
    ext = os.path.splitext(out_filename)[1].lower()
    if ext not in ('.csv', '.dat', '.txt'):
        raise NotImplementedError(
            'extension=%r is not supported (use .dat, .txt, or .csv)' % ext)

    with open(_filename(out_filename), 'r', encoding=encoding) as file_obj:
        # the header defines the column names, dtypes, and delimiter
        names, fmt_dict, dtype, delimiter = _load_format_header(
            file_obj, ext, force_float=False)

        try:
            A = loadtxt_nice(file_obj, dtype=dtype, comments='#',
                             delimiter=delimiter)
        except Exception as error:
            # was a bare ``except:``; narrowed so SystemExit/KeyboardInterrupt
            # propagate, and the original failure is chained for debugging
            traceback.print_exc(file=sys.stdout)
            msg = 'extension=%r nheaders=%s delimiter=%r dtype=%s' % (
                ext, len(names), delimiter, dtype)
            raise RuntimeError(msg) from error
    return A, fmt_dict, names
def test_file_roundtrip(self):
    """per numpy"""
    # write a small integer array with numpy, read it back with our loader
    expected = np.array([(1, 2), (3, 4)])
    np.savetxt('temp.txt', expected)

    actual = loadtxt_nice('temp.txt')
    assert_array_equal(expected, actual)

    # clean up the scratch file
    os.remove('temp.txt')
def test_record(self):
    """per numpy"""
    stream = StringIO()
    stream.write('1 2\n3 4')
    stream.seek(0)

    # structured dtype given as a list of (name, dtype) pairs
    record_dtype = [('x', np.int32), ('y', np.float32)]
    x = loadtxt_nice(stream, dtype=record_dtype)
    unused_x2 = np.loadtxt(stream, dtype=record_dtype)
    #print('x =', x, type(x2))
    #print('x2 =', x2, type(x2))
    #a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
    #assert_array_equal(x, a)

    gender_stream = StringIO()
    gender_stream.write('M 64 75.0\nF 25 60.0')
    gender_stream.seek(0)

    # structured dtype given in dict form
    mydescriptor = {
        'names': ('gender', 'age', 'weight'),
        'formats': ('S1', 'i4', 'f4'),
    }
    b = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
                 dtype=mydescriptor)
    y = loadtxt_nice(gender_stream, dtype=mydescriptor)
def _add_user_points_from_csv(self, csv_points_filename: str, name: str,
                              color, point_size: int = 4):
    """
    Helper method for adding csv nodes to the gui

    Parameters
    ----------
    csv_points_filename : str
        CSV filename that defines one xyz point per line
    name : str
        name of the geometry actor
    color : List[float, float, float]
        RGB values; [0. to 1.]
    point_size : int; default=4
        the nominal point size
    """
    is_failed = True
    try:
        check_path(csv_points_filename, 'csv_points_filename')

        # read input file; fall back to the nicer loader when
        # np.loadtxt chokes (it can't handle leading spaces?)
        try:
            user_points = np.loadtxt(csv_points_filename,
                                     comments='#', delimiter=',')
        except ValueError:
            user_points = loadtxt_nice(csv_points_filename,
                                       comments='#', delimiter=',')
            #raise
    except ValueError as error:
        # report the failure in the gui log instead of crashing
        #self.log_error(traceback.print_stack(f))
        self.gui.log_error('\n' + ''.join(traceback.format_stack()))
        #traceback.print_exc(file=self.log_error)
        self.gui.log_error(str(error))
        return is_failed

    self._add_user_points(user_points, name, color, csv_points_filename,
                          point_size=point_size)
    is_failed = False
    return False
def load_deflection_csv(out_filename: str, encoding: str = 'latin1'):
    """
    The GUI deflection CSV loading function.

    Considers:
     - extension in determining how to load a file (e.g. commas or not)
     - header line of file for information regarding data types

    Parameters
    ----------
    out_filename : str
        path to a .csv/.dat/.txt deflection results file; the first column
        is an index (node ids), followed by 3 columns per result name
    encoding : str; default='latin1'
        text encoding used to open the file

    Returns
    -------
    B : Dict[str, np.ndarray]
        maps each result name to its (nnodes, 3) deflection block
    nids_index : np.ndarray
        the first column of the file (the node ids)
    fmt_dict_without_index : dict
        the format info for each result name (index column excluded)
    names_without_index : List[str]
        the result names (header names minus the index column)

    Raises
    ------
    NotImplementedError
        if the file extension is unsupported
    RuntimeError
        if the file body cannot be parsed
    ValueError
        if the array is not 2-D or has the wrong number of columns
    """
    ext = os.path.splitext(out_filename)[1].lower()
    if ext not in ('.csv', '.dat', '.txt'):
        raise NotImplementedError(
            'extension=%r is not supported (use .dat, .txt, or .csv)' % ext)

    with open(_filename(out_filename), 'r', encoding=encoding) as file_obj:
        names, fmt_dict, dtype, delimiter = _load_format_header(
            file_obj, ext, force_float=False)

        try:
            # dtype is intentionally not passed; everything loads as float
            A = loadtxt_nice(file_obj, comments='#', delimiter=delimiter)
        except Exception as error:
            # was a bare ``except:``; narrowed so SystemExit/KeyboardInterrupt
            # propagate, and the original failure is chained for debugging
            traceback.print_exc(file=sys.stdout)
            msg = 'extension=%r nheaders=%s delimiter=%r dtype=%s' % (
                ext, len(names), delimiter, dtype)
            raise RuntimeError(msg) from error

    # the first header name is the index column; drop it
    names_without_index = names[1:]
    fmt_dict_without_index = {
        key: fmt_dict[key] for key in names_without_index}

    nnames_without_index = len(names_without_index)
    # 1 index column + (dx, dy, dz) per result name
    nexpected_results = 1 + 3 * nnames_without_index

    try:
        _nrows, ncols = A.shape
    except ValueError:
        # A is not 2-D (e.g. a single row collapsed to 1-D)
        msg = ('A should be (nnodes, 1+ndeflection_results); '
               'A.shape=%s nexpected_results=%s names=%s' % (
                   str(A.shape), nexpected_results, names))
        raise ValueError(msg)

    if ncols != nexpected_results:
        msg = 'A.shape=%s ncols=%s nexpected_results=%s names=%s nnames_without_index=%s' % (
            str(A.shape), ncols, nexpected_results, names, nnames_without_index)
        raise ValueError(msg)

    B = {}
    nids_index = A[:, 0]
    for i, name in enumerate(names_without_index):
        # columns [1+3i, 1+3i+3) hold the (dx, dy, dz) block for this name
        B[name] = A[:, 1 + 3 * i:1 + 3 * i + 3]

    assert len(B) == len(fmt_dict_without_index), (
        'B.keys()=%s fmt_dict.keys()=%s' % (
            list(B.keys()), list(fmt_dict_without_index.keys())))
    assert len(B) == len(names_without_index), (
        'B.keys()=%s names.keys()=%s' % (
            list(B.keys()), names_without_index))
    return B, nids_index, fmt_dict_without_index, names_without_index
def test_savetxt_nice(self):
    """tests that we can reimplement savetxt
    so it works on unicode for unicode file handlers"""
    eye = np.eye(10)
    csv_filename = 'savetxt_real.csv'
    savetxt_nice(csv_filename, eye, fmt='%.18e', delimiter=',',
                 newline='\n', header='', footer='', comments='# ')

    # the wrong delimiter must be rejected
    with self.assertRaises(ValueError):
        loadtxt_nice(csv_filename, delimiter=' ', skiprows=0, comments='#',
                     dtype=np.float64, converters=None, usecols=None,
                     unpack=False, ndmin=0)

    # the right delimiter round-trips the array
    A2 = loadtxt_nice(csv_filename, delimiter=',', skiprows=0, comments='#',
                      dtype=np.float64, converters=None, usecols=None,
                      unpack=False, ndmin=0)
    assert np.array_equal(eye, A2), 'expected:\n%s\nactual:\n%s' % (eye, A2)
    os.remove(csv_filename)

    csv_filename = 'savetxt_complex.csv'
    B = np.eye(10, dtype='complex128') - 2 * eye * 1j
    savetxt_nice(csv_filename, B, fmt='%.18e', delimiter=',', newline='\n',
                 header='', footer='', comments='# ')
    with self.assertRaises(ValueError):  ## TODO: mistake
        unused_B2 = loadtxt_nice(csv_filename, delimiter=',', skiprows=0,
                                 comments='#', dtype=np.float64,
                                 converters=None, usecols=None,
                                 unpack=False, ndmin=0)
    #assert np.array_equal(B, B2), 'expected:\n%s\nactual:\n%s' % (B, B2)
    os.remove(csv_filename)

    if 0:  ## TODO: not done with filehandle test
        with open(csv_filename, 'w') as csv_file:
            savetxt_nice(csv_file, B, fmt='%.18e', delimiter=',',
                         newline='\n', header='', footer='', comments='# ')
        os.remove(csv_filename)

    with self.assertRaises(FileNotFoundError):
        B2 = loadtxt_nice('missing.txt', delimiter=',', skiprows=0,
                          comments='#', dtype=np.float64, converters=None,
                          usecols=None, unpack=False, ndmin=0)
def test_loadtxt_nice(self):
    """tests that we can reimplement loadtxt so it has good error messages"""
    def _compare(text, **kwargs):
        # compare loadtxt_nice against np.loadtxt (the reference
        # implementation) on identical input/options
        x1, y1 = np.loadtxt(StringIO(text), delimiter=',',
                            usecols=(0, 2), unpack=True, **kwargs)
        x2, y2 = loadtxt_nice(StringIO(text), delimiter=',',
                              usecols=(0, 2), unpack=True, **kwargs)
        assert np.array_equal(x1, x2), 'x1=%s x2=%s' % (x1, x2)
        assert np.array_equal(y1, y2), 'y1=%s y2=%s' % (y1, y2)

    # was 4 copy-pasted sections; same inputs/options, one per call
    _compare("1,0,2\n3,0,4")
    _compare("#1,0,2\n3,0,4")   # leading comment line is skipped
    _compare("#1,0,2\n3,0,4", ndmin=1)
    _compare("#1,0,2\n3,0,4", ndmin=2)