def run_experiment(verbose, tensorboard_log, learning_rate):
    # NOTE: num_objs, save_video_interval, save_video_length, nIter, expDir,
    # name and logger are assumed to be defined at module level.
    env = make_vec_env(
        'PointMassDense-%d-v1' % num_objs, 1,
        wrapper_class=FlattenDictWrapper,
        wrapper_env_kwargs=['observation', 'achieved_goal', 'desired_goal'])
    env = VecVideoRecorder(
        env, osp.join(logger, "videos"),
        record_video_trigger=lambda x: x % save_video_interval == 0,
        video_length=save_video_length)

    n_actions = env.action_space.shape[-1]
    stddev = 0.2  # defined but unused; the noise sigma below is hard-coded to 0.1
    action_noise = NormalActionNoise(mean=np.zeros(n_actions),
                                     sigma=0.1 * np.ones(n_actions))

    model = SAC(
        MlpPolicy,
        env,
        verbose=verbose,
        tensorboard_log=logger,
        learning_rate=learning_rate,
        action_noise=action_noise,
    )
    model.learn(total_timesteps=int(nIter), log_interval=100)
    model.save(expDir + "/%s/%s_%s" % (name,
                                       np.format_float_scientific(nIter),
                                       np.format_float_scientific(learning_rate)))
    env.close()
def generate_metrics(feat: str, cdf_variable: np.ndarray) -> Dict[str, Any]:
    """
    Generate metrics about the given feature

    Parameters:
    -----------
    feat (str) : name of feature
    cdf_variable (np.ndarray) : data for feature
        (e.g. [time, pressure levels, lat, lon])

    Returns:
    --------
    feat_info (dict[str, any]) : metrics for feature
    """
    data = cdf_variable[:]
    valid_range = ""
    if hasattr(cdf_variable, "valid_range"):
        valid_range = str(cdf_variable.valid_range)
    feat_info = {
        "name": feat,
        "long_name": cdf_variable.long_name.decode("utf-8"),
        "units": cdf_variable.units.decode("utf-8"),
        "valid_range": valid_range,
        "current_shape": str(data.shape),
        "mu": np.format_float_scientific(np.mean(data), precision=2),
        "std": np.format_float_scientific(np.std(data), precision=2),
        "max": np.format_float_scientific(np.amax(data), precision=2),
        "min": np.format_float_scientific(np.amin(data), precision=2),
    }
    return feat_info
def format_scientific_with_exp(value):
    is_numpy = type(value).__module__ == np.__name__
    if value is None:
        return 'None'
    elif is_numpy:
        type_name = type(value).__name__
        if 'float' in type_name or 'int' in type_name:
            return format_scientific(value.item())
    elif torch.is_tensor(value):
        return format_scientific(value.item())
    elif isinstance(value, int):
        if abs(value) > 1e3 or abs(value) < 1e-3:
            return np.format_float_scientific(value, precision=3, trim='-',
                                              exp_digits=1)
        else:
            return str(value)
    elif isinstance(value, float):
        if abs(value) > 1e3 or abs(value) < 1e-3:
            return np.format_float_scientific(value, precision=3, trim='-',
                                              exp_digits=1)
        else:
            return np.format_float_positional(value, precision=4, trim='-')
    return str(value)
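# Minimal usage sketch for format_scientific_with_exp (illustrative only; plain
# Python ints/floats are shown so the numpy/torch branches are not exercised,
# and the exact strings depend on NumPy's formatting):
print(format_scientific_with_exp(42))       # -> '42'        (small int, returned as-is)
print(format_scientific_with_exp(123456))   # -> '1.235e+5'
print(format_scientific_with_exp(0.25))     # -> '0.25'      (positional, at most 4 digits)
print(format_scientific_with_exp(0.0001))   # -> '1e-4'      (trim='-' drops the trailing dot)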
def plot_heatmaps(artists, dimension, min, max):
    range_r = np.zeros((dimension))
    range_c = np.zeros((dimension))
    step_r = (max[0] - min[0]) / dimension
    step_c = (max[1] - min[1]) / dimension
    for i, n in enumerate(range_c):
        left = min[0] + i * step_r
        right = min[0] + (i + 1) * step_r
        range_r[i] = (right + left) / 2
        left = min[1] + i * step_c
        right = min[1] + (i + 1) * step_c
        range_c[i] = (right + left) / 2
    range_c = [np.format_float_scientific(s, exp_digits=2, precision=1)
               for s in range_c]
    range_r = [np.format_float_scientific(s, exp_digits=2, precision=1)
               for s in range_r]
    for a in artists.values():
        fig, ax = plt.subplots()
        im, cbar = heatmap(a.tsne_heatmap, range_r, range_c, ax=ax,
                           cmap="viridis", cbarlabel="songs concentration")
        title = "TSNE Heatmap for " + a.name
        filename = './Heatmaps/' + a.id
        ax.set_title(title)
        fig.tight_layout()
        plt.savefig(filename, dpi=300)
        plt.close('all')
def trans_distribution():
    global MINNECC
    result = get_unique_transactions()

    # Convert result to numpy array
    satoshis = numpy.array([res[0] for res in result], dtype=numpy.int64)

    # Calculate histogram (the deprecated `normed` argument is dropped;
    # numpy.histogram returns raw counts by default)
    hist, edges = numpy.histogram(satoshis, bins=10)

    # Define chart data: bin-edge ranges as labels, counts as values
    data = {
        'x': [
            "{}-{}".format(
                numpy.format_float_scientific(edges[i], precision=6),
                numpy.format_float_scientific(edges[i + 1], precision=6))
            for i in range(len(edges) - 1)
        ],
        'y': hist.tolist()
    }
    MINNECC = int(_calc_useful_data(satoshis))
    return data
def source_read():  # source file
    line_array = []
    # sysargv is assumed to be sys.argv, imported/aliased elsewhere in the module
    source = open(sysargv[1])
    # check for line 25 if empty
    for line in source:
        line = line.strip()
        if line.startswith("hSignal__0_copy__1->SetBinContent"):
            # extract the comma-separated arguments between the parentheses
            array_line = (str(str(line.split("(")[1]).split(")")[0]).split(","))
            # DONE?: yes, done. TODO: need to convert the units appropriately,
            # we are using nS and uA, but need to be given as S and A
            # DONE: wrong index specified. TODO: saturated signal due to missing
            # "-" sign... why it is not preserved?
            float_array = np.array([
                np.format_float_scientific(np.float32(array_line[0]) * 10**-9,
                                           unique=False, precision=6),
                np.format_float_scientific(np.float32(array_line[1]) * 10**-6,
                                           unique=False, precision=6)])
            line_array.append(f'+ ({float_array[0]}, {float_array[1]})')
            # TODO: fix error in simulation...
            # doAnalyses: TRAN: Timestep too small; time = 1.32997e-07,
            # timestep = 1.25e-21: trouble with x_x2:diode-instance d.x_x2.dvoutp
    source.close()
    return line_array
def func_run(env, logger, lr, action_noise, file):
    expDir = '/home/shivanik/lab/pointExp/state/'
    num_objs = 1
    verbose = 1
    name = 'sac_%d_0.5' % num_objs
    nIter = 5e7
    save_video_length = 200
    save_video_interval = 1000000

    env = VecVideoRecorder(
        env, osp.join(logger, "videos"),
        record_video_trigger=lambda x: x % save_video_interval == 0,
        video_length=save_video_length)
    model = SAC(
        MlpPolicy,
        env,
        verbose=verbose,
        tensorboard_log=logger,
        learning_rate=lr,
        action_noise=action_noise,
    )
    model.learn(total_timesteps=int(nIter), log_interval=100)
    exp_name = expDir + "/%s/%s_%s" % (name,
                                       np.format_float_scientific(nIter),
                                       np.format_float_scientific(lr))
    model.save(exp_name)
    file.write(exp_name + '\n')
    env.close()
    return True
def log_properties(positions, cycle_num, total_PE, filename):
    # Overwrite the file on the first cycle, append afterwards; the context
    # manager ensures the file is closed after each call.
    mode = 'w' if cycle_num == -1 else 'a'
    with open(filename, mode, newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter="\t")
        total_KE = 0
        for particle in positions:
            total_KE += particle['KE']
        total_E = total_KE + total_PE
        writer.writerow(['Cycle Number = ' + str(cycle_num)])
        writer.writerow([
            ' Kinetic Energy = '
            + str(np.format_float_scientific(total_KE, precision=4))
        ])
        writer.writerow([
            ' Potential Energy = '
            + str(np.format_float_scientific(total_PE, precision=4))
        ])
        writer.writerow([
            ' Total Energy = '
            + str(np.format_float_scientific(total_E, precision=4))
        ])
def time_to_string(objective_time, derivative_time):
    obj_time_str = np.format_float_scientific(objective_time, unique=False,
                                              precision=PRECISION)
    der_time_str = np.format_float_scientific(derivative_time, unique=False,
                                              precision=PRECISION)
    return f"{obj_time_str}\n{der_time_str}"
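# Minimal usage sketch for time_to_string (PRECISION is a module-level
# constant; the output below assumes PRECISION = 3 purely for illustration):
print(time_to_string(0.01234, 0.000567))
# -> 1.234e-02
#    5.670e-04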
def save_trained_matrix_to_file(matrix_path, matrix):
    with open(matrix_path, 'w') as f:
        for i in range(matrix.shape[0]):
            s = np.format_float_scientific(matrix[i][0], unique=False,
                                           precision=18)
            for j in range(1, matrix.shape[1]):
                s += ' %s' % np.format_float_scientific(
                    matrix[i][j], unique=False, precision=18)
            f.write('%s\n' % s)
def return_pressure(self):
    if (self.pressure >= self.low_limit) and (self.pressure <= self.upp_limit):
        return [
            np.format_float_scientific(self.pressure, precision=2),
            "normal"
        ]
    else:
        return [
            np.format_float_scientific(self.pressure, precision=2),
            "error"
        ]
def cr_rec(x1, x2, y1, y2, material_name, region_name):
    '''
    Create a rectangular region
    '''
    output_str = '(sdegeo:create-rectangle ' + \
        '(position ' + ' '.join([np.format_float_scientific(x, precision=15, trim='-')
                                 for x in [x1, y1, 0]]) + ') ' + \
        '(position ' + ' '.join([np.format_float_scientific(x, precision=15, trim='-')
                                 for x in [x2, y2, 0]]) + ') ' + \
        '"' + material_name + '"' + \
        ' "' + region_name + '")'
    return output_str
def scientific(s):
    result = ""
    if s.shape == ():
        result = np.format_float_scientific(s, unique=False, precision=5)
    else:
        for i in s:
            result += np.format_float_scientific(i, unique=False, precision=5) + " "
    return result
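# Minimal usage sketch for scientific (illustrative; handles 0-d scalars and
# 1-d arrays, the array case returning a space-separated string):
print(scientific(np.float64(3.14159)))    # -> '3.14159e+00'
print(scientific(np.array([1.0, 0.5])))   # -> '1.00000e+00 5.00000e-01 '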
def train():
    set_gpu()
    expDir = '/home/shivanik/lab/pointExp/state/'
    num_objs = 1
    verbose = 1
    name = 'sac_%d_0.5' % num_objs
    nIter = 1e8
    save_video_length = 200
    save_video_interval = 1000000

    file = open('sac_done.txt', 'w+')
    env = make_vec_env(
        'PointMassDense-%d-v1' % num_objs, 1,
        wrapper_class=FlattenDictWrapper,
        wrapper_env_kwargs=['observation', 'achieved_goal', 'desired_goal'])
    n_actions = env.action_space.shape[-1]
    stddev = 0.2
    pool = multiprocessing.Pool(processes=4)
    for lr in [1e-5]:  # , 5e-4, 1e-5
        logger = osp.join(
            expDir, name,
            'logs%s_%s' % (np.format_float_scientific(nIter),
                           np.format_float_scientific(lr)))
        env = VecVideoRecorder(
            env, osp.join(logger, "videos"),
            record_video_trigger=lambda x: x % save_video_interval == 0,
            video_length=save_video_length)
        action_noise = NormalActionNoise(mean=np.zeros(n_actions),
                                         sigma=0.1 * np.ones(n_actions))
        # boo = pool.apply_async(func_run, args=(env, logger, lr, action_noise, file))
        model = SAC(
            MlpPolicy,
            env,
            verbose=verbose,
            tensorboard_log=logger,
            learning_rate=lr,
            action_noise=action_noise,
        )
        model.learn(total_timesteps=int(nIter), log_interval=100)
        exp_name = expDir + "/%s/%s_%s" % (name,
                                           np.format_float_scientific(nIter),
                                           np.format_float_scientific(lr))
        model.save(exp_name)
        file.write(exp_name + '\n')
        env.close()
    file.close()
    pool.close()
    pool.join()
def test_nbs1000(data, data_type, func):
    expected_devs = nbs14_1000_ref[func.__name__]
    out = func(data=data, rate=RATE, data_type=data_type,
               taus=np.array([1, 10, 100]))
    # Check deviations are the same
    for i, dev in enumerate(expected_devs):
        assert np.format_float_scientific(dev, 5) == \
            np.format_float_scientific(out.devs[i], 5)
def fit(self, train_loader, validation_loader):
    # Continue training proc -> Hand-tune LR
    if global_config.CONTINUE_TRAIN:
        LR = global_config.GPU_LR
        self.optimizer = torch.optim.AdamW([
            {'params': self.model.parameters(), 'lr': LR[0]},
            # {'params': self.model.fc1.parameters(), 'lr': LR[1]},
            # {'params': self.model.bn1.parameters(), 'lr': LR[1]},
            # {'params': self.model.dense_out.parameters(), 'lr': LR[1]}
        ])
        ##############################################
        self.scheduler = global_config.SchedulerClass(
            self.optimizer, **global_config.scheduler_params)
        # APEX initialize -> FP16 training (half-precision)
        self.model, self.optimizer = amp.initialize(
            self.model, self.optimizer, opt_level="O1", verbosity=1)

    for e in range(self.config.GPU_EPOCH):
        t = time.time()
        summary_loss, final_scores = self.train_one_epoch(train_loader)
        effNet_lr = np.format_float_scientific(
            self.optimizer.param_groups[0]['lr'], unique=False, precision=1)
        head_lr = np.format_float_scientific(
            self.optimizer.param_groups[0]['lr'], unique=False, precision=1)
        print("---" * 31)
        self.log(f":::[Train RESULT] | Epoch: {str(self.epoch).rjust(2, ' ')} | "
                 f"Loss: {summary_loss.avg:.4f} | AUC: {final_scores.avg:.4f} | "
                 f"LR: {effNet_lr}/{head_lr} | Time: {int((time.time() - t)//60)}m")
        self.save(f'{self.base_dir}/last_ckpt.pt')

        t = time.time()
        summary_loss, final_scores = self.validation(validation_loader)
        self.log(f":::[Valid RESULT] | Epoch: {str(self.epoch).rjust(2, ' ')} | "
                 f"Loss: {summary_loss.avg:.4f} | AUC: {final_scores.avg:.4f} | "
                 f"LR: {effNet_lr}/{head_lr} | Time: {int((time.time() - t)//60)}m")
        if summary_loss.avg < self.best_summary_loss:
            self.best_summary_loss = summary_loss.avg
            self.model.eval()
            self.save(f'{self.base_dir}/{global_config.SAVED_NAME}_'
                      f'{str(self.epoch).zfill(3)}ep.pt')
            # keep only the best 3 checkpoints
            # for path in sorted(glob(f'{self.base_dir}/{global_config.SAVED_NAME}_*ep.pt'))[:-3]:
            #     os.remove(path)
        if self.config.validation_scheduler:
            try:
                self.scheduler.step(metrics=summary_loss.avg)
            except:
                self.scheduler.step()
        self.epoch += 1
def numeric_approx(e):
    neural = Neuralnetwork(config)
    x = X_train
    targets = y_train
    dict1 = {0: "input to hidden", -1: "hidden to output"}
    # input to hidden, then hidden to output
    for j in [0, -1]:
        # choose w1,1 w2,2
        for i in [1, 2]:
            l = neural.layers[j]
            w = np.copy(l.w)
            neural.forward_pass(x, targets)
            neural.backward_pass()  # gradient by backpropagation
            print(dict1[j], "W", str(i) + "," + str(i), ":")
            d_w = -l.d_w[i][i] / len(x)
            print(d_w)
            l.w[i][i] = w[i][i] + e
            loss_plus = neural.forward_pass(x, targets)[0]
            l.w[i][i] = w[i][i] - e
            loss_minus = neural.forward_pass(x, targets)[0]
            approx = (loss_plus - loss_minus) / (2 * e)  # gradient by numerical approximation
            print(approx)
            diff = np.abs(approx - d_w)
            print(np.format_float_scientific(diff))

            l = neural.layers[j]
            b = np.copy(l.b)
            neural.forward_pass(x, targets)
            neural.backward_pass()  # gradient by backpropagation
            print(dict1[j], "B", str(i), ":")
            d_b = -l.d_b[i] / len(x)
            print(d_b)
            l.b[0][i] = b[0][i] + e
            loss_plus = neural.forward_pass(x, targets)[0]
            l.b[0][i] = b[0][i] - e
            loss_minus = neural.forward_pass(x, targets)[0]
            approx = (loss_plus - loss_minus) / (2 * e)  # gradient by numerical approximation
            print(approx)
            diff = np.abs(approx - d_b)
            print(np.format_float_scientific(diff))
def update():
    # This function updates the labels to give the user the parameters
    if wavelength.get() and power.get() and div.get() and dist.get() and foc.get() and z.get():
        beam = GaussianBeam.GaussianBeam(float(wavelength.get()) * 1e-9,
                                         float(power.get()) * 1e-3,
                                         float(div.get()), 0)
        distances = list(map(float, dist.get().split(',')))
        focal_lengths = list(map(float, foc.get().split(',')))
        width = beam.thinLensFunction(distances, focal_lengths)(float(z.get()))
        for i in range(len(distances)):
            beam = beam.thinLens(distances[i], focal_lengths[i])
        angle = beam.divergence
        PR = beam.squareAperturePower(float(z.get()), float(xw.get()) / 100,
                                      float(yw.get()) / 100)
        W.set('Beam Diameter: ' + np.format_float_scientific(200 * width, 3) + ' cm')
        theta.set('Beam Divergence: ' + np.format_float_scientific(angle, 3) + ' degrees')
        power_R.set('Power Received: ' + np.format_float_scientific(1000 * PR, 3) + ' mW')
def plot(measurement, result_name='FitExpSinAll', save_plot=True):
    # estimate the number of resonators in the measurement
    res_names = find_resonator_names(measurement)
    plt.figure(figsize=np.array(plot_shapes[len(res_names)]) * 3)
    for i, res in enumerate(res_names):
        plt.subplot(plot_shapes[len(res_names)][1],
                    plot_shapes[len(res_names)][0], i + 1)
        fit_res = measurement[f'{res}'][result_name]
        plt.plot(fit_res['x'], fit_res['y_original'], '.-')
        if 'y_fit' not in fit_res:
            plt.text(0.2, 0.2, 'Fit Failed', ha='center', va='center',
                     transform=plt.gca().transAxes)
        else:
            decay = np.format_float_scientific(fit_res['params']['t'], precision=3)
            decay_error = np.format_float_scientific(
                fit_res['fit_result']['error_dict']['t'], precision=3)
            frequency = np.format_float_scientific(fit_res['params']['f'], precision=3)
            frequency_error = np.format_float_scientific(
                fit_res['fit_result']['error_dict']['f'], precision=3)
            plt.plot(
                fit_res['x'], fit_res['y_fit'],
                label='Decay: {0:}({1:}) us\nFreq: {2:}({3:}) MHz'.format(
                    decay, decay_error, frequency, frequency_error),
                c='black', ms=2)
            plt.legend(loc=1)
        plt.text(0.1, 0.9, res, ha='center', va='center',
                 transform=plt.gca().transAxes)
        if (np.max(fit_res['y_original']) - np.min(fit_res['y_original'])) < 2:
            plt.ylim(0, 1)
    plt.tight_layout()
    if save_plot:
        assert 'save_path' in measurement.keys(), "no save path known!"
        save_path = measurement['save_path']['filename']
        plt.savefig(save_path, dpi=200)
def test_function_2D():
    """ Test function for implementing linear interpolation
    in a 2-dimensional case """
    x = np.linspace(-2.0, 2.0, 11)
    y = np.linspace(-2.0, 2.0, 11)
    X, Y = np.meshgrid(x, y)
    Z = X * np.exp(-1.0 * (X * X + Y * Y))
    """ Number of intervals should be smaller, as the increasing number
    of dimensions increases computation time """
    pts = [10, 20, 30, 40, 50]
    i = 0
    """ Varying the amount of grid points and calculating the average of
    two-norm of interpolated value and analytical value """
    print("2D linear interpolation")
    print()
    """ Creating linear_interp object corresponding to 2-dimensional case """
    lin2d = linear_interp(x=x, y=y, f=Z, dims=2)
    while i < 5:
        xx = np.linspace(-2.0, 2.0, pts[i])
        yy = np.linspace(-2.0, 2.0, pts[i])
        XX, YY = np.meshgrid(xx, yy)
        """ Interpolating by calling the function eval2d for the class object lin2d """
        Z_evaluated = lin2d.eval2d(xx, yy)
        Z_analytical = XX * np.exp(-1.0 * (XX * XX + YY * YY))
        error = np.linalg.norm(Z_evaluated - Z_analytical) / pts[i]
        print("Average of two-norm with", pts[i], "grid points:",
              np.format_float_scientific(error, unique=False, precision=5))
        i += 1
    print()
    """ 2D case is a bit unstable with varying grid point number """
def test_function_1D():
    """ Test function for implementing linear interpolation
    in a 1-dimensional case """
    x = np.linspace(0., 2. * np.pi, 10000)
    y = np.sin(x)
    pts = [10, 20, 30, 40, 50]
    i = 0
    """ Varying the amount of grid points and calculating the average of
    two-norm of interpolated value and analytical value """
    print("1D linear interpolation")
    print()
    fig1d = plt.figure()
    ax1d = fig1d.add_subplot(111)
    """ Creating linear_interp object corresponding to 1-dimensional case """
    lin1d = linear_interp(x=x, f=y, dims=1)
    while i < 5:
        xx = np.linspace(0., 2. * np.pi, pts[i])
        """ Interpolating by calling the function eval1d for the class object lin1d """
        y_evaluated = lin1d.eval1d(xx)
        y_analytical = np.sin(xx)
        error = np.linalg.norm(y_evaluated - y_analytical) / pts[i]
        ax1d.plot(xx, y_evaluated)
        ax1d.set_xlabel(r'$x$')
        ax1d.set_ylabel(r'f(x)')
        ax1d.set_title('1D linear interpolation of sin(x) with varying number of grid points')
        fig1d.show()
        print("Average of two-norm with", pts[i], "grid points:",
              np.format_float_scientific(error, unique=False, precision=5))
        i += 1
    print()
    """ 1D case works logically, as with increasing grid point number
    the error decreases. As can be seen from the """
def transform(self, point):
    """Round `point` to the requested precision, as numpy arrays."""
    # numpy.format_float_scientific precision starts at 0
    if isinstance(point, (list, tuple)) or (isinstance(point, numpy.ndarray) and point.shape):
        point = map(
            lambda x: numpy.format_float_scientific(
                x, precision=self.precision - 1),
            point)
        point = list(map(float, point))
    else:
        point = float(
            numpy.format_float_scientific(point, precision=self.precision - 1))
    return numpy.asarray(point)
def format_list(np_list: list[float]) -> list[str]:
    # We found that storing lists of floating point values as strings in the
    # database was consuming a lot of disk storage, so we format the values,
    # without losing much resolution, to reduce the storage impact.
    import numpy as np
    return [np.format_float_scientific(x, precision=2) for x in np_list]
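# Minimal usage sketch for format_list (illustrative values; precision=2 keeps
# roughly three significant figures per entry):
print(format_list([0.123456, 98765.4321]))  # -> ['1.23e-01', '9.88e+04']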
def float2string(data):
    if data == 0:
        return '0.0'
    elif 0.01 <= abs(data) < 10000:
        return np.format_float_positional(data, trim='0')
    else:
        return np.format_float_scientific(data, trim='0')
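# Minimal usage sketch for float2string (illustrative values; exact strings
# follow NumPy's positional/scientific formatting with trim='0'):
print(float2string(0.0))      # -> '0.0'
print(float2string(3.5))      # -> '3.5'        (positional range)
print(float2string(0.001))    # -> '1.0e-03'    (below 0.01 switches to scientific)
print(float2string(12345.0))  # -> '1.2345e+04' (10000 and above, scientific)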
def ALPACAFormatting(s):
    s = str(s)
    if s.find('d') == -1:
        s = numpy.format_float_scientific(float(s))
        s = s.replace('e', 'd')
        s = s.replace('+', '')
    return s
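# Minimal usage sketch for ALPACAFormatting (illustrative; converts a number to
# Fortran-style 'd' exponent notation and leaves strings that already contain
# 'd' untouched):
print(ALPACAFormatting(0.00125))   # -> '1.25d-03'
print(ALPACAFormatting('1.0d-5'))  # -> '1.0d-5'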
def send_data(a):
    # np.nan (rather than the removed np.NaN alias) disables line wrapping here
    a_shape = np.array2string(np.asarray(a.shape), separator=';',
                              max_line_width=np.nan)
    a = np.transpose(a, np.flip(range(len(a.shape))))
    a_shape = a_shape[1:-1]
    a_flat = a.flatten()
    a_string = ';'.join([np.format_float_scientific(num) for num in a_flat])
    bytedata = (a_shape + ',' + a_string).encode("utf-8")

    class MyServer(BaseHTTPRequestHandler):
        def do_GET(self):
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(bytedata)

    myserver = HTTPServer(("localhost", http_port), MyServer)
    myserver.timeout = http_timeout

    class HTTPThread(threading.Thread):
        def run(self):
            print("Starting ")
            myserver.handle_request()
            # myserver.serve_forever()
            myserver.server_close()
            print("Exiting ")

    thread1 = HTTPThread()
    thread1.start()
    socket.send('Finished'.encode("utf-8"))
    return thread1
def nsd(data, position, length=1):
    """
    Given some data and the position of the digit desired, return the digit.
    """
    def np_slicer(a, start, end):
        b = a.view((str, 1)).reshape(len(a), -1)[:, start:end]
        return numpy.frombuffer(b.tobytes(), dtype=(str, end - start))

    if isinstance(data, (float, int)):
        data = numpy.array([data])
    if isinstance(data, (list, pd.DataFrame)):
        data = numpy.array(data)
    positive = numpy.absolute(data)  # remove negative sign
    number_zeroes = numpy.count_nonzero(positive == 0)
    if number_zeroes > 0:
        return ("Zero has no significant digits. "
                "Please ensure your data has no zeroes.")
    data_np = []
    for i in positive:
        data_np.append(
            numpy.format_float_scientific(i, precision=15, unique=False))
    data_np = numpy.array(data_np)
    clean_string = numpy.char.replace(data_np, '.', '')
    truncated = np_slicer(clean_string, 0, 14)
    return np_slicer(truncated, position - 1, position - 1 + length)
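# Minimal usage sketch for nsd (illustrative; the result is a NumPy string
# array because the helper slices fixed-width character views):
print(nsd(123.456, 1))            # -> ['1']   (first significant digit)
print(nsd(123.456, 2, length=2))  # -> ['23']  (second and third significant digits)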
def _dist_summ(data, precision=1, scientific=True):
    """
    Summarise distribution [as min, q1, median, q3, max]

    Parameters
    ----------
    data : list
        List of numeric data
    precision : int
        Number of digits of precision in scientific notation, default = 1
    scientific : bool
        Default is True, to return result in scientific notation

    Examples
    --------
    >>> _dist_summ([1,2,3,4,5])
    ['1.e+00', '2.e+00', '3.e+00', '4.e+00', '5.e+00']

    _dist_summ([1,2,3,4,5], scientific=False)
    [1, 2.0, 3.0, 4.0, 5]
    """
    dmin = np.min(data)
    dQ1 = np.percentile(data, q=25, interpolation='midpoint')
    dmedian = np.median(data)
    dQ3 = np.percentile(data, q=75, interpolation='midpoint')
    dmax = np.max(data)
    r = [dmin, dQ1, dmedian, dQ3, dmax]
    if scientific:
        return [np.format_float_scientific(s, precision=precision) for s in r]
    else:
        return r
def to_latex_float(val):
    if abs(val) > 1000:
        sci_not_form = np.format_float_scientific(val, exp_digits=1, precision=1)
        sci_not_form = sci_not_form.replace('.e+', '$\\times 10^{')
        return sci_not_form.replace('e+', '$\\times 10^{') + '}$'
    elif abs(val) > 1:
        return str(int(val))
    elif abs(val) > 0.001:
        return "{:0.3f}".format(val)
    else:
        sci_not_form = np.format_float_scientific(val, exp_digits=1, precision=0)
        return sci_not_form.replace('.e-', '$\\times 10^{-') + '}$'
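# Minimal usage sketch for to_latex_float (illustrative values; backslashes are
# shown as they appear in the returned Python strings):
print(to_latex_float(123456.0))  # -> '1.2$\times 10^{5}$'
print(to_latex_float(0.5))       # -> '0.500'
print(to_latex_float(0.0002))    # -> '2$\times 10^{-4}$'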
def Plot3D(Resx, Resy, Resz, bins, xlabel='Ask size', ylabel='Bid size',
           zlabel='Joint distribution', option="save", path="", ImageName="",
           xtitle="", elev0=30, azim0=40, dist0=12, optionXY=1,
           figsize_=(8, 11), x_tickslabels=False, x_ticksvalues=np.zeros(1)):
    if type(bins) == int:
        bins = [bins, bins]
    xpos1 = np.zeros(bins[0] * bins[1])
    ypos1 = np.zeros(bins[0] * bins[1])
    zpos1 = np.zeros(bins[0] * bins[1])
    if optionXY == 1:
        for i in range(bins[0]):
            for j in range(bins[1]):
                xpos1[i * bins[1] + j] = (Resx[i] + Resx[i + 1]) / 2
                ypos1[i * bins[1] + j] = (Resy[j] + Resy[j + 1]) / 2
                zpos1[i * bins[1] + j] = Resz[i, j]
    if optionXY == 2:
        for i in range(bins[0]):
            for j in range(bins[1]):
                xpos1[i * bins[1] + j] = Resx[i]
                ypos1[i * bins[1] + j] = Resy[j]
                zpos1[i * bins[1] + j] = Resz[i * bins[1] + j]
    fig = plt.figure(figsize=figsize_)
    ax = fig.add_subplot(111, projection='3d')
    ax.view_init(elev=elev0, azim=azim0)
    ax.dist = dist0
    if x_tickslabels:
        ticks_values = tuple([
            np.format_float_scientific(elt, unique=False, precision=2)
            for elt in x_ticksvalues
        ])
        ax.set_xticklabels(list(ticks_values))
    ax.plot_trisurf(xpos1, ypos1, zpos1, linewidth=0.2, antialiased=True,
                    cmap=plt.cm.rainbow)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_zlabel(zlabel)
    # ax.set_zlim((0, 0.0035))
    plt.grid()
    if option == "save":
        plt.savefig(path + ImageName + ".pdf", bbox_inches='tight')
    plt.show()
def test_dragon4(self):
    # these tests are adapted from Ryan Juckett's dragon4 implementation,
    # see dragon4.c for details.

    fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k)
    fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k)
    fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k)
    fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k)

    preckwd = lambda prec: {'unique': False, 'precision': prec}

    assert_equal(fpos32('1.0'), "1.")
    assert_equal(fsci32('1.0'), "1.e+00")
    assert_equal(fpos32('10.234'), "10.234")
    assert_equal(fpos32('-10.234'), "-10.234")
    assert_equal(fsci32('10.234'), "1.0234e+01")
    assert_equal(fsci32('-10.234'), "-1.0234e+01")
    assert_equal(fpos32('1000.0'), "1000.")
    assert_equal(fpos32('1.0', precision=0), "1.")
    assert_equal(fsci32('1.0', precision=0), "1.e+00")
    assert_equal(fpos32('10.234', precision=0), "10.")
    assert_equal(fpos32('-10.234', precision=0), "-10.")
    assert_equal(fsci32('10.234', precision=0), "1.e+01")
    assert_equal(fsci32('-10.234', precision=0), "-1.e+01")
    assert_equal(fpos32('10.234', precision=2), "10.23")
    assert_equal(fsci32('-10.234', precision=2), "-1.02e+01")
    assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)),
                 '9.9999999999999995e-08')
    assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)),
                 '9.8813129168249309e-324')
    assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)),
                 '9.9999999999999694e-311')

    # test rounding
    # 3.1415927410 is closest float32 to np.pi
    assert_equal(fpos32('3.14159265358979323846', **preckwd(10)),
                 "3.1415927410")
    assert_equal(fsci32('3.14159265358979323846', **preckwd(10)),
                 "3.1415927410e+00")
    assert_equal(fpos64('3.14159265358979323846', **preckwd(10)),
                 "3.1415926536")
    assert_equal(fsci64('3.14159265358979323846', **preckwd(10)),
                 "3.1415926536e+00")

    # 299792448 is closest float32 to 299792458
    assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000")
    assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08")
    assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000")
    assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08")

    assert_equal(fpos32('3.14159265358979323846', **preckwd(25)),
                 "3.1415927410125732421875000")
    assert_equal(fpos64('3.14159265358979323846', **preckwd(50)),
                 "3.14159265358979311599796346854418516159057617187500")
    assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793")

    # smallest numbers
    assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149),
                 "0.00000000000000000000000000000000000000000000140129846432"
                 "4817070923729583289916131280261941876515771757068283889791"
                 "08268586060148663818836212158203125")
    assert_equal(fpos64(0.5**(1022 + 52), unique=False, precision=1074),
                 "0.00000000000000000000000000000000000000000000000000000000"
                 "0000000000000000000000000000000000000000000000000000000000"
                 "0000000000000000000000000000000000000000000000000000000000"
                 "0000000000000000000000000000000000000000000000000000000000"
                 "0000000000000000000000000000000000000000000000000000000000"
                 "0000000000000000000000000000000000049406564584124654417656"
                 "8792868221372365059802614324764425585682500675507270208751"
                 "8652998363616359923797965646954457177309266567103559397963"
                 "9877479601078187812630071319031140452784581716784898210368"
                 "8718636056998730723050006387409153564984387312473397273169"
                 "6151400317153853980741262385655911710266585566867681870395"
                 "6031062493194527159149245532930545654440112748012970999954"
                 "1931989409080416563324524757147869014726780159355238611550"
                 "1348035264934720193790268107107491703332226844753335720832"
                 "4319360923828934583680601060115061698097530783422773183292"
                 "4790498252473077637592724787465608477820373446969953364701"
                 "7972677717585125660551199131504891101451037862738167250955"
                 "8373897335989936648099411642057026370902792427675445652290"
                 "87538682506419718265533447265625")

    # largest numbers
    assert_equal(fpos32(np.finfo(np.float32).max, **preckwd(0)),
                 "340282346638528859811704183484516925440.")
    assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)),
                 "1797693134862315708145274237317043567980705675258449965989"
                 "1747680315726078002853876058955863276687817154045895351438"
                 "2464234321326889464182768467546703537516986049910576551282"
                 "0762454900903893289440758685084551339423045832369032229481"
                 "6580855933212334827479782620414472316873817718091929988125"
                 "0404026184124858368.")
    # Warning: In unique mode only the integer digits necessary for
    # uniqueness are computed, the rest are 0. Should we change this?
    assert_equal(fpos32(np.finfo(np.float32).max, precision=0),
                 "340282350000000000000000000000000000000.")

    # test trailing zeros
    assert_equal(fpos32('1.0', unique=False, precision=3), "1.000")
    assert_equal(fpos64('1.0', unique=False, precision=3), "1.000")
    assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00")
    assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00")
    assert_equal(fpos32('1.5', unique=False, precision=3), "1.500")
    assert_equal(fpos64('1.5', unique=False, precision=3), "1.500")
    assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00")
    assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00")