# Module-level dependencies assumed by this file (not shown here): the
# basi_stepper library imported as bs, the shade-image helper module
# imported as si, PIL's Image, numpy aliases such as np_asarray and
# np_uint8, and globals like hBacklash, vBacklash, minMotorStepInc,
# eMotor, eraser, eraseSpeed, numShakes, and clean_up.


def draw_image(hMotor, vMotor, inputDict):
    global returnToMain
    returnToMain = False

    # Create stepper motors that use the basi_stepper library for drawing images
    xMotor = bs.stepper(6, 19, 13, 26, _name='horiz', _backlash=hBacklash)
    yMotor = bs.stepper(4, 27, 17, 22, _name='vert', _backlash=vBacklash)
    xMotor.set_motorStepInc(minMotorStepInc)
    yMotor.set_motorStepInc(minMotorStepInc)

    fileName = inputDict['fileName']

    # Open the file and convert it to black and white
    directory = "/home/pi/EAS/shade_image/Images/"
    im_bw = Image.open(directory + fileName)
    im_bw = im_bw.convert('1', dither=Image.NONE)

    # Resize the image to be no more than the max size the EAS can handle
    im_bw = si.resize(im_bw, si.maxY, si.maxX)

    # Convert the image to a numpy array
    im_bw = np_asarray(im_bw, np_uint8) * 255

    # Find an unshaded region
    (yStart, xStart) = si.get_start(im_bw)
    # print("Start at", yStart, xStart)

    # Shade the first pixel in the image and drawing since this is where the cursor starts
    im_bw[yStart][xStart] = 100  # First point already shaded since cursor is there

    # Reset the current position on the motors to match the image coordinate settings
    # (0, 0) for graphs is center; (0, 0) for images is top left
    vAdjust = int(im_bw.shape[0] / 2)
    hAdjust = int(im_bw.shape[1] / 2)
    # print("hAdjust:", hAdjust, "vAdjust:", vAdjust)
    yMotor.set_currPos(vMotor.get_currPos() + vAdjust * si.pixelSizeY)
    xMotor.set_currPos(hMotor.get_currPos() + hAdjust * si.pixelSizeX)
    # print("hCurr:", hMotor.get_currPos())
    # print("vCurr:", vMotor.get_currPos())

    # Move to the starting point for the drawing and erase the EAS
    yMotor.go_to(yStart * si.pixelSizeY)
    xMotor.go_to(xStart * si.pixelSizeX)
    eMotor.set_motorStepInc(10000)
    eMotor.set_clockwise(False)
    eraser.erase(eMotor, eraseSpeed, 1024, 7.5, numShakes, 0.4)
    eMotor.turn_off()

    # Call the function to create the drawing
    returnToMain = si.draw_shaded_image(xMotor, yMotor, yStart, xStart, im_bw, s)

    # Reset the origin to the original location (center)
    vMotor.set_currPos(yMotor.get_currPos() - vAdjust * si.pixelSizeY)
    hMotor.set_currPos(xMotor.get_currPos() - hAdjust * si.pixelSizeX)

    # Turn off motors
    clean_up()
def parse_matrix_part(matrix, szSub, ovSub):
    # Extract overlapping sub-sections of a 3-D matrix, im2col-style:
    # szSub gives the window size and ovSub the shift along each axis
    # (only axis 0 is actually windowed below).
    assert matrix.ndim == 3
    assert np_ndim(szSub) == 1
    assert len(szSub) == 3
    assert np_ndim(ovSub) == 1
    assert len(ovSub) == 3

    matrix_shape = np_asarray(matrix.shape, dtype=int)
    len_each_section, _, _ = szSub
    shift_length, _, _ = ovSub
    len_each_section_range = np_arange(len_each_section)
    # Number of windows that fit along each axis
    matrix_shape = np_ceil((matrix_shape - szSub + 1) / ovSub).astype(int)
    num_rows_overlap, num_elements, num_beams = matrix_shape
    result_matrix = np_zeros((np_prod(szSub), np_prod(matrix_shape)))
    cnt = 0
    for i in range(num_beams):
        for j in range(num_elements):
            for k in range(num_rows_overlap):
                # Each column of the result is one window taken along axis 0
                index_1 = len_each_section_range + k * shift_length
                tmp = matrix[index_1, j, i]
                result_matrix[:, cnt] = tmp
                cnt += 1
    return result_matrix
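# A minimal usage sketch for parse_matrix_part (the shapes below are made up
# for illustration): with szSub = (4, 1, 1) and ovSub = (2, 1, 1) it slides a
# length-4 window along axis 0 with a stride of 2, producing one column per
# (window, element, beam) combination.
import numpy as np

demo = np.arange(10 * 3 * 2, dtype=float).reshape(10, 3, 2)
cols = parse_matrix_part(demo, np.array([4, 1, 1]), np.array([2, 1, 1]))
print(cols.shape)  # (4, 24): ceil((10 - 4 + 1) / 2) = 4 windows x 3 elements x 2 beams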
def LoadSim_pressed(p1):
    global w, loaddata, data_dir_load, issim, f_ph
    samples = int(point_num.get())
    parse_x(st_de.get(), samples)
    f_de = parse_function(eq_de.get())
    #parse_x(st_de.get(), samples)
    f_te = parse_function(eq_te.get())
    #parse_x(st_de.get(), samples)
    f_ph = parse_function(eq_ph.get())
    times_read = st_de.get()
    times_r_fl = float(times_read)
    timel = [times_r_fl * i for i in range(samples)]
    #fun_list = array([f_de[0], f_te[0], f_ph[0]])
    fun_list = array([f_de, f_te, f_ph])
    ck_list = [ck_rand_de.get(), ck_rand_te.get(), ck_rand_ph.get()]
    pow_list = [rand_de.get(), rand_te.get(), rand_ph.get()]
    # Add random-walk noise to each curve whose checkbox is ticked
    for n, i in enumerate(ck_list):
        if i:
            print("added noise")
            myR = datagenerator.RandomWalk(float(pow_list[n]), 1 / float(st_de.get()))
            myR.funrand(samples)
            fun_list[n] += myR.randarr
            del myR
    func_read = [eq_de.get(), eq_te.get(), eq_ph.get()]
    last_x = parse_x(times_read, samples)
    last_func = [parse_function(ff) for ff in func_read]
    plots = array(np_asarray(last_func))
    f_ph = fun_list[2]
    plotrefresh(pl1[0], pl1[1], fun_list, y=timel,
                col=['r', "orange", "green"], ylab="del,the,phi (rad)")
    loaddata = list(datagenerator.datagen(fun_list[0], fun_list[1], fun_list[2]))
    loaddata.insert(0, timel)
    loaddata = array(loaddata)
    data_dir_load = 1
    issim = 1
    # Write sim pars to file
    datagenerator.writefile(fun_list[0], fun_list[1], fun_list[2])
    write_last(func_read=func_read, times_read=times_read,
               samples=samples, rands=pow_list)
def __array__(self, dtype=None):
    """Returns a NumPy ndarray.

    This allows instances of this class to be directly used in NumPy
    routines. However, doing that may force a copy to CPU.

    Args:
      dtype: A NumPy compatible type.

    Returns:
      A NumPy ndarray.
    """
    return np_asarray(self.data, dtype)
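# A minimal sketch of how __array__ is picked up by NumPy (the DeviceTensor
# wrapper below is hypothetical, purely for illustration): np.asarray and
# np.mean invoke obj.__array__() automatically, which is where the
# device-to-CPU copy mentioned in the docstring would occur.
import numpy as np

class DeviceTensor:
    def __init__(self, data):
        self.data = data  # stand-in for device-resident storage

    def __array__(self, dtype=None):
        return np.asarray(self.data, dtype)

t = DeviceTensor([1.0, 2.0, 3.0])
print(np.mean(t))  # 2.0 -- NumPy converted t via __array__ behind the scenes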
def rle_decode(self, mask_string: str, shape=(768, 768)):
    '''
    mask_string: run-length as string formatted (start length)
    shape: (height, width) of array to return
    Returns numpy array, 255 - mask, 0 - background
    '''
    s = mask_string.split()
    starts, lengths = [
        np_asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])
    ]
    starts -= 1  # RLE starts are 1-based
    ends = starts + lengths
    img = np_zeros(shape[0] * shape[1], dtype=np_uint8)
    for lo, hi in zip(starts, ends):
        img[lo:hi] = 255
    return img.reshape(shape).T  # Needed to align to RLE direction
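# A small usage sketch for rle_decode (it is written as a method but never
# uses self, so it is called with self=None here; the 4x4 shape and RLE
# string are made up for illustration): "2 3 11 2" marks 1-based pixels 2-4
# and 11-12 in column-major order, which the final transpose accounts for.
mask = rle_decode(None, "2 3 11 2", shape=(4, 4))
print(mask)
# [[  0   0   0   0]
#  [255   0   0   0]
#  [255   0 255   0]
#  [255   0 255   0]]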
def predict(self, X, post_analyze_distribution=False, verbose=1):
    df = pd_df(X)
    print("started prediction for ", self.cluster_model, " X(", X.shape, ")")
    if self.cluster_model == 'KMeans':
        # default vals for kmeans --> max_iter=300, tol=1e-4
        self.predictedKlusters = self.trained_model.predict(df).astype(float)
        self.kluster_centers = self.trained_model.cluster_centers_.astype(float)
    elif self.cluster_model in ('GMM_full', 'GMM_diag'):
        # default vals for gmm --> max_iter=100, tol=1e-3
        _, log_resp = self.trained_model._e_step(df)
        self.predictedKlusters = log_resp.argmax(axis=1)
    elif self.cluster_model == 'Spectral':
        self.predictedKlusters = self.trained_model.predict(X).labels_
    self.kluster_centroids = get_cluster_centroids(
        X, self.predictedKlusters, kluster_centers=self.kluster_centers, verbose=0)
    numOf_1_sample_bins = None  # only computed when post_analyze_distribution is set
    if post_analyze_distribution:
        numOf_1_sample_bins, histSortedInv = analyzeClusterDistribution(
            self.predictedKlusters, self.n_clusters, verbose=1)
    unique_clust_cnt = len(np_unique(self.predictedKlusters))
    print("prediction completed for ", self.cluster_model,
          " - unique_clust_cnt(", str(unique_clust_cnt),
          "), numOf_1_sample_bins(", str(numOf_1_sample_bins), ")")
    return np_asarray(self.predictedKlusters, dtype=int), self.kluster_centroids
def fit_predict(self, X, post_analyze_distribution=False, verbose=1):
    self.fit(X, post_analyze_distribution=post_analyze_distribution, verbose=verbose)
    return np_asarray(self.predictedKlusters, dtype=int), self.kluster_centroids
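# A usage sketch for the clustering wrapper above (the class name
# ClusterWrapper and its constructor arguments are assumptions, since the
# enclosing class definition is not part of this file):
#
#   import numpy as np
#   X = np.random.rand(200, 8)
#   model = ClusterWrapper(cluster_model='KMeans', n_clusters=5)
#   labels, centroids = model.fit_predict(X, post_analyze_distribution=True)
#   print(labels.shape, len(np.unique(labels)))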
def imageToArray(im):
    # Converts from grayscale to RGB and also removes alpha if present
    return np_asarray(im.convert('RGB'))
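# A minimal usage sketch (the 4x4 grayscale+alpha test image is made up for
# illustration): the result is an (H, W, 3) uint8 array regardless of whether
# the source was grayscale, RGB, or RGBA.
from PIL import Image

arr = imageToArray(Image.new('LA', (4, 4)))
print(arr.shape, arr.dtype)  # (4, 4, 3) uint8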
def compare_time(objects=None, functions=[], num_times=1000, filepath=None, **kwargs):
    if not isinstance(functions, list):
        functions = [functions]
    t_test_table = []
    headers = ['Function']
    if objects is not None:
        rands = random_order(len(objects), num_times)
        obj_table = [[[] for _ in range(len(functions))] for _ in range(len(objects))]
        # For every object, time execution of every function, num_times
        for func_i, func in enumerate(functions):
            for rand in rands:
                obj = objects[rand]  # Select object randomly
                if len(kwargs):
                    start()
                    func(obj, **kwargs)
                else:
                    start()
                    func(obj)
                obj_table[rand][func_i].append(end(verbose=False))
        # For every function, calc t-score and p-value; resulting row layout:
        # Function | obj1 min | obj1 avg | conclusion | obj2 min | obj2 avg | conclusion | p-value | ...
        headers.append(get_name(objects[0]) + ' Min')
        headers.append('Avg Sec')
        headers.append('Conclusion')
        for obj in objects[1:]:
            headers.append(get_name(obj) + ' Min')
            headers.append('Avg Sec')
            headers.append('Conclusion')
            headers.append('p-value')
        for func_i, func in enumerate(functions):
            func_scores = [get_name(func)]
            obj1_times = np_asarray(obj_table[0][func_i])
            func_scores.append(obj1_times.min())
            func_scores.append(np_mean(obj1_times))
            func_scores.append('Baseline')
            for obj_i in range(1, len(objects)):  # Skip first obj (baseline)
                obj_times = obj_table[obj_i][func_i]
                t, p = ttest_ind(obj1_times, obj_times)
                conc = get_conclusion(t, p)
                obj_times = np_asarray(obj_times)
                func_scores.append(obj_times.min())
                func_scores.append(np_mean(obj_times))
                func_scores.append(conc)
                func_scores.append(p)
            t_test_table.append(func_scores)
    else:
        rands = random_order(len(functions), num_times)
        headers.extend(['Min', 'Avg Sec', 'Conclusion', 'p-value'])
        func_table = [[] for _ in range(len(functions))]
        for rand in rands:
            func = functions[rand]
            if len(kwargs):
                start()
                func(**kwargs)
            else:
                start()
                func()
            func_table[rand].append(end(verbose=False))
        func1_times = np_asarray(func_table[0])
        t_test_table.append([get_name(functions[0]), func1_times.min(),
                             np_mean(func1_times), 'Baseline'])
        for func_i in range(1, len(functions)):  # Skip first function (baseline)
            func = functions[func_i]
            func_scores = [get_name(func)]
            func_times = func_table[func_i]
            t, p = ttest_ind(func1_times, func_times)
            conc = get_conclusion(t, p)
            func_times = np_asarray(func_times)
            func_scores.extend([func_times.min(), np_mean(func_times), conc, p])
            t_test_table.append(func_scores)
    msg = "Timing test iterations: " + str(num_times) + "\n"
    msg += tabulate(t_test_table, headers=headers)
    msg += "\n"
    print(msg)
    t_test_table.insert(0, headers)
    if filepath is not None:
        if filepath is True or filepath == '':
            filepath = '.csv'
        if filepath.startswith('.'):
            # Build a filename from the tested names, e.g. obj1-obj2_func1-func2_1000.csv
            filename = ''
            if objects:
                for obj in objects:
                    filename += get_name(obj) + '-'
                if len(filename):
                    filename = filename[:-1] + '_'
            for func in functions:
                filename += get_name(func) + '-'
            if len(filename):
                filename = filename[:-1] + '_'
            filename += str(num_times)
            filepath = filename + filepath  # Add extension
        vsave(t_test_table, filepath=filepath)
    return t_test_table
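# A short usage sketch (the data and the membership-test lambda are made up;
# compare_time also relies on this module's start/end timing helpers,
# random_order, get_name, get_conclusion, and vsave being available):
# compare `in` on a list vs. a set, with the first object as the baseline.
#
#   data_list = list(range(10000))
#   data_set = set(data_list)
#   compare_time(objects=[data_list, data_set],
#                functions=[lambda obj: 9999 in obj],
#                num_times=500)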