def start_clicked(self):
    if utils.is_float(self.feature_window_edit.text()):
        self.config.feature_window_size = float(self.feature_window_edit.text())
    if utils.is_float(self.repetition_interval_edit.text()):
        self.config.repetition_interval = float(self.repetition_interval_edit.text())
    if utils.is_float(self.detection_threshold_edit.text()):
        self.config.detection_threshold = float(self.detection_threshold_edit.text())
    self.online_training = self.online_training_checkbox.isChecked()
    self.log(f"Starting data stream, online training? {self.online_training}")
    if self.online_training:
        self.online_training_timer = QTimer()
        self.online_training_timer.singleShot(self.MENTAL_TASK_DELAY, self.next_mental_task)
    self.initialize_data_buffer()
    self.samples_push_count = 0
    self.online_training_samples_push_count = 0
    if self.reading_timer is None:
        print("Starting data stream...")
        self.board.start_stream()
        self.reading_timer = QTimer()
        self.reading_timer.timeout.connect(self.read_data)
        # TODO: Replace with a config variable
        self.reading_timer.start(100)

def test_is_float(self):
    self.assertIsNone(utils.is_float('valor'))
    self.assertEqual(10.0, utils.is_float('10'))
    self.assertEqual(10.0, utils.is_float(10))
    self.assertIsNone(utils.is_number('valor'))
    self.assertEqual(10, utils.is_number('10'))
    self.assertEqual(10, utils.is_number(10))

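# Note: the assertions above imply that utils.is_float / utils.is_number return
# the parsed value (or None on failure) rather than a boolean. A minimal sketch
# of helpers consistent with this test; the real utils module may differ:
def is_float(value):
    """Return float(value) if it parses, else None."""
    try:
        return float(value)
    except (TypeError, ValueError):
        return None


def is_number(value):
    """Return int(value) if it parses as an int, falling back to float, else None."""
    try:
        return int(value)
    except (TypeError, ValueError):
        try:
            return float(value)
        except (TypeError, ValueError):
            return None
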
def save_item(self):
    if len(self.ui.itemNameField.text().strip()) <= 0:
        self.ui.errorMessagesLabel.setText("Item name is required!")
    elif item_name_exists(self.ui.itemNameField.text().strip()):
        self.ui.errorMessagesLabel.setText(
            "Item with that name already exists in database")
    elif len(self.ui.itemCostPriceField.text().strip()) <= 0 or (
            not is_float(self.ui.itemCostPriceField.text().strip())):
        self.ui.errorMessagesLabel.setText(
            "Cost Price is required! (Use valid characters)")
    elif len(self.ui.itemPriceField.text().strip()) <= 0 or (
            not is_float(self.ui.itemPriceField.text().strip())):
        self.ui.errorMessagesLabel.setText(
            "Unit Price is required! (Use valid characters)")
    elif len(self.ui.itemQuantityField.text().strip()) <= 0 or (
            not is_integer(self.ui.itemQuantityField.text().strip())):
        self.ui.errorMessagesLabel.setText(
            "Quantity is required! (Be sure to enter an integer)")
    else:
        item_name = self.ui.itemNameField.text().strip()
        cost_price = self.ui.itemCostPriceField.text().strip()
        unit_price = self.ui.itemPriceField.text().strip()
        quantity = self.ui.itemQuantityField.text().strip()
        if len(self.ui.itemDescField.toPlainText().strip()) > 0:
            description = self.ui.itemDescField.toPlainText().strip()
            if add_item('', item_name, description, cost_price, unit_price, quantity):
                self.new_item_added.emit()
                self.close()
        else:
            if add_item('', item_name, '', cost_price, unit_price, quantity):
                self.new_item_added.emit()
                self.close()

def write(self, new_data):
    if len(self.sound_list) != 0:
        return
    new_data = new_data.split(',')
    if len(new_data) != 2:
        self.sound_list.append('data_error')
        return
    ecg, gsr = new_data
    if not is_float(ecg):
        self.sound_list.append('ecg')
    if not is_float(gsr):
        self.sound_list.append('gsr')
    if not is_float(ecg) or not is_float(gsr):
        return
    ecg = float(ecg)
    gsr = float(gsr)
    if self.ecg_upper_limit <= ecg or ecg <= self.ecg_lower_limit:
        self.sound_list.append('ecg')
    if self.gsr_upper_limit <= gsr or gsr <= self.gsr_lower_limit:
        self.sound_list.append('gsr')
    if len(self.sound_list) != 0:
        self.play_sound()

def test_is_float(self):
    # Test that the float finding function is correct
    fl = utils.is_float('3.43')
    not_fl = utils.is_float('3')
    also_not_fl = utils.is_float('Musher')
    date = utils.is_float('3:4:45')
    self.assertTrue(fl)
    self.assertFalse(not_fl)
    self.assertFalse(also_not_fl)
    self.assertFalse(date)

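# This suite expects a stricter is_float than plain float() parsing: '3' (no
# decimal point) and the date-like '3:4:45' must both be rejected. One
# regex-based sketch satisfying exactly these assertions (an assumption, not
# necessarily the project's actual implementation):
import re

_DECIMAL_RE = re.compile(r'^[+-]?\d+\.\d+$')


def is_float(text):
    """True only for decimal literals such as '3.43'; bare integers fail."""
    return bool(_DECIMAL_RE.match(text))
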
def blocks_to_str(blocks, name, physio_start=None, convert=True):
    out = '#-----------------' + name + '----------------------#\n'
    if physio_start is not None:
        out += 'Physio start: ' + time.asctime(time.gmtime(physio_start)) + '\n'
    width = len(out) - 3
    for s, e, c in blocks:
        if convert and is_float(s):
            s = time.asctime(time.gmtime(s))
        if convert and is_float(e):
            e = time.asctime(time.gmtime(e))
        out += str(s) + ' | ' + str(e) + ' | ' + str(c) + '\n'
    out += '#' + '-' * width + '#\n'
    return out

def points_to_basis_dists(self, points):
    assert is_mat(points)
    assert is_float(points)
    (N, D) = points.shape
    assert D == self.grid.get_dim()

    G = self.grid

    # Get indices
    cell_coords = G.points_to_cell_coords(points)

    # Get rel distances
    rel_dist = G.points_to_low_vertex_rel_distance(points, cell_coords)
    assert (N, D) == rel_dist.shape

    # Get the vertices
    vertices = self.grid.cell_coords_to_vertex_indices(cell_coords)
    assert (N, 2 ** D) == vertices.shape

    # Calculate multilinear interp weights from distances
    weights = np.empty((N, 2 ** D))
    for (i, bin_vertex) in enumerate(itertools.product([0, 1], repeat=D)):
        vert_mask = np.array(bin_vertex, dtype=bool)
        weights[:, i] = np.prod(rel_dist[:, vert_mask], axis=1) * np.prod(
            1.0 - rel_dist[:, ~vert_mask], axis=1)

    point_dist = self.convert_to_sparse_matrix(cell_coords, vertices, weights)
    return point_dist

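# In this snippet is_mat / is_float / is_int are applied to NumPy arrays, so
# they are presumably shape and dtype predicates rather than string parsers.
# A minimal sketch under that assumption:
import numpy as np


def is_mat(arr):
    """True if arr is a 2-D NumPy array."""
    return isinstance(arr, np.ndarray) and arr.ndim == 2


def is_float(arr):
    """True if arr has a floating-point dtype."""
    return isinstance(arr, np.ndarray) and np.issubdtype(arr.dtype, np.floating)


def is_int(arr):
    """True if arr has an integer dtype."""
    return isinstance(arr, np.ndarray) and np.issubdtype(arr.dtype, np.integer)
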
def sanitize(filename):
    """
    Opens NCSMC output file, reads each line, separates into text lines and
    number lines. Sets NaN values to zero.

    filename: ncsmc eigenphase_shift or phase_shift file path

    returns:
    - one list of strings, for title lines
    - one list of number lines, each entry is a sub-list of floats
    """
    with open(filename, "r+") as read_file:
        lines = read_file.readlines()
    text_lines = []
    number_lines = []
    for line in lines:
        nums = line.split()
        # replace all NaNs with zero
        for i, num in enumerate(nums):
            if num == "NaN":
                nums[i] = 0
        if not all(utils.is_float(num) for num in nums):
            # save these for title purposes
            text_lines.append(line)
        else:
            # save numbers for analysis later
            nums = [float(n) for n in nums]
            number_lines.append(nums)
    return text_lines, number_lines

def get_examination(self, file_path):
    """Examination data starts with A_ or C_."""
    self._update_btk(file_path)
    features = ["NaN"] * len(self.list_labels)
    meta_subject = self.metadata_reader.FindChild("SUBJECTS").value()
    n_childs = meta_subject.GetChildNumber()
    for child_id in range(1, n_childs):
        label = meta_subject.GetChild(child_id).GetLabel()
        if label.lower() in self.list_labels:
            value = (meta_subject.FindChild(
                label).value().GetInfo().ToString()[0].replace(" ", ""))
            symb = ""  # default, so the +/- adjustment below never sees an unbound name
            # guard the empty string before indexing the last character
            if len(value) >= 1 and value[-1] in "-+_*°":
                symb = value[-1]
                value = value[:-1]
            if is_float(value):
                value = float(value)
                if symb == "+":
                    value = value + 0.25
                elif symb == "-":
                    value = value - 0.25
                value = str(value)
            if value.lower() == "nt":
                value = "NaN"
            if len(value) == 0 or (value[-1] in "-+_*°" and len(value) == 1):
                value = "NaN"
            if value == "null":
                value = "NaN"
            label_idx = self.list_labels.index(label.lower())
            features[label_idx] = value
    return features

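# Worked example (hypothetical entry): a value stored as "12+" is split into
# "12" plus symb '+', parsed to 12.0, adjusted to 12.25, and re-stringified;
# "NT", "null", empty strings, and leftover bare symbols all normalise to "NaN".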
def on_group_spawn_interval_right_changed(self, e):
    if len(map_settings.settings.falls) > 0:
        if len(map_settings.settings.falls[self.current_selected_fall].groups) > 0:
            curr_group = map_settings.settings.falls[
                self.current_selected_fall].groups[self.current_selected_group]
            if len(e.text) > 0 and utils.is_float(e.text):
                curr_group.interval[1] = float(e.text)
            else:
                curr_group.interval[1] = 0.0
            self.update_group_panel()

def do_where(self, my_df, attr, value, opr):
    tbl, attr = self.extract_ta(attr)
    # if tbl is None:
    #     pass
    # else:
    #     table = self.alias_map[tbl]
    if isinstance(value, list):
        return self.do_dynamic_where(my_df, attr, value[0], opr, value[2], value[1])
    elif utils.is_float(value) or utils.is_date(value) or utils.is_quoted(value):
        par = utils.extract_data(value)
        return self.do_fix_where(my_df, attr, par, opr)
    else:
        return self.do_dynamic_where(my_df, attr, value, opr)

def read_item(self):
    if len(self.buff) == 0:
        return t.Null()
    token = self.buff.pop(0)
    if token == '(':
        return self.read_list(')')
    elif token in ['"', "'"]:
        return self.read_str(token)
    elif token == '`':
        return t.Quote(self.read_item())
    elif is_int(token):
        return t.Int(token)
    elif is_float(token):
        return t.Real(token)
    else:
        return t.Symbol(token)

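# The reader above tries is_int before is_float, so '42' becomes t.Int and
# '3.14' becomes t.Real. A hedged sketch of predicates compatible with that
# ordering (the project's own versions may differ):
def is_int(token):
    """True if the token parses as an integer literal."""
    try:
        int(token)
        return True
    except ValueError:
        return False


def is_float(token):
    """True if the token parses as a float literal; checked only after is_int."""
    try:
        float(token)
        return True
    except ValueError:
        return False
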
def parse_arguments():
    args = {}
    last_key = ''
    for i in xrange(1, len(sys.argv)):
        a = sys.argv[i]
        if a[0] == '-' and not utils.is_float(a):
            last_key = a
            args[a] = []
        elif last_key != '':
            arg_values = args[last_key]
            arg_values.append(a)
            args[last_key] = arg_values
    return args

def convert_to_sparse_matrix(self, cell_coords, vertices, weights):
    assert isinstance(cell_coords, Coordinates)
    assert cell_coords.check()
    assert is_mat(vertices)
    assert is_int(vertices)
    assert is_mat(weights)
    assert is_float(weights)

    (N, D) = cell_coords.shape
    assert vertices.shape == weights.shape
    assert (N, 2 ** D) == vertices.shape
    assert D == self.dim

    oob_mask = cell_coords.oob.mask
    num_oob = cell_coords.oob.num_oob()
    num_normal = N - num_oob
    assert num_oob >= 0
    assert num_normal >= 0

    normal_idx = np.arange(N)[~oob_mask]
    oob_idx = np.arange(N)[oob_mask]

    m = num_normal * (2 ** D)  # Space for normal points
    M = m + num_oob  # Add on space for oob nodes
    cols = np.empty(M)
    rows = np.empty(M)
    data = np.empty(M)

    # Add normal weights
    cols[:m] = (np.tile(normal_idx, (2 ** D, 1)).T).flatten()
    rows[:m] = (vertices[~oob_mask, :]).flatten()
    data[:m] = (weights[~oob_mask, :]).flatten()

    # Route all oob points to oob node
    cols[m:] = oob_idx
    rows[m:] = vertices[oob_mask, 0]
    data[m:] = np.ones(num_oob)

    NN = self.grid.get_num_total_nodes()
    point_dist = sps.coo_matrix((data, (rows, cols)), shape=(NN, N))
    point_dist = point_dist.tocsr()
    point_dist.eliminate_zeros()
    return point_dist

def parse_arguments():
    args = {}
    last_key = ''
    if len(sys.argv) == 1:
        controller.handle_no_args()
        return None
    for i in range(1, len(sys.argv)):
        a = sys.argv[i]
        if a[0] == '-' and not utils.is_float(a):
            last_key = a
            args[a] = []
        elif last_key != '':
            arg_values = args[last_key]
            arg_values.append(a)
            args[last_key] = arg_values
    return args

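# Example run: the is_float() guard exists so that negative numbers are kept as
# values instead of starting a new flag (assuming utils.is_float accepts
# strings like '-0.5'):
#
#   sys.argv = ['prog', '-w', '-0.5', '10', '-v']
#   parse_arguments()  ->  {'-w': ['-0.5', '10'], '-v': []}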
async def graph(self, ctx, start: float, end: float, *formulas):
    '''
    Plots a given mathematical function
    Arguments: start, end, f(x)
    Supported operations:
    Basic: +, -, *, /
    Modulus: % or mod
    Powers: ^
    Square roots: sqrt()
    Logarithms: log() (base=e)
    Absolute value: abs()
    Rounding: round(), floor(), ceil()
    Trigonometry: sin(), cos(), tan() (in radians)
    Parentheses: ()
    Constants: pi, e, phi, tau, etc...
    Example: graph -10 10 x
    '''
    addCommand()
    await ctx.channel.trigger_typing()

    formula = ' '.join(formulas).strip()

    if not is_float(start) or not is_float(end):
        raise commands.CommandError(message=f'Invalid argument(s): `start/end`.')
    elif start >= end:
        raise commands.CommandError(message=f'Invalid arguments: `start`, `end`.')
    if not formula:
        raise commands.CommandError(message=f'Required argument missing: `formula`.')

    try:
        input = format_input(formula.lower(), 1)
        x = np.linspace(start, end, 250)

        plt.style.use('dark_background')
        fig, ax = plt.subplots()

        manager = multiprocessing.Manager()
        val = manager.dict()
        p = multiprocessing.Process(target=plot_func, args=(x, input, val))
        p.start()
        p.join(5)
        if p.is_alive():
            p.terminate()
            p.join()
            raise commands.CommandError('Execution timed out.')

        y = [v for v in val.values()]
        potential_error = y[len(y) - 1]
        if isinstance(potential_error, Exception):
            raise potential_error

        plt.plot(x, y, color='#47a0ff')
        ax.yaxis.grid()
        ax.xaxis.grid()
        plt.xlim(start, end)
        plt.savefig('images/math_graph.png', transparent=True)
        plt.close(fig)

        with open('images/math_graph.png', 'rb') as f:
            file = io.BytesIO(f.read())
        image = discord.File(file, filename='math_graph.png')

        formula = beautify_input(formula)
        embed = discord.Embed(title='Graph', description=f'`𝘧(𝓍) = {formula}`')
        embed.set_footer(text='Wrong? Please let me know! DM Chatty#0001')
        embed.set_image(url=f'attachment://math_graph.png')
        await ctx.send(file=image, embed=embed)
    except Exception as e:
        raise commands.CommandError(
            message=f'Invalid mathematical expression: \n```{e}```')

def order_results(request, pk):
    order = Orders.objects.get(pk=pk)
    if request.method == 'POST':
        for p_tes in request.POST:
            if p_tes.startswith('test_'):
                o_order = Orders.objects.get(pk=pk)
                o_test = Tests.objects.get(pk=p_tes.split('_')[1])
                # check current result
                if request.POST.get(p_tes, '') != '':
                    # get current result
                    cr_res_a = ''
                    try:
                        cr_res = models.OrderResults.objects.get(order=o_order, test=o_test)
                        cr_res_a = cr_res.result.alfa_result
                    except:
                        pass
                    if not cr_res_a == request.POST.get(p_tes, ''):
                        if request.user.is_authenticated():
                            alfa_res = request.POST.get(p_tes, '')
                            o_result = models.Results(order=o_order, test=o_test,
                                                      alfa_result=alfa_res)
                            o_result.save()
                            flag = None
                            ord_res = models.OrderResults.objects.get(order=o_order, test=o_test)
                            if is_float(alfa_res) and ord_res.ref_range:
                                if str(ord_res.ref_range).find(' - ') > 0:
                                    # range, e.g. "1.0 - 2.0"
                                    ref_parts = str(ord_res.ref_range).split(' - ')
                                    if float(ref_parts[0]) <= float(alfa_res) <= float(ref_parts[1]):
                                        flag = 'N'
                                    elif float(alfa_res) >= float(ref_parts[1]):
                                        flag = 'H'
                                    else:
                                        flag = 'L'
                                elif '<' in str(ord_res.ref_range) or '>' in str(ord_res.ref_range):
                                    ref_parts = str(ord_res.ref_range).split(' ')
                                    if str(ref_parts[0]) == '>':
                                        if float(alfa_res) > float(ref_parts[1]):
                                            flag = 'N'
                                        else:
                                            flag = 'L'
                                    elif str(ref_parts[0]) == '<':
                                        if float(alfa_res) < float(ref_parts[1]):
                                            flag = 'N'
                                        else:
                                            flag = 'H'
                                    elif str(ref_parts[0]) == '>=':
                                        if float(alfa_res) >= float(ref_parts[1]):
                                            flag = 'N'
                                        else:
                                            # below a ">=" lower limit flags low
                                            flag = 'L'
                                    elif str(ref_parts[0]) == '<=':
                                        if float(alfa_res) <= float(ref_parts[1]):
                                            flag = 'N'
                                        else:
                                            flag = 'H'
                            else:
                                flag = 'A'
                            ord_res.result = o_result
                            ord_res.patologi_mark = flag
                            ord_res.validation_status = 1
                            ord_res.save()
                            # create history
                            act_txt = 'Result %s set for analyt %s ' % (
                                request.POST.get(p_tes, ''), o_test)
                            his_order = models.HistoryOrders(order=o_order, test=o_test,
                                                             action_code='RESENTRY',
                                                             action_user=str(request.user),
                                                             action_date=datetime.now(),
                                                             action_text=act_txt)
                            his_order.save()
    # Reference range update
    orders = Orders.objects.get(pk=pk)
    ordertests = models.OrderResults.objects.filter(order=orders).values(
        'test_id', 'test__test_group__name', 'test__name', 'test__result_type',
        'result__alfa_result', 'is_header', 'unit', 'ref_range', 'patologi_mark',
        'validation_status', 'result__instrument__name', 'techval_user',
        'medval_user').order_by('test__test_group__sort', 'test__sort')
    # save report URL
    oe, _created = models.OrderExtended.objects.get_or_create(order_id=pk)
    template = 'middleware/order_results.html'
    context = {'order': order, 'orders': orders, 'ordertests': ordertests}
    return render(request, template, context)

def addtrack():
    Name = request.form['Name']
    AlbumId = request.form['AlbumId']
    GenreId = request.form['GenreId']
    Composer = request.form['Composer']
    Seconds = request.form['Seconds']
    UnitPrice = request.form['UnitPrice']
    reason = set()
    database = "iMusic.db"
    table = "Track"
    with sqlite3.connect(database) as conn:
        AlbumId_set = get_data_from_db(database, table, "AlbumId")
        GenreId_set = get_data_from_db(database, table, "GenreId")
        # Name
        if len(Name) == 0:
            reason.add("A track name must be provided")
        if len(Name) > 200:
            reason.add("A track's name cannot exceed 200 characters")
        # AlbumId
        try:
            AlbumId = int(AlbumId)
            if AlbumId not in AlbumId_set:
                reason.add("The specified AlbumId does not exist in the DB.")
        except ValueError:
            # int() fails when the string contains non-numeric characters, e.g. "100a"
            reason.add("The AlbumId format is wrong!")
        # GenreId
        try:
            GenreId = int(GenreId)
            if GenreId not in GenreId_set:
                reason.add("The specified GenreId does not exist in the DB.")
        except ValueError:
            reason.add("The GenreId format is wrong!")
        # Seconds
        try:
            Seconds = int(Seconds)
            if Seconds == 0:
                reason.add("The specified duration is too short. Must be greater than zero.")
        except ValueError:
            reason.add("The Milliseconds format is wrong!")
        # UnitPrice
        try:
            UnitPrice_float = float(UnitPrice)
            if not is_float(UnitPrice) or UnitPrice_float == 0:
                reason.add("The specified price is invalid.")
        except ValueError:
            reason.add("The specified price format is wrong")
        if len(reason) != 0:
            return render_template('add.html', error=1, msg=reason)
        try:
            cur = conn.cursor()
            cur.execute(
                'INSERT INTO Track (Name, AlbumId, GenreId, Composer, Milliseconds, UnitPrice) '
                'VALUES (?,?,?,?,?,?)',
                (Name, AlbumId, GenreId, Composer, Seconds * 1000, UnitPrice))  # seconds -> ms
            conn.commit()
            return collection()
        except Exception as e:
            logging.warning("Insert to Track API Error: {}".format(e))

def get_regularization_param(self) -> float:
    c = 1.0
    if utils.is_float(self.regularization_edit.text()):
        c = float(self.regularization_edit.text())
    return c

import json
import pprint

from csv_parser import parse_csv
from utils import is_float

if __name__ == "__main__":
    raw_data = parse_csv('data.csv')
    profit_dict_key = 'Profit (in millions)'
    # filter function is checking to see whether the profit represents a number
    valid_data = list(
        filter(lambda x: is_float(x[profit_dict_key]) == True, raw_data))
    # top 20
    top_sorted = sorted(valid_data,
                        key=lambda x: float(x[profit_dict_key]),
                        reverse=True)[:20]
    print(len(raw_data))
    print(len(valid_data))
    pprint.pprint(top_sorted, indent=1)
    # outputs data2.json with valid data
    with open('data2.json', 'w') as out_file:
        json.dump(valid_data, out_file, indent=2)

def select(self, tablename, columnstring, whereclause, limit, order_by,
           imputations_dict=None):
    """
    Our own homebrewed select query.
    First, reads codes from T and converts them to values.
    Then, filters the values based on the where clause.
    Then, fills in all imputed values, if applicable.
    Then, orders by the given order_by functions.
    Then, computes the queried values requested by the column string.

    One refactoring option: you could try generating a list of all functions
    that will be needed, either for selecting or for ordering. Then compute
    those and add them to the data tuples. Then just do the order by as if
    you're doing it exclusively on columns. The only downside is that now if
    there isn't an order by, but there is a limit, then we computed a large
    number of extra functions.
    """
    probability_query = False  ## True if at least one of the queries is for probability.
    data_query = False  ## True if at least one of the queries is for raw data.
    similarity_query = False
    typicality_query = False
    mutual_information_query = False
    M_c, M_r, T = self.persistence_layer.get_metadata_and_table(tablename)

    ## Create conds: the list of conditions in the whereclause.
    ## List of (c_idx, op, val) tuples.
    conds = list()
    if len(whereclause) > 0:
        conditions = whereclause.split(',')
        ## Order matters: need <= and >= before < and > and =.
        operator_list = ['<=', '>=', '=', '>', '<']
        operator_map = {'<=': operator.le, '<': operator.lt, '=': operator.eq,
                        '>': operator.gt, '>=': operator.ge}
        for condition in conditions:
            for operator_str in operator_list:
                if operator_str in condition:
                    op_str = operator_str
                    op = operator_map[op_str]
                    break
            vals = condition.split(op_str)
            column = vals[0].strip()
            ## Determine what type the value is
            raw_val = vals[1].strip()
            if utils.is_int(raw_val):
                val = int(raw_val)
            elif utils.is_float(raw_val):
                val = float(raw_val)
            else:
                ## val could have matching single or double quotes, which we can
                ## safely eliminate with the following safe (string literal only)
                ## implementation of eval
                val = ast.literal_eval(raw_val).lower()
            c_idx = M_c['name_to_idx'][column]
            conds.append((c_idx, op, val))

    ## Iterate through the columnstring portion of the input, and generate the query list.
    ## queries is a list of (query_type, query) tuples, where query_type is:
    ## row_id, column, probability, similarity.
    ## For row_id: query is ignored (so it is None).
    ## For column: query is a c_idx.
    ## For probability: query is a (c_idx, value) tuple.
    ## For similarity: query is a (target_row_id, target_column) tuple.
    ##
    ## TODO: Special case for SELECT *: should this be refactored to support
    ## selecting * as well as other functions?
    if '*' in columnstring:
        query_colnames = []
        queries = []
        data_query = True
        for idx in range(len(M_c['name_to_idx'].keys())):
            queries.append(('column', idx))
            query_colnames.append(M_c['idx_to_name'][str(idx)])
    else:
        query_colnames = [colname.strip()
                          for colname in utils.column_string_splitter(columnstring)]
        queries = []
        for idx, colname in enumerate(query_colnames):
            ## Check if probability query
            prob_match = re.search(r"""
                probability\s*
                \(\s*
                (?P<column>[^\s]+)\s*=\s*(?P<value>[^\s]+)
                \s*\)
            """, colname, re.VERBOSE | re.IGNORECASE)
            if prob_match:
                column = prob_match.group('column')
                c_idx = M_c['name_to_idx'][column]
                value = prob_match.group('value')
                if utils.is_int(value):
                    value = int(value)
                elif utils.is_float(value):
                    value = float(value)
                ## TODO: need to escape strings here with ast.eval... call?
                queries.append(('probability', (c_idx, value)))
                probability_query = True
                continue

            ## Check if similarity query
            similarity_match = re.search(r"""
                similarity\s+to\s+
                (?P<rowid>[^\s]+)
                (\s+with\s+respect\s+to\s+(?P<column>[^\s]+))?
            """, colname, re.VERBOSE | re.IGNORECASE)
            ## Try 2nd type of similarity syntax. Add "contextual similarity"
            ## for when cols are present?
            if not similarity_match:
                similarity_match = re.search(r"""
                    similarity_to\s*\(\s*
                    (?P<rowid>[^,]+)
                    (\s*,\s*(?P<column>[^\s]+)\s*)?
                    \s*\)
                """, colname, re.VERBOSE | re.IGNORECASE)
            if similarity_match:
                rowid = similarity_match.group('rowid').strip()
                if utils.is_int(rowid):
                    target_row_id = int(rowid)
                else:
                    ## Instead of specifying an integer for rowid, you can specify a where clause.
                    where_vals = rowid.split('=')
                    where_colname = where_vals[0]
                    where_val = where_vals[1]
                    if type(where_val) == str or type(where_val) == unicode:
                        where_val = ast.literal_eval(where_val)
                    ## Look up the row_id where this column has this value!
                    c_idx = M_c['name_to_idx'][where_colname.lower()]
                    for row_id, T_row in enumerate(T):
                        row_values = utils.convert_row(T_row, M_c)
                        if row_values[c_idx] == where_val:
                            target_row_id = row_id
                            break
                if similarity_match.group('column'):
                    target_column = similarity_match.group('column').strip()
                else:
                    target_column = None
                queries.append(('similarity', (target_row_id, target_column)))
                similarity_query = True
                continue

            ## Check if row structural anomalousness/typicality query
            row_typicality_match = re.search(r"""
                row_typicality
            """, colname, re.VERBOSE | re.IGNORECASE)
            if row_typicality_match:
                queries.append(('row_typicality', None))
                typicality_query = True
                continue

            ## Check if col structural typicality/typicality query
            col_typicality_match = re.search(r"""
                col_typicality\s*\(\s*
                (?P<column>[^\s]+)
                \s*\)
            """, colname, re.VERBOSE | re.IGNORECASE)
            if col_typicality_match:
                colname = col_typicality_match.group('column').strip()
                queries.append(('col_typicality', M_c['name_to_idx'][colname]))
                typicality_query = True
                continue

            ## Check if predictive probability query
            ## TODO: demo (last priority)

            ## Check if mutual information query - AGGREGATE
            mutual_information_match = re.search(r"""
                mutual_information\s*\(\s*
                (?P<col1>[^\s]+)
                \s*,\s*
                (?P<col2>[^\s]+)
                \s*\)
            """, colname, re.VERBOSE | re.IGNORECASE)
            if mutual_information_match:
                col1 = mutual_information_match.group('col1')
                col2 = mutual_information_match.group('col2')
                queries.append(('mutual_information',
                                (M_c['name_to_idx'][col1], M_c['name_to_idx'][col2])))
                mutual_information_query = True
                continue

            ## If none of above query types matched, then this is a normal column query.
            queries.append(('column', M_c['name_to_idx'][colname]))
            data_query = True

    ## Always return row_id as the first column.
    query_colnames = ['row_id'] + query_colnames
    queries = [('row_id', None)] + queries

    ## Helper function that applies WHERE conditions to row, returning True if
    ## row satisfies where clause. Every condition must hold, so we only bail
    ## out early on the first failing condition.
    def is_row_valid(idx, row):
        for (c_idx, op, val) in conds:
            if type(row[c_idx]) == str or type(row[c_idx]) == unicode:
                if not op(row[c_idx].lower(), val):
                    return False
            elif not op(row[c_idx], val):
                return False
        return True

    ## If probability query: get latent states, and simple predictive probability givens (Y).
    ## TODO: Pretty sure this is the wrong way to get Y.
    if probability_query or similarity_query or order_by or typicality_query \
            or mutual_information_query:
        X_L_list, X_D_list, M_c = self.persistence_layer.get_latent_states(tablename)
        Y = None
        #if probability_query:
        #    if whereclause == "" or '=' not in whereclause:
        #        Y = None
        '''
        else:
            varlist = [[c.strip() for c in b.split('=')] for b in whereclause.split('AND')]
            Y = [(numrows+1, name_to_idx[colname], colval) for colname, colval in varlist]
            # map values to codes
            Y = [(r, c, du.convert_value_to_code(M_c, c, colval)) for r, c, colval in Y]
        '''

    ## If there are only aggregate values, then only return one row.
    ## TODO: is this actually right? Or is probability also a function of row?
    ## If so: get rid of this.
    aggregates_only = reduce(lambda v, q: (q[0] == 'probability' or
                                           q[0] == 'col_typicality' or
                                           q[0] == 'mutual_information') and v,
                             queries[1:], True)
    if aggregates_only:
        limit = 1

    ## Iterate through all rows of T, convert codes to values, filter by all
    ## predicates in where clause, and fill in imputed values.
    filtered_values = list()
    for row_id, T_row in enumerate(T):
        row_values = utils.convert_row(T_row, M_c)  ## Convert row from codes to values
        if is_row_valid(row_id, row_values):  ## Where clause filtering.
            if imputations_dict and len(imputations_dict[row_id]) > 0:
                ## Fill in any imputed values.
                for col_idx, value in imputations_dict[row_id].items():
                    row_values = list(row_values)
                    row_values[col_idx] = '*' + str(value)
                row_values = tuple(row_values)
            filtered_values.append((row_id, row_values))

    ## Apply order by, if applicable.
    if order_by:
        ## Step 1: get appropriate functions. Examples are 'column' and 'similarity'.
        function_list = list()
        for orderable in order_by:
            function_name, args_dict = orderable
            args_dict['M_c'] = M_c
            args_dict['X_L_list'] = X_L_list
            args_dict['X_D_list'] = X_D_list
            args_dict['T'] = T
            ## TODO: use something more understandable and less brittle than getattr here.
            method = getattr(self, '_get_%s_function' % function_name)
            argnames = inspect.getargspec(method)[0]
            args = [args_dict[argname] for argname in argnames if argname in args_dict]
            function = method(*args)
            if args_dict['desc']:
                ## Bind the current function via a default argument; a direct
                ## reference to `function` inside the lambda would recurse forever.
                function = lambda row_id, data_values, f=function: -1 * f(row_id, data_values)
            function_list.append(function)
        ## Step 2: call order by.
        filtered_values = self._order_by(filtered_values, function_list)

    ## Now: generate result set by getting the desired elements of each row,
    ## iterating through queries.
    data = []
    row_count = 0
    for row_id, row_values in filtered_values:
        ret_row = []
        for (query_type, query) in queries:
            if query_type == 'row_id':
                ret_row.append(row_id)
            elif query_type == 'column':
                col_idx = query
                val = row_values[col_idx]
                ret_row.append(val)
            elif query_type == 'probability':
                c_idx, value = query
                if M_c['column_metadata'][c_idx]['code_to_value']:
                    val = float(M_c['column_metadata'][c_idx]['code_to_value'][str(value)])
                else:
                    val = value
                ## row is set to 1 + max row, instead of this row.
                Q = [(len(X_D_list[0][0]) + 1, c_idx, val)]
                prob = math.exp(self.backend.simple_predictive_probability_multistate(
                    M_c, X_L_list, X_D_list, Y, Q))
                ret_row.append(prob)
            elif query_type == 'similarity':
                target_row_id, target_column = query
                sim = self.backend.similarity(M_c, X_L_list, X_D_list, row_id,
                                              target_row_id, target_column)
                ret_row.append(sim)
            elif query_type == 'row_typicality':
                anom = self.backend.row_structural_typicality(X_L_list, X_D_list, row_id)
                ret_row.append(anom)
            elif query_type == 'col_typicality':
                c_idx = query
                anom = self.backend.column_structural_typicality(X_L_list, c_idx)
                ret_row.append(anom)
            elif query_type == 'predictive_probability':
                c_idx = query
                ## WARNING: this backend call doesn't work for multinomial
                ## TODO: need to test
                Q = [(row_id, c_idx,
                      du.convert_value_to_code(M_c, c_idx, T[row_id][c_idx]))]
                Y = []
                prob = math.exp(self.backend.simple_predictive_probability_multistate(
                    M_c, X_L_list, X_D_list, Y, Q))
                ret_row.append(prob)
            elif query_type == 'mutual_information':
                c_idx1, c_idx2 = query
                mutual_info, linfoot = self.backend.mutual_information(
                    M_c, X_L_list, X_D_list, [(c_idx1, c_idx2)])
                mutual_info = numpy.mean(mutual_info)
                ret_row.append(mutual_info)
        data.append(tuple(ret_row))
        row_count += 1
        if row_count >= limit:
            break

    ## Prepare for return
    ret = dict(message='', data=data, columns=query_colnames)
    return ret

def executeVM(quadruples, global_variables_dict, function_dict, constant_dict,
              curr_func_temp_vars):
    instructionPointer = 0
    quadruplesLen = len(quadruples)
    execution_memory = Memory(global_variables_dict, constant_dict, curr_func_temp_vars)
    prints = []

    # Start executing quadruples
    while instructionPointer < quadruplesLen:
        curr_quad = quadruples[instructionPointer]
        # print instructionPointer, curr_quad
        curr_operation = curr_quad[0]
        curr_left_op = curr_quad[1]
        curr_right_op = curr_quad[2]
        curr_result = curr_quad[3]

        # GOTO OPERATION
        if curr_operation == 'GOTO':
            instructionPointer = curr_result - 1
        # GOTOF OPERATION
        elif curr_operation == 'GOTOF':
            [value, _, _, _] = execution_memory.get_address_context(curr_right_op)
            if value == 'falso':
                instructionPointer = curr_result - 1
        # SUM OPERATION
        elif curr_operation == '+':
            [value_left, value_right, result_context, result_calc_index] = \
                binaryRegularOperation(execution_memory, curr_left_op,
                                       curr_right_op, curr_result, curr_operation)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, value_left + value_right)
        # SUBTRACT OPERATION
        elif curr_operation == '-':
            [value_left, value_right, result_context, result_calc_index] = \
                binaryRegularOperation(execution_memory, curr_left_op,
                                       curr_right_op, curr_result, curr_operation)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, value_left - value_right)
        # MULTIPLICATION OPERATION
        elif curr_operation == '*':
            [value_left, value_right, result_context, result_calc_index] = \
                binaryRegularOperation(execution_memory, curr_left_op,
                                       curr_right_op, curr_result, curr_operation)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, value_left * value_right)
        # DIVISION OPERATION
        elif curr_operation == '/':
            [value_left, value_right, result_context, result_calc_index] = \
                binaryRegularOperation(execution_memory, curr_left_op,
                                       curr_right_op, curr_result, curr_operation)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, value_left / value_right)
        # LESS THAN OPERATION
        elif curr_operation == '<':
            [value_left, value_right, result_context, result_calc_index] = \
                binaryRegularOperation(execution_memory, curr_left_op,
                                       curr_right_op, curr_result, curr_operation)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, cast_bool(value_left < value_right))
        # LESS OR EQUAL THAN OPERATION
        elif curr_operation == '<=':
            [value_left, value_right, result_context, result_calc_index] = \
                binaryRegularOperation(execution_memory, curr_left_op,
                                       curr_right_op, curr_result, curr_operation)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, cast_bool(value_left <= value_right))
        # GREATER THAN OPERATION
        elif curr_operation == '>':
            [value_left, value_right, result_context, result_calc_index] = \
                binaryRegularOperation(execution_memory, curr_left_op,
                                       curr_right_op, curr_result, curr_operation)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, cast_bool(value_left > value_right))
        # GREATER OR EQUAL THAN OPERATION
        elif curr_operation == '>=':
            [value_left, value_right, result_context, result_calc_index] = \
                binaryRegularOperation(execution_memory, curr_left_op,
                                       curr_right_op, curr_result, curr_operation)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, cast_bool(value_left >= value_right))
        # EQUAL OPERATION
        elif curr_operation == '==':
            [value_left, value_right, result_context, result_calc_index] = \
                binaryRegularOperation(execution_memory, curr_left_op,
                                       curr_right_op, curr_result, curr_operation)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, cast_bool(value_left == value_right))
        # NOT EQUAL OPERATION
        elif curr_operation == '!=':
            [value_left, value_right, result_context, result_calc_index] = \
                binaryRegularOperation(execution_memory, curr_left_op,
                                       curr_right_op, curr_result, curr_operation)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, cast_bool(value_left != value_right))
        # AND OPERATION
        elif curr_operation == 'y':
            [value_left, value_right, result_context, result_calc_index] = \
                binaryRegularOperation(execution_memory, curr_left_op,
                                       curr_right_op, curr_result, curr_operation)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index,
                cast_bool(cast_bool_inverse(value_left) and cast_bool_inverse(value_right)))
        # OR OPERATION
        elif curr_operation == 'o':
            [value_left, value_right, result_context, result_calc_index] = \
                binaryRegularOperation(execution_memory, curr_left_op,
                                       curr_right_op, curr_result, curr_operation)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index,
                cast_bool(cast_bool_inverse(value_left) or cast_bool_inverse(value_right)))
        # ASSIGN OPERATION
        elif curr_operation == '=':
            [value, result_type, result_context, result_calc_index] = \
                unaryRegularOperation(execution_memory, curr_right_op,
                                      curr_left_op, curr_operation)
            if result_type == 'num':
                value = int(value)
            elif result_type == 'dec':
                value = float(value)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, value)
        # NEGATIVE UNARY OPERATION
        elif curr_operation == '-u':
            [value, _, result_context, result_calc_index] = \
                unaryRegularOperation(execution_memory, curr_right_op,
                                      curr_result, curr_operation)
            [value, _, _, _] = execution_memory.get_address_context(curr_right_op)
            [_, _, result_context, result_calc_index] = \
                execution_memory.get_address_context(curr_result)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, value * -1)
        # POSITIVE UNARY OPERATION
        elif curr_operation == '+u':
            [value, _, result_context, result_calc_index] = \
                unaryRegularOperation(execution_memory, curr_right_op,
                                      curr_result, curr_operation)
            if value < 0:
                value *= -1
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, value)
        # RANDOM OPERATION
        elif curr_operation == 'aleatorio':
            [value_left, value_right, result_context, result_calc_index] = \
                binaryRegularOperation(execution_memory, curr_left_op,
                                       curr_right_op, curr_result, curr_operation)
            if (value_left > value_right):
                [value_right, value_left] = [value_left, value_right]
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, randint(value_left, value_right))
        # PRINT COMMAND
        elif curr_operation == 'imprimir':
            params = []
            for param in curr_right_op:
                # Print arrays
                if isinstance(param, list):
                    base_address = param[0]
                    arr_len = param[1]
                    array_values = []
                    i = 0
                    while i < arr_len:
                        [value, _, _, _] = \
                            execution_memory.get_address_context(base_address + i)
                        if value == None:
                            array_values.append("Nulo")
                        else:
                            array_values.append(value)
                        i += 1
                    params.append(array_values)
                else:
                    [value, _] = operationlessAction(execution_memory, param, curr_operation)
                    params.append(value)
            print "".join(map(str, params))
            prints.append("".join(map(str, params)))
        # READ COMMAND
        elif curr_operation == 'leer':
            curr_address = curr_right_op[0]
            curr_name = curr_right_op[1]
            [_, result_type, result_context, result_calc_index] = \
                execution_memory.get_address_context(curr_address)
            user_input = raw_input(
                "Inserta valor para variable {}: \n".format(curr_name))
            if result_type == 'num':
                user_input = is_float(user_input)
                if not user_input:
                    raise TiposErroneos('leer')
                user_input = int(user_input)
            elif result_type == 'dec':
                user_input = is_float(user_input)
                if not user_input:
                    raise TiposErroneos('leer')
            elif result_type == 'tex':
                user_input = str(user_input).rstrip()
            elif result_type == 'bin':
                user_input = is_boolean(user_input)
                if not user_input:
                    raise TiposErroneos('leer')
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, user_input)
        # ARRAY PUSH OPERATION
        elif curr_operation == 'agregar':
            arr_base_address = curr_left_op[0]
            arr_len = curr_left_op[1]
            value_address = curr_right_op[0]
            index_address = curr_right_op[1]
            index_value = verifyArrayBounds(execution_memory, index_address, arr_len)
            # Get value and context to save
            [value, _, _, _] = execution_memory.get_address_context(value_address)
            [_, _, result_context, result_calc_index] = \
                execution_memory.get_address_context(arr_base_address + index_value)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, value)
        # ARRAY ACCESS OPERATION
        elif curr_operation == 'accesar':
            arr_base_address = curr_left_op[0]
            arr_len = curr_left_op[1]
            [_, _, result_context, result_calc_index] = \
                execution_memory.get_address_context(curr_result)
            index_value = verifyArrayBounds(execution_memory, curr_right_op, arr_len)
            [value, _, _, _] = \
                execution_memory.get_address_context(arr_base_address + index_value)
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, value)
        # ARRAY POP OPERATION
        elif curr_operation == 'sacar':
            arr_base_address = curr_left_op[0]
            arr_len = curr_left_op[1]
            # Get context for temporary var that will hold popped value
            [_, _, result_context, result_calc_index] = \
                execution_memory.get_address_context(curr_result)
            index_value = verifyArrayBounds(execution_memory, curr_right_op, arr_len)
            # Get current value of array position, and store its context
            [value, _, to_delete_context, to_delete_calc_index] = \
                execution_memory.get_address_context(arr_base_address + index_value)
            # Clear position in array
            execution_memory.set_value_from_context_address(
                to_delete_context, to_delete_calc_index, None)
            # Set previous value in temporary variable
            execution_memory.set_value_from_context_address(
                result_context, result_calc_index, value)
        # ERA OPERATION
        elif curr_operation == 'ERA':
            curr_func = function_dict[curr_left_op]
            execution_memory.initFunction(curr_func['local_count'],
                                          curr_func['temp_count'])
        # PARAM OPERATION
        elif curr_operation == 'PARAM':
            # Verify if param is an array, to copy all values into memory
            if isinstance(curr_left_op, list):
                for i in range(0, curr_left_op[1]):
                    [value, _, _, _] = \
                        execution_memory.get_address_context(curr_left_op[0] + i)
                    [_, _, value_context, value_calc_index] = \
                        execution_memory.get_address_context(curr_right_op + i)
                    execution_memory.set_value_from_context_address(
                        "temp_{}".format(value_context), value_calc_index, value)
            # Get current value of parameter and copy it to its assigned address by function
            else:
                [value, _, _, _] = execution_memory.get_address_context(curr_left_op)
                [_, _, value_context, value_calc_index] = \
                    execution_memory.get_address_context(curr_right_op)
                execution_memory.set_value_from_context_address(
                    "temp_{}".format(value_context), value_calc_index, value)
        # GOSUB OPERATION
        elif curr_operation == 'GOSUB':
            curr_func_name = curr_left_op
            curr_return_address = curr_right_op
            execution_memory.save_memory()
            execution_memory.push_return_address(curr_return_address)
            execution_memory.push_instruction_pointer(instructionPointer)
            instructionPointer = function_dict[curr_func_name]['start_p'] - 1
        # RETURN OPERATION
        elif curr_operation == 'RETURN':
            execution_memory.set_base_return_address(curr_right_op)
        # ENDFUNC OPERATION
        elif curr_operation == 'ENDFUNC':
            target_return_address = execution_memory.pop_return_address()
            instructionPointer = execution_memory.pop_instruction_pointer()
            result = None
            if target_return_address != -1:
                is_array = isinstance(target_return_address, list)
                return_len = 1
                if is_array:
                    return_len = target_return_address[1]
                    target_return_address = target_return_address[0]
                result = execution_memory.get_return_values(is_array, return_len)
            execution_memory.recovery_memory()
            if target_return_address != -1:
                [_, _, result_context, result_calc_index] = \
                    execution_memory.get_address_context(target_return_address)
                if isinstance(result, list):
                    for i in range(0, len(result)):
                        execution_memory.set_value_from_context_address(
                            result_context, result_calc_index + i, result[i])
                else:
                    execution_memory.set_value_from_context_address(
                        result_context, result_calc_index, result)

        # Advance instruction pointer
        instructionPointer += 1

    return prints

def test_is_float__valid(self):
    """valid value."""
    result = utils.is_float(10.10)
    self.assertEqual(result, 10.10)
    result = utils.is_float('10.10')
    self.assertEqual(result, 10.10)

def plot(filename, flipped=False, e_bounds=(-inf, inf), res_types="all",
         channels="", Nmax=None, dpi=dpi, suffix=""):
    """
    Makes a whole bunch of plots.

    - one for each of the user-specified channels
    - one with all channels on the same plot

    filename: string, an eigenphase_shift / phase_shift file path
    flipped: boolean, has this file been put through flipper.py?
    e_bounds: tuple, (left, right) bounds of the energy axis
    res_types: string or list. what types of resonances should be plotted?
    channels: big string, one channel per line, of channels to be plotted
    Nmax: float, for giving the plot a title
    dpi: resolution of the image
    """
    if res_types == "all":
        res_types = ["strong", "possible", "none"]
    filename = utils.abs_path(filename)
    phase_word = "eigenphase" if "eigen" in filename else "phase"

    # ensure file is flipped
    if flipped:
        new_filename = filename
    else:
        new_filename = flipper.flip(filename)

    # if channels are provided, there will be at least one number in the string
    # if no channels are provided, get them all
    if not any([utils.is_float(char) for char in channels]):
        file_suffix = "auto"
        # get csv filename with resonance info
        res_output_file = get_resonance_info(filename, Nmax=Nmax, already_flipped=True)
        # take all channels, i.e. all text in the file
        with open(res_output_file, "r+") as channel_file:
            channels = channel_file.read()
    else:
        file_suffix = "custom"

    # make channel titles
    lines = channels.split("\n")
    input_titles = []
    for line in lines:
        if line == "":
            continue
        Jx2, parity, Tx2, col_num, res_type = line.split(",")
        # only take the types of resonances we want to plot
        if res_type in res_types:
            title = "_".join([Jx2, parity, Tx2, "column", col_num])
            input_titles.append(title)

    # all_channels: dict, key = title, value = list of phases for that channel
    # energies: a list of energy values, possibly longer than some channels
    all_channels, energies = flipper.separate_into_channels(new_filename)

    # if energy bounds are -inf, inf, let's set them to the min / max e values
    if e_bounds == (-inf, inf):
        l_bound, r_bound = min(energies), max(energies)
    else:
        l_bound, r_bound = e_bounds

    output_dir = utils.output_dir.format(Nmax)
    if not exists(output_dir):
        os.mkdir(output_dir)
    png_dir = join(output_dir, "PNGs_" + phase_word)
    csv_dir = join(output_dir, "CSVs_" + phase_word)
    grace_dir = join(output_dir, "grace_files_" + phase_word)
    for d in [png_dir, csv_dir, grace_dir]:
        if not exists(d):
            os.mkdir(d)

    print("Working on resonance plotting")
    main_xmgrace_string = ""  # xmgrace string for the full file
    channel_string = ""  # xmgrace string for each individual channel
    series_counter = 0  # xmgrace series titles
    csv_paths = []  # list of csv files of channels we plot

    # now look in each channel, plot the ones we care about
    to_plot = []
    for title, phases in all_channels.items():
        # see if the title matches one we were given. If so, plot
        nice_title = utils.make_nice_title(title)
        if nice_title in input_titles:
            print("adding", nice_title, "to plot\r", end="")
            # energies may be longer than phases,
            # so we truncate energy where needed
            len_diff = len(energies) - len(phases)
            if len_diff != 0:
                trunc_energies = energies[len_diff:]
            else:
                trunc_energies = energies
            # then only plot within the given bounds
            plot_energies, plot_phases = [], []
            for e, p in zip(trunc_energies, phases):
                if l_bound <= e and e <= r_bound:
                    plot_energies.append(e)
                    plot_phases.append(p)

            # make a matplotlib channel plot
            channel_fig, channel_ax = plt.subplots()
            plot_title = utils.make_plot_title(nice_title)
            channel_ax.set_title(plot_title)
            channel_ax.set_ylabel("Phase (degrees)")
            # nothing interesting should happen outside this range, right?
            # I'd let matplotlib autogenerate the graph limits,
            # but then you get graphs with a range of -1 to 1, which have
            # an interesting shape but are not large enough to be useful
            channel_ax.set_ylim(-50, 200)
            channel_ax.set_xlim(l_bound, r_bound)
            channel_ax.set_xlabel("Energy (MeV)")
            channel_ax.plot(plot_energies, plot_phases)
            channel_path = join(
                png_dir, phase_word + "_" + nice_title + "_Nmax_" + str(Nmax) + ".png")
            channel_fig.savefig(channel_path, dpi=dpi)
            plt.close(channel_fig)
            to_plot.append((plot_energies, plot_phases, plot_title))

            # make xmgrace file for channel
            c_title = utils.xmgrace_title(title, series_counter) + "\n"
            channel_string = c_title
            series_counter += 1
            for e, p in zip(plot_energies, plot_phases):
                channel_string += str(e) + " " + str(p) + "\n"
            channel_string += "&\n"
            # append it to the full file string
            main_xmgrace_string += channel_string
            # and also save it as its own file with series number = 0
            lines = channel_string.splitlines()
            lines[0] = utils.xmgrace_title(lines[0], 0)
            channel_string = "\n".join(lines)
            grace_name = join(
                grace_dir, phase_word + "_" + nice_title + "_Nmax_" + str(Nmax) + ".agr")
            with open(grace_name, "w+") as channel_file:
                channel_file.write(channel_string)

            # make csv file for channel too
            csv_path = join(
                csv_dir, phase_word + "_" + nice_title + "_Nmax_" + str(Nmax) + ".csv")
            with open(csv_path, "w+") as csv_file:
                for e, p in zip(plot_energies, plot_phases):
                    csv_file.write(",".join([str(e), str(p)]) + "\n")
            csv_paths.append(csv_path)

    # make main matplotlib plot
    print("Making a big spaghetti plot...\r", end="")
    plt.cla()
    plt.clf()
    plt.title(phase_word.title() + " Shift vs. Energy for $N_{max}$ = " + str(Nmax))
    plt.ylabel("Phase (degrees)")
    plt.ylim(-5, 150)
    plt.xlim(l_bound, r_bound)
    plt.xlabel("Energy (MeV)")
    for energy, phase, title in to_plot:
        plt.plot(energy, phase, label=title)
    main_mpl_path = join(
        png_dir,
        phase_word + "_Nmax_" + str(Nmax) + "_" + file_suffix + suffix + ".png")
    plt.legend(loc='lower right', shadow=False, fontsize='medium')
    plt.savefig(main_mpl_path, dpi=dpi)
    plt.savefig(main_mpl_path.replace(".png", ".svg"))
    plt.close()

    # make main xmgrace file
    main_grace_path = join(
        grace_dir,
        phase_word + "_plot_Nmax_" + str(Nmax) + "_" + file_suffix + suffix + ".agr")
    with open(main_grace_path, "w+") as grace_file:
        grace_file.write(main_xmgrace_string)

    print("Done plotting! Saved main plot(s) to:")
    print(main_mpl_path)
    print(main_mpl_path.replace(".png", ".svg"))
    print(main_grace_path)

    # return paths to csv files of channels we plotted
    return csv_paths

async def convert(self, ctx, value='', unit='', new_unit=''):
    '''
    Converts given unit to new unit. Default value = 1
    '''
    addCommand()
    await ctx.channel.trigger_typing()

    if not value or not unit:
        raise commands.CommandError(message=f'Required argument(s) missing: `value/unit`.')
    elif not new_unit:
        new_unit = unit
        unit = value
        value = 1
    unit = unit.lower().replace(' ', '')
    new_unit = new_unit.lower().replace(' ', '')
    if not is_float(value):
        raise commands.CommandError(message=f'Invalid argument: `{value}`.')
    elif is_int(value):
        value = int(value)
    else:
        value = float(value)
    if value > sys.maxsize:
        raise commands.CommandError(
            message=f'Invalid argument: `{value}`. This value is too high.')

    if unit not in units:
        if not any(unit in alias for alias in unit_aliases):
            raise commands.CommandError(message=f'Invalid argument: `{unit}`.')
        else:
            unit = get_alias(unit)
    conversion_units = units[unit]
    if new_unit in conversion_units:
        factor = conversion_units[new_unit]
    else:
        if not any(new_unit in alias for alias in unit_aliases):
            raise commands.CommandError(message=f'Invalid argument: `{new_unit}`.')
        else:
            new_unit = get_alias(new_unit)
        if new_unit in conversion_units:
            factor = conversion_units[new_unit]
        else:
            if not any(unit in alias for alias in unit_aliases):
                raise commands.CommandError(
                    message=f'Incompatible units: `{unit}`, `{new_unit}`.')
            else:
                unit = get_alias(unit)
                conversion_units = units[unit]
                if new_unit in conversion_units:
                    factor = conversion_units[new_unit]
                else:
                    new_unit = get_alias(new_unit)
                    if new_unit in conversion_units:
                        factor = conversion_units[new_unit]
                    else:
                        raise commands.CommandError(
                            message=f'Incompatible units: `{unit}`, `{new_unit}`.')

    if isinstance(factor, int) or isinstance(factor, float):
        new_value = value * factor
    else:
        new_value = eval(f'{value}{factor}')
    new_value = str(new_value).replace('e', ' • 10^').replace('+', '')
    await ctx.send(f'{value} {unit} = `{new_value} {new_unit}`')

async def scientific(self, ctx, *number):
    '''
    Convert a number literal to scientific notation and vice versa.
    '''
    addCommand()
    await ctx.channel.trigger_typing()

    if not number:
        raise commands.CommandError(
            message='No input. Please give a number literal as argument.')
    input = ' '.join(number)
    input = input.replace(' ', '').replace(',', '')
    input = input.replace('*10^', 'e')
    input = input.replace('x10^', 'e')
    input = input.replace('•10^', 'e')

    if 'e' in input:
        # convert from scientific notation to number
        num = input[:input.index('e')]
        exp = input[input.index('e') + 1:]
        if not is_float(num) or not is_float(exp):
            raise commands.CommandError(
                message=f'Invalid input: `{input}`. Please give a number literal as argument.')
        num, exp = float(num), float(exp)
        try:
            result = num * (10 ** exp)
        except Exception as e:
            raise commands.CommandError(message=f'Invalid input: `{input}`. Error: {e}')
        output = float_to_str(result)
        if output.endswith('.0'):
            output = output[:len(output) - 2]
        end = ''
        if output.find('.') != -1:
            end = output[output.find('.'):]
            output = output[:output.find('.')]
        index = len(output) - 2
        while index >= 0:
            print(f'{index} "{output[index]}": '
                  f'{(len(output.replace(",", "")) - 1 - index) % 3 == 0}')
            if (len(output.replace(',', '')) - 1 - index) % 3 == 0:
                output = output[:index + 1] + ',' + output[index + 1:]
            index -= 1
        output += end
    else:
        # convert from number literal to scientific notation
        if not is_float(input):
            raise commands.CommandError(
                message=f'Invalid input: `{input}`. Please give a number literal as argument.')
        num = float(input)
        exp = 0
        while num < 1 or num >= 10:
            if num < 1:
                num *= 10
                exp -= 1
            else:
                num /= 10
                exp += 1
        num = str(num)
        if len(num) > len(input):
            num = num[:len(input)]
        output = f'{num} • 10^{exp}'

    if len(output) < 1998:
        await ctx.send(f'`{output}`')
    else:
        raise commands.CommandError(message=f'Error: output exceeds character limit.')

def extract_features_clicked(self):
    # Construct filter settings for the loaded data.
    bandpass_min = -1
    bandpass_max = -1
    notch_filter = self.notch_filter_checkbox.isChecked()
    if utils.is_float(self.bandpass_min_edit.text()):
        bandpass_min = float(self.bandpass_min_edit.text())
    if utils.is_float(self.bandpass_max_edit.text()):
        bandpass_max = float(self.bandpass_max_edit.text())
    adaptive_settings = None
    if self.adaptive_filtering_checkbox.isChecked():
        reference_electrode = int(self.adaptive_reference_electrode.text())
        frequencies = []
        widths = []
        for freq_str in self.adaptive_frequencies_edit.text().split(","):
            frequencies.append(float(freq_str))
        for width_str in self.adaptive_bandwidths_edit.text().split(","):
            widths.append(float(width_str))
        adaptive_settings = utils.AdaptiveFilterSettings(reference_electrode,
                                                         frequencies, widths)
    reference_electrode = 0
    if self.re_reference_checkbox.isChecked() and \
            utils.is_integer(self.reference_electrode_edit.text()):
        reference_electrode = int(self.reference_electrode_edit.text())
    filter_settings = utils.FilterSettings(global_config.SAMPLING_RATE,
                                           bandpass_min, bandpass_max,
                                           notch_filter=notch_filter,
                                           adaptive_filter_settings=adaptive_settings,
                                           reference_electrode=reference_electrode)
    if self.root_directory_changed:
        self.loaded_eeg_data = utils.load_data(self.root_directories)
    eeg_data, classes, sampling_rate, self.trial_classes = \
        utils.slice_and_filter_data(self.root_directories, filter_settings,
                                    self.loaded_eeg_data)
    labels = np.array(classes).reshape((-1, 1))
    if len(eeg_data) != 0 and len(classes) != 0:
        print("Data loaded successfully")
    else:
        print("Could not load data")
        return

    # Construct feature descriptors.
    # Obtain the range of channels to be included
    electrode_list = []
    for electrode_str in self.electrodes_edit.text().split(","):
        electrode_list.append(int(electrode_str))
    fft_window_size = float(self.fft_window_combo.currentText())
    feature_types = []
    if self.band_amplitude_checkbox.isChecked():
        band_amplitude_min_freq = -1
        band_amplitude_max_freq = -1
        if utils.is_float(self.band_amplitude_min_edit.text()):
            band_amplitude_min_freq = float(self.band_amplitude_min_edit.text())
        if utils.is_float(self.band_amplitude_max_edit.text()):
            band_amplitude_max_freq = float(self.band_amplitude_max_edit.text())
        if band_amplitude_min_freq != -1 and band_amplitude_max_freq != -1:
            feature_types.append(
                utils.AverageBandAmplitudeFeature(
                    utils.FrequencyBand(band_amplitude_min_freq, band_amplitude_max_freq),
                    fft_window_size))
    if self.frequency_bands_checkbox.isChecked():
        band_width = -1
        peak_frequency = self.peak_frequency_checkbox.isChecked()
        if utils.is_float(self.band_width_edit.text()):
            band_width = float(self.band_width_edit.text())
        center_frequencies_str_list = self.center_frequencies_edit.text().split(",")
        center_frequencies = []
        for center_freq_str in center_frequencies_str_list:
            if utils.is_float(center_freq_str):
                center_frequencies.append(float(center_freq_str))
        if len(center_frequencies) != 0 and band_width != -1:
            feature_types.append(
                utils.FrequencyBandsAmplitudeFeature(center_frequencies, band_width,
                                                     fft_window_size, peak_frequency))
    feature_extraction_info = utils.FeatureExtractionInfo(sampling_rate, electrode_list)
    self.filter_settings = filter_settings
    self.feature_extraction_info = feature_extraction_info
    self.feature_types = feature_types
    # Extract features
    extracted_data = utils.extract_features(eeg_data, feature_extraction_info,
                                            feature_types)
    feature_matrix = data.construct_feature_matrix(extracted_data)
    self.data_set = DataSet(feature_matrix, labels, add_x0=False, shuffle=False)
    print("Features extracted successfully...")

def organize_data(keys, table, wanted_keys):
    '''Organize all of the mushers and their stats for that log'''
    windex = 0
    musher_list = []
    musher = {}
    count_spaces = 0
    for ele in table:
        # strip extra symbols, not annoyed at all
        if ' •' in ele:
            ele = ele.strip(' •')
        if ' *' in ele:
            ele = ele.strip(' *')
        # if it's a date, continue to next
        if is_date(ele):
            continue
        # if it's a float, continue to next
        if is_float(ele):
            continue
        # white space of empty values or end of row
        if ele == '':
            count_spaces += 1
            continue
        # break if scratched data
        if ele == 'Scratched' or ele == 'Withdrawn':
            return []
        if count_spaces >= 4:
            count_spaces = 0
            windex = 0
            musher = {}
        if '(r)' in ele:
            ele = ele.strip(' (r)')
            musher['rookie_status'] = True
        if ele.isdigit():
            ele = int(ele)
        musher[wanted_keys[windex]] = ele
        if 'Dogs' in musher.keys():
            if 'rookie_status' not in musher.keys():
                musher['rookie_status'] = False
            musher_list.append(musher)
            musher = {}
            windex = 0
            continue
        # if Pos isn't an int, it's been written as a checkpoint
        if 'Pos' in musher:
            if not isinstance(musher['Pos'], int):
                continue
        if windex == (len(wanted_keys) - 1):
            windex = 0
        else:
            windex += 1
    return musher_list

def get_accuracy_threshold(self) -> float:
    accuracy_threshold = 1.0
    if utils.is_float(self.accuracy_threshold_edit.text()):
        accuracy_threshold = float(self.accuracy_threshold_edit.text())
    return accuracy_threshold

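# get_regularization_param and get_accuracy_threshold follow the same
# "parse the QLineEdit or fall back to a default" shape; a small helper
# (hypothetical, not in the original codebase) would collapse both:
def float_from_edit(line_edit, default):
    """Return the edit's text as a float, or `default` when it does not parse."""
    text = line_edit.text()
    return float(text) if utils.is_float(text) else default
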
'''
@author: Nuno
'''
import sys

import utils

if __name__ == '__main__':
    len_vect = 0
    max = None
    min = None
    soma = 0
    while True:
        number = input("float number?")
        number = utils.is_float(number)
        if number is not None:
            if number == 0.0:
                break
            len_vect += 1
            if max is None or max < number:
                max = number
            if min is None or min > number:
                min = number
            soma += number
    if len_vect == 0:
        print("No values in vector")
        sys.exit(0)
    print("Max: {} Min: {} Average: {}".format(max, min, soma / len_vect))

command = None
while command != 'Q':
    neighbors = UNI.graph.neighbors(current_player.current_node)
    print("".join(utils.get_messages(current_player.current_node, current_player,
                                     neighbors, UNI)))
    clock = datetime.now().strftime('%H:%M:%S')
    command = input("Command [TL={}]:[{}] (?=Help) : ".format(
        clock, current_player.current_node))
    command = command.upper()
    if command not in ['Q', 'V', 'P', '?'] and \
            (command.isnumeric() or utils.is_float(command)):
        try:
            target_node = int(command)
        except ValueError:
            target_node = float(command)
        if target_node in neighbors:
            current_player.current_node = target_node
            current_player.sectors_visited.update({current_player.current_node: 1})
        else:
            print("That's an invalid jump selection...try again!")
    elif command == 'V':
        print("Jump history: {}".format(current_player.sectors_visited))
    elif command == 'C':
        ship = UNI.ships[current_player.ship_current]

                    ": User wishes to change the number of solutions to " +
                    str(num_solns) + ".\n")
answer_2 = ""
while answer_2 not in ["Y", "N"]:
    answer_2 = utils.input_with_colours(
        "Architect: change size of gap pool from 0.1? [y/n] ")
    answer_2 = answer_2.strip().upper()
    if answer_2 == "Q":
        status_writer.write("Termination:" + str(datetime.datetime.now()) +
                            ": " + utils.TERMINATION + "\n")
        status_writer.close()
        exit()
if answer_2 == "Y":
    pool_gap = ""
    while (not utils.is_float(pool_gap)) or (float(pool_gap) > 1) or (
            float(pool_gap) <= 0):
        pool_gap = utils.input_with_colours(
            "Enter a number between 0 and 1 for the gap pool: ")
    pool_gap = float(pool_gap)
    status_writer.write("Step 5: " + str(datetime.datetime.now()) +
                        ": User wishes to change the gap pool to " +
                        str(pool_gap) + ".\n")
answer_3 = ""
while answer_3 not in ["Y", "N"]:
    answer_3 = utils.input_with_colours(
        "Architect: change size integrality constraint from 10E-8? [y/n] ")
    answer_3 = answer_3.strip().upper()
    if answer_3 == "Q":
        status_writer.write("Termination:" +