def slow_coord_to_index(target, lens):
    """
    Slow but simple way of converting coords to indices

    Uses C-style indexing; this means the last coordinate changes most
    frequently. For an (P,Q,R) matrix:
    0 0 0 -> 0
    0 0 1 -> 1
    0 0 2 -> 2
    ...
    p q r -> r + R*q + (R*Q)*p
    """
    assert is_vect(target)
    assert is_int(target)
    assert is_vect(lens)
    assert is_int(lens)  # fixed: was a duplicated check of `target`
    assert target.shape == lens.shape

    (D,) = lens.shape
    idx = 0
    mult = 1
    # Walk dimensions from last to first, accumulating the C-order offset.
    for d in range(D - 1, -1, -1):
        idx += mult * target[d]
        mult *= lens[d]
    return idx
def parse_create_models(self, words, orig):
    """Parse a 'create [<n>] model(s) for <table> [with <n> explanations]'
    command.

    Returns ('create_models', kwargs) on success, False on a malformed
    command (after printing help), or None when the words do not form a
    create-models statement.
    """
    n_chains = 10  # default number of models
    # Need at least 'create <something>' before indexing words[1]
    # (was len(words) >= 1, which could raise IndexError).
    if len(words) >= 2:
        if words[0] == 'create' and (utils.is_int(words[1])
                                     or words[1] == 'model'
                                     or words[1] == 'models'):
            # Parenthesized: previously `and` bound tighter than `or`, so
            # 'create models' with fewer than 4 words crashed on words[2].
            if len(words) >= 4 and (words[1] == 'model' or words[1] == 'models'):
                if words[2] == 'for':
                    tablename = words[3]
                    if len(words) >= 7:
                        if words[4] == 'with' and utils.is_int(words[5]) \
                                and words[6] == 'explanations':
                            n_chains = int(words[5])
                    result = 'create_models', dict(tablename=tablename,
                                                   n_chains=n_chains)
                    print('Created %d models for btable %s' % (n_chains, tablename))
                    return result
                else:
                    print(self.help_create_models())
                    return False
            elif len(words) >= 3 and utils.is_int(words[1]):
                n_chains = int(words[1])
                assert n_chains > 0
                if words[2] == 'model' or words[2] == 'models':
                    if len(words) >= 5 and words[3] == 'for':
                        tablename = words[4]
                        result = 'create_models', dict(tablename=tablename,
                                                       n_chains=n_chains)
                        print('Created %d models for btable %s' % (n_chains, tablename))
                        return result
                    else:
                        print(self.help_create_models())
                        return False
            else:
                print(self.help_create_models())
                return False
def linear_diophantine_solver(a, b, c, lb, ub):
    """
    Generate all integer solutions (x, y) of a*x + b*y = c
    with lb <= x, y <= ub.

    Raises NoSolution when gcd(a, b) does not divide c.
    """
    class NoSolution(Exception):
        pass

    assert is_int(a)
    assert is_int(b)
    assert is_int(c)

    g = gcd(a, b)  # hoisted: was recomputed three times
    if c % g != 0:
        raise NoSolution()

    # Find one particular solution by scanning x upward from lb.
    x = lb
    while True:
        # y must satisfy b*y = c - a*x.  (The original tested/used
        # (a*x + c), which solves a*x - b*y = -c, not the stated equation.)
        if (c - a * x) % b == 0:
            y = (c - a * x) // b  # floor division keeps y an int on Py3
            break
        x += 1

    # Step along the solution lattice: (x + k*v, y - k*u) is a solution
    # for every k, since a*v == b*u == a*b/g.
    u, v = a // g, b // g
    k = 0
    while (lb <= x + k * v <= ub) and (lb <= y - k * u <= ub):
        yield (x + k * v, y - k * u)
        k += 1
async def roll(self, ctx, sides=6, num=1):
    ''' Rolls a dice. '''
    addCommand()

    # Coerce and validate the number of dice (1..100).
    if not is_int(num):
        raise commands.CommandError(message=f'Invalid argument: `{num}`.')
    num = int(num)
    if not 1 <= num <= 100:
        raise commands.CommandError(message=f'Invalid argument: `{num}`.')

    # Coerce and validate the number of sides (2..2**31-1).
    if not is_int(sides):
        raise commands.CommandError(
            message=f'Invalid argument: `{sides}`.')
    sides = int(sides)
    if not 2 <= sides <= 2147483647:
        raise commands.CommandError(
            message=f'Invalid argument: `{sides}`.')

    rolls = [random.randint(1, sides) for _ in range(num)]
    result = str(rolls).replace('[', '').replace(']', '')
    await ctx.send(f'{ctx.author.mention} You rolled {result}!')
def counter(_, __, cmd):
    # Voice-assistant counting command: parses (Russian) "до N" / "от A до B"
    # phrases and replies with the number sequence, spoken in chunks.
    max_count = 20  # numbers per spoken chunk
    minus = F('минус') + ' '
    plus = F('плюс') + ' '
    # Normalize spoken signs: "минус 5" -> "-5", "плюс 5" -> "5".
    data = cmd.lower().replace(minus, '-').replace(plus, '').split()
    if len(data) == 2 and data[0] == F('до') and utils.is_int(
            data[1]) and abs(int(data[1])) > 1:
        # "до N": count from 1 (or -1 when N is negative) up/down to N.
        to_ = int(data[1])
        from_ = 1 if to_ > 0 else -1
    elif len(data) == 4 and utils.is_int(data[1]) and utils.is_int(data[3]) \
            and data[0] == F('от') and data[2] == F('до') and abs(int(data[3]) - int(data[1])) > 0:
        # "от A до B": count from A to B (either direction).
        to_, from_ = int(data[3]), int(data[1])
    else:
        return Next  # not our phrase; let the next handler try
    if abs(to_ - from_) + 1 > 500:
        # Refuse overly long sequences.
        return Say(
            F('Это слишком много для меня - считать {} чисел.',
              abs(to_ - from_) + 1))
    inc_ = 1 if from_ < to_ else -1
    say = [str(x) for x in range(from_, to_ + inc_, inc_)]
    # Re-group into comma-joined chunks of max_count numbers each.
    say = [
        ', '.join(say[x:x + max_count]) for x in range(0, len(say), max_count)
    ]
    say.append(F('Я всё сосчитала'))
    return SayLow(phrases=say)
def even_slower_coord_to_index(target, lens):
    """Convert a coordinate vector to a C-order flat index by materializing
    the whole index cube and looking the coordinate up.

    Reference implementation for cross-checking slow_coord_to_index;
    costs O(prod(lens)) memory.
    """
    assert is_vect(target)
    assert is_int(target)
    assert is_vect(lens)
    assert is_int(lens)  # fixed: was a duplicated check of `target`
    assert target.shape == lens.shape

    N = np.prod(lens)
    # arange reshaped in (default) row-major order makes C[coord] == flat index.
    C = np.reshape(np.arange(N), lens)  # Should be row-major ordering
    idx = tuple(target.astype(np.integer))
    return C[idx]
async def purge(self, ctx, num=0):
    ''' Deletes given amount of messages (Admin+).
    Arguments: integer.
    Constraints: You can delete up to 100 messages at a time.
    '''
    addCommand()

    # discord.py may hand us a string; coerce it if possible.
    if not isinstance(num, int):
        if not is_int(num):
            raise commands.CommandError(
                message=f'Invalid argument: `{num}`.')
        num = int(num)
    if not num or not 1 <= num <= 100:
        raise commands.CommandError(message=f'Invalid argument: `{num}`.')

    try:
        # Best effort: also remove the invoking message.
        try:
            await ctx.message.delete()
        except:
            pass
        await ctx.channel.purge(limit=num)
        msg = await ctx.send(f'{num} messages deleted!')
        await asyncio.sleep(3)
        await msg.delete()
    except discord.Forbidden:
        raise commands.CommandError(
            message=f'Missing permissions: `delete_message`.')
def get_name(self):
    """Return the company's name, looked up by self.idCompany."""
    if not is_int(self.idCompany):
        raise TypeError
    rows = query_company_name(self.idCompany)
    # An unknown id yields an empty result set.
    if not rows:
        raise Exception("This company id doesn't exist")
    return rows[0]["name"]
def is_valid(mystring, mylist):
    """Return True iff mystring parses as an integer that 1-indexes into
    mylist (i.e. 1 <= int(mystring) <= len(mylist))."""
    if not utils.is_int(mystring):
        return False
    myint = int(mystring)
    # Direct range check; equivalent to (and replaces) the original O(n)
    # scan over range(len(mylist)).
    return 0 <= myint - 1 < len(mylist)
def evaluate_taglist(self, tag_list):
    """Expand a list of grammar tags into evaluated text fragments.

    Tags of the form '<prefix>.<tag>' are special:
      - an integer prefix pins the tag's expansion (cached in static_tags)
      - a prefix naming a text function post-processes the expansion
    Plain tags are expanded with a random choice from self.tags.
    """
    tags_evaluated = []
    for t in tag_list:
        if '.' in t:
            # Split on the FIRST dot.  (The original scanned t[:-1] by
            # hand and left prefix_tag unbound — NameError — when the only
            # dot was the last character; partition handles that case.)
            prefix_tag, _, real_tag = t.partition('.')
            if is_int(prefix_tag):
                # Numeric prefix: evaluate once, then reuse the cached text.
                if not t in self.static_tags:
                    if real_tag in self.tags:
                        self.static_tags[t] = self.evaluate(
                            get_random(self.tags[real_tag]))
                if t in self.static_tags:
                    tags_evaluated.append(self.static_tags[t])
            elif prefix_tag in self.text_functions:
                # Function prefix: expand the tag, then apply the function.
                real_tag = self.evaluate("#" + real_tag + "#")
                tags_evaluated.append(
                    self.text_functions[prefix_tag](real_tag))
        elif t in self.tags:
            tagged_text = get_random(self.tags[t])
            tags_evaluated.append(self.evaluate(tagged_text))
    return tags_evaluated
async def removenotification(self, ctx, id):
    ''' Removes a custom notification by ID. (Admin+)
    To get the ID of the notification that you want to remove, use the command "notifications".
    '''
    addCommand()

    if not id:
        raise commands.CommandError(
            message=f'Required argument missing: `id`.')
    if not is_int(id):
        raise commands.CommandError(
            message=f'Invalid argument: `{id}`. Must be an integer.')
    id = int(id)

    notification = await Notification.query.where(
        Notification.guild_id == ctx.guild.id
    ).where(Notification.notification_id == id).gino.first()
    if not notification:
        raise commands.CommandError(
            message=f'Could not find custom notification: `{id}`.')

    await notification.delete()

    # Re-number the remaining notifications so their IDs stay dense (0..n-1).
    notifications = await Notification.query.where(
        Notification.guild_id == ctx.guild.id
    ).order_by(Notification.notification_id.asc()).gino.all()
    if notifications:
        for i, notification in enumerate(notifications):
            await notification.update(notification_id=i).apply()

    await ctx.send(f'Removed custom notification: `{id}`')
def _time(context, ticket, val):
    "Display grant time for ticket"
    if not utils.is_int(val):
        context.fatal_error("unexpected value for ticket %s: %s" % (ticket, val))
    # The backend reports "-1" for an unknown ticket.
    if val == "-1":
        context.fatal_error("%s: no such ticket" % ticket)
    # val is the grant time as a unix-epoch string.
    # (Parenthesized single-expression print: identical output on Py2,
    # and now also valid Py3.)
    print("ticket %s last time granted on %s" % (ticket, time.ctime(int(val))))
def __init__(self, factor=2):
    """Squeeze transform with an integer downscaling factor.

    :param factor: squeeze factor; must be an integer greater than 1.
    :raises ValueError: if factor is not an integer > 1.
    """
    super(SqueezeTransform, self).__init__()
    if not utils.is_int(factor) or factor <= 1:
        raise ValueError("Factor must be an integer > 1.")
    self.factor = factor
def valid(self, data_row):
    """Return True iff every required column is non-empty, column 4 parses
    as a date, and column 6 parses as an int."""
    # Any required column left blank invalidates the row.
    if any(data_row[ind] == "" for ind in self.required_column_indices):
        return False
    # Same short-circuit order as before: date first, then int.
    return bool(is_date(data_row[4]) and is_int(data_row[6]))
def data_user(request):
    """Render the company/training-ground data page for the citizen id in
    request.POST['id']; fall back to a blank page on missing/bad input."""
    if request.POST and util.is_int(request.POST['id']) and int(request.POST['id']) > 0:
        citizen = Citizen.objects.filter(citizen_id_er=request.POST['id'])
        objects = [WeaponCompany.objects, FoodCompany.objects,
                   RawWeaponCompany.objects, RawFoodCompany.objects]
        data_companies = {'RawFood': {}, 'RawWeapon': {},
                          'Weapon': {}, 'Food': {}}
        for obj in objects:
            companies = obj.filter(owner_citizen__exact=citizen)
            if companies.count() > 0:
                for company in companies:
                    key = company.get_type_company_display()
                    # Map quality -> quantity.  (The original tested
                    # `company.quantity in data_companies[key]` — the wrong
                    # attribute — before unconditionally overwriting anyway;
                    # that dead zero-initialization is dropped.)
                    data_companies[key][company.quality] = company.quantity
        # NOTE(review): .get() raises DoesNotExist instead of returning
        # None, so the `TG != None` guard below can never be False —
        # confirm whether a try/except was intended here.
        TG = TraningGround.objects.get(owner_citizen__exact=citizen)
        TG_data = {}
        if TG != None:
            TG_data = {'TG1': TG.weights_room, 'TG2': TG.climbing_center,
                       'TG3': TG.shooting_range, 'TG4': TG.special_forces}
        return render_to_response('data_MU/templates/data_user_company.html',
                                  {'companies': data_companies, 'TG': TG_data},
                                  context_instance=RequestContext(request))
    return render_to_response('data_MU/templates/blank.html',
                              context_instance=RequestContext(request))
def read_data_first(self):
    """Load the player's saved attributes from PLAYER_DATA_FILE, then load
    the sprite images."""
    user_data = utils.get_user_data()
    path = os.path.join("data", user_data["character_name"],
                        constants.PLAYER_DATA_FILE)
    mylist = utils.read_data_file(path, num_of_fields=11)
    mydict = mylist[0]
    # ----
    self.x = mydict["x"]
    self.y = mydict["y"]
    self.name = mydict["name"]
    self.kind = mydict["kind"]
    # Direction is stored either as an integer or as a name ("DOWN", ...).
    # (Dropped the redundant `== True` comparison.)
    if utils.is_int(mydict["direction"]):
        self.direction = int(mydict["direction"])
    else:
        self.direction = utils.convert_direction_to_integer(mydict["direction"])
    self.max_hit_points = mydict["max_hit_points"]
    self.hit_points = mydict["hit_points"]
    self.chance_to_hit = mydict["chance_to_hit"]
    self.experience = mydict["experience"]
    self.profession = mydict["profession"]
    self.gold = mydict["gold"]
    # ----
    self.load_images()
def read_data_restart(self, x=-1, y=-1):
    """Reload player attributes for a restart.

    x/y default to -1 meaning "use the saved position"; passing only one
    of the two is an error.
    """
    user_data = utils.get_user_data()
    path = os.path.join("data", user_data["character_name"], constants.PLAYER_DATA_FILE)
    mylist = utils.read_data_file(path, num_of_fields=11)
    mydict = mylist[0]
    # ---- position: saved coordinates unless an explicit (x, y) was given
    if x == -1 and y == -1:
        self.x = mydict["x"]
        self.y = mydict["y"]
    else:
        if x == -1 or y == -1:
            raise ValueError("Error!")
        self.x = x
        self.y = y
    # ----
    self.name = mydict["name"]
    self.kind = mydict["kind"]
    # NOTE(review): unlike read_data_first, an integer saved direction is
    # replaced by the constant -90 here instead of int(...) — and
    # self.direction is overwritten with "DOWN" at the end of this method
    # anyway; confirm which value is actually intended.
    if utils.is_int(mydict["direction"]) == True:
        self.direction = -90
    else:
        self.direction = utils.convert_direction_to_integer(mydict["direction"])
    self.max_hit_points = mydict["max_hit_points"]
    self.hit_points = mydict["hit_points"]
    self.chance_to_hit = mydict["chance_to_hit"]
    self.experience = mydict["experience"]
    self.profession = mydict["profession"]
    self.gold = mydict["gold"]
    # ----
    self.load_images()
    # ---- a restart always faces down
    self.direction = "DOWN"
def speak_some_text(self):
    """Interactive TTS menu: speak typed text aloud, or read a chosen file.

    Returns False when the user quits at the main menu.
    """
    def is_valid(mystring, mylist):
        # True iff mystring is an integer that 1-indexes into mylist.
        if not utils.is_int(mystring):
            return False
        myint = int(mystring)
        for i in range(len(mylist)):
            if myint - 1 == i:
                return True
        return False
    # -----------------------------------------
    first_choice = "enter the text you would like me to speak"
    second_choice = "Enter the filename you would like me to read"
    choices = [first_choice, second_choice, "quit"]
    # NOTE(review): text and default_text are currently unused.
    text = ""
    default_text = """Though I speak with the tongues of men and of angels, and have not charity, I am become as sounding brass, or a tinkling cymbal."""
    # ----------------------------------------- print the menu, read a choice
    for count, elem in enumerate(choices):
        print("{}) {}".format(count + 1, elem))
    user_input = input("> ").lower().strip()
    while not is_valid(user_input, choices):
        user_input = input("> ").lower().strip()
        if user_input == "quit":
            return False
    user_input = int(user_input)
    user_choice = choices[user_input - 1].lower().strip()
    # -----------------------------------------
    if user_choice == first_choice:
        # Speak free text typed by the user.
        print("Enter the text you would like me to speak:")
        user_input = input("> ").lower().strip()
        while len(user_input) == 0:
            user_input = input("> ").lower().strip()
            if user_input == "quit":
                sys.exit()
        self.engine.say(user_input)
        self.engine.runAndWait()
        print("Finished speaking.")
    elif user_choice == second_choice:
        # Read one of the files under data/text_files aloud.
        print("Entering the filename you would like me to read :..")
        filepath = os.path.join("data", "text_files")
        files = os.listdir(filepath)
        for count, file in enumerate(files):
            print("{}) {}".format(count + 1, file))
        user_input = input("> ").lower().strip()
        while not utils.is_int(user_input):
            user_input = input("> ").lower().strip()
            if user_input == "quit":
                sys.exit()
        filename = files[int(user_input) - 1]
        print("Reading file: ", filename)
        filepath = os.path.join("data", "text_files", filename)
        mytext = ""
        with open(filepath, "r") as f:
            mytext = f.read()
        # -----------------------------------------
        self.engine.say(mytext)
        self.engine.runAndWait()
        print("Finished speaking.")
    elif user_choice == "quit":
        pass
    else:
        raise ValueError("I don't recognize that: {}".format(user_choice))
def load_config(self, path):
    """Parse a 'name: value' config file into ConfigVariable objects.

    Values are coerced to int/float/bool when they look like one; entries
    whose value is empty are skipped.
    """
    self.path = path
    # Read inside the with-block.  (The original opened the file a second
    # time for the generator and never closed that handle.)
    with open(self.path) as config_file:
        self.config = [line.rstrip('\n') for line in config_file]
    # finds the names and values from config file
    # adds them in ConfigVariable class and stores in configVariables array
    for line in self.config:
        for count, character in enumerate(line):
            if character == ':':
                value = line[count + 1:len(line)]
                if is_int(value):
                    value = int(value)
                elif is_number(value):
                    value = float(value)
                elif is_bool(value):
                    value = make_bool(value)
                # dont create an object if it has no value
                if value != '':
                    self.configVariables.append(
                        ConfigVariable(line[0:count], value))
                # Split only on the FIRST colon: without this break the
                # original re-added a variable for every later colon in
                # the line (e.g. time values like "12:30").
                break
def indices_to_coords(self, indices):
    """Convert flat node indices to D-dimensional grid coordinates.

    Out-of-bound indices produce NaN coordinate rows and are recorded in
    the returned Coordinates' OutOfBounds structure.
    """
    assert is_vect(indices)
    assert is_int(indices)
    (N,) = indices.shape
    D = len(self.coef)
    # Does the hard work: self.coef holds the per-dimension strides, so a
    # successive divmod by coef[d] peels off coordinate d.
    raw_coords = np.empty((N, D))
    res = indices
    for d in xrange(D):
        (coord, res) = divmod(res, self.coef[d])
        raw_coords[:, d] = coord
    # OOB indices mapped to NAN
    oob_mask = self.are_indices_oob(indices)
    raw_coords[oob_mask, :] = np.nan
    oob_indices = self.indices_to_oob_indices(indices, oob_mask)
    oob = OutOfBounds()
    oob.build_from_oob_indices(oob_indices, D)
    coords = Coordinates(raw_coords, oob)
    assert coords.check()
    return coords
def config_edit():
    """Interactively display config.cfg and let the user edit entries by
    number; 0 exits the loop."""
    cfg_layout = list(ConfigHandler.config_layout.keys())
    while True:
        print(
            "These is your current configuration, type the number of what you want to modify:"
        )
        print("0: Exit config")
        ConfigHandler.list_config()
        line_selection = input("> ")
        # Accept only integers in 0..len(cfg_layout).
        if not is_int(line_selection
                      ) or not 0 <= int(line_selection) <= len(cfg_layout):
            print("Invalid choice")
            continue
        choice = int(line_selection)
        if choice == 0:
            break
        key = cfg_layout[choice - 1]
        ConfigHandler.cfg_dict[key] = ConfigHandler.ask_config_line(key)
        ConfigHandler.save_config()
    # When password saving is disabled, wipe any stored password.
    if read_config("save password") == "False":
        write_config("s_password", "")
def show(message, *args):
    """Show transactions for this chat; '/show all' dumps everything.

    Optional args: '@username's filter by participant, an integer limits
    the number of rows (default 10).
    """
    # Guard added: with no args at all, args[0] raised IndexError.
    if args and args[0] == "all":
        return join_columns(Transaction.select())
    usernames = [_ for _ in args if _.startswith('@')]
    numbers = [_ for _ in args if is_int(_)]
    if not numbers and not usernames:
        return "See /usage for the help"
    query = Chat.id == message.chat.id
    for username in usernames:
        username = username.upper()
        try:
            user = User.get(User.username == username)
        except Exception:  # narrowed from a bare except
            return "No such user %s" % username
        query &= (Transaction.creditor == user) | (Transaction.debtor == user)
    N = int(numbers[0]) if numbers else 10
    return join_columns(Transaction.select().join(Chat).where(query).limit(N))
def simular_create_tfg(self, titulo, **kwargs):
    """Validate and construct (without saving) a Tfg.

    Returns True on success, the NameError message text on a validation
    failure, or a {'status': False, 'message': ...} dict when a referenced
    tutor/cotutor/titulacion does not exist.
    """
    try:
        # comprobando titulo vacio o Tfg con el mismo titulo
        if not titulo:
            raise NameError("Titulo necesario")
        else:
            res = Tfg.objects.filter(titulo=titulo)
            if res.count() != 0:
                raise NameError("El TFG ya existe")
        # comprobando tipo no vacio
        if not kwargs.get('tipo'):
            raise NameError("Tipo de TFG necesario")
        # comprobando numero de alumnos (must be an int in 1..3)
        if kwargs.get('n_alumnos') is None or not utils.is_int(kwargs.get('n_alumnos')) or int(kwargs.get('n_alumnos')) <= 0 \
                or int(kwargs.get('n_alumnos')) > 3:
            raise NameError("Numero de alumnos incorrecto")
        # comprobando descripcion
        if not kwargs.get('descripcion'):
            raise NameError("Descripcion necesaria")
        # comprobando tutor (must exist and belong to the Profesores group)
        if kwargs.get('tutor') is None:
            raise NameError("Tutor necesario")
        else:
            try:
                tutor = Profesor.objects.get(email=kwargs.get('tutor'))
            except Profesor.DoesNotExist:
                return dict(status=False, message='El tutor no existe')
            if not tutor.groups.filter(name='Profesores').exists():
                raise NameError("Tutor ha de ser un profesor")
        # comprobando cotutor (optional; same constraints when given)
        cotutor = None
        if not kwargs.get('cotutor') is None:
            try:
                cotutor = Profesor.objects.get(email=kwargs.get('cotutor'))
            except Profesor.DoesNotExist:
                return dict(status=False, message='El cotutor no existe')
            if not cotutor.groups.filter(name='Profesores').exists():
                raise NameError("Cotutor ha de ser un profesor")
        # comprobando titulacion
        if kwargs.get('titulacion') is None:
            raise NameError("Titulacion necesaria")
        else:
            try:
                titulacion = Titulacion.objects.get(codigo=kwargs.get('titulacion'))
            except Titulacion.DoesNotExist:
                return dict(status=False, message='la titulacion no existe')
        # NOTE(review): the instance is constructed but never saved or
        # returned — presumably intentional for a "simulate" call; confirm.
        self.model(tipo=kwargs.get('tipo'), titulo=titulo,
                   n_alumnos=kwargs.get('n_alumnos'),
                   descripcion=kwargs.get('descripcion'),
                   conocimientos_previos=kwargs.get('conocimientos_previos'),
                   hard_soft=kwargs.get('hard_soft'),
                   tutor=tutor, cotutor=cotutor, titulacion=titulacion)
        return True
    except NameError as e:
        # NOTE(review): BaseException.message is Python 2 only; under
        # Python 3 this line itself raises AttributeError — confirm the
        # target interpreter.
        return e.message
def process_directory_input(self):
    """Handle a numeric menu selection of a quiz directory: validate the
    typed number, remember the chosen directory, and list its quiz files."""
    def is_valid(mystring):
        # Filter out macOS cruft and generated data/score files.
        if mystring == ".DS_Store":
            return False
        if mystring.find("_data_") > -1:
            return False
        if mystring.find("_scores_") > -1:
            return False
        return True
    # ----------------------------------------------------- validate input
    if not utils.is_int(self.user_text):
        return False
    selection = int(self.user_text)
    if selection not in range(1, len(self.window_text_list) + 1):
        return False
    # ----------------------------------------------------- resolve dir name
    entry = self.window_text_list[selection - 1]
    # Strip the "N. " prefix and normalize spaces to underscores.
    entry = entry[entry.find(" "):].strip().replace(" ", "_")
    self.directory = entry
    print(entry)
    listing = os.listdir(os.path.join("data", "quizes", entry))
    self.window_text_list = [i for i in listing if is_valid(i)]
    self.window_text_list = [
        "{}. {}".format(count + 1, i)
        for count, i in enumerate(self.window_text_list)
    ]
def _sell_menu_helper(self, player):
    """Prompt the player to sell one inventory item.

    Returns True when the player types 'quit', False when the chosen item
    does not exist; otherwise removes the item and credits its cost.
    """
    print("What would you like to sell?")
    print(player.items.display_screen_line())
    # Keep prompting until an integer (or 'quit') is entered.
    user_input = ""
    while not utils.is_int(user_input):
        user_input = input("sell > ").lower().strip()
        if user_input == "quit":
            return True
    print("You entered: {}".format(user_input))
    users_choice = int(user_input)
    if not player.items.item_exists(users_choice):
        print(
            "Doh! It looks like you don't have that item in your inventory."
        )
        return False
    chosen_item = player.items.get_item(users_choice)
    print("Here is the item you want to sell:")
    print("- " * 20)
    print(chosen_item.display_screen_line())
    print("- " * 20)
    # ---- Remove item from self.items ----
    player.items.remove_item(chosen_item.id)
    print("You have sold the item!")
    # ---- end ----
    # Credit the sale price to the player's gold.
    print("You had {} of gold.".format(player.display_gold()))
    player.gold += chosen_item.cost
    print("You have {} of gold.".format(player.display_gold()))
def _buy_menu_valid_input(self, user_input):
    """Return True for 'quit' or a non-empty integer string."""
    if user_input == "quit":
        return True
    # Collapsed the redundant `if x: return True else: return False` tail;
    # empty input short-circuits before is_int is consulted, as before.
    return bool(user_input and utils.is_int(user_input))
def get_banker(self): log("Parsing messages and potentially constructing zeBanker object", 3) for msg in self.groupme_group.messages.list(): if msg.id in self.new_messages: log("New message found {}:{}".format(msg.id, msg.text), 2) if "!results" in msg.text: if msg.text.strip() == "!results": send_groupme_messages(["nah"], self.bot_id, self.message) #self.banker = zeBanker(None, self.donk_group_id, self.output_dir, self.message, None, None, self.bot_id) elif is_int(msg.text.replace("!results", "")): num_tables = int(msg.text.replace("!results", "")) send_groupme_messages(["Ok {}, getting results from the last {} table(s).".format(msg.name, num_tables)], self.bot_id, self.message) self.banker = zeBanker(None, self.donk_group_id, self.output_dir, self.message, num_tables, None, self.bot_id) elif ":" in msg.text or "donkhouse.com/group/{}".format(self.donk_group_id) in msg.text: if "donkhouse.com/group/{}".format(self.donk_group_id) in msg.text: tables = [msg.text.split("/")[-1]] else: tables = msg.text.split(":")[1].split(",") log("Tables to retrieve nets for: {}".format(tables)) file_names = [] for table in tables: file_name = "{}_{}_chat.pkl".format(self.donk_group_id, table) file_names.append(os.path.abspath(os.path.join(script_path, "../Output/ChatHistories", file_name))) net_getter = getNets(file_names) nets = net_getter.run() send_groupme_messages(["Ok {}, getting results from {}".format(msg.name, tables)], self.bot_id, self.message) if sum(nets.values()) != 0: send_groupme_messages( ["nets sum to {}: {}".format(sum(nets.values()), nets)], self.bot_id, self.message) else: self.banker = zeBanker(None, self.donk_group_id, self.output_dir, self.message, None, None, self.bot_id, nets) break
def textfiles_valid(mystring, mylist):
    """Return True iff mystring parses as an integer that (1-based)
    selects an entry of mylist.

    Note: mirrors the original behavior, so 0 and small negative values
    are accepted via Python's negative indexing.
    """
    if not utils.is_int(mystring):
        return False
    myint = int(mystring)
    try:
        mylist[myint - 1]  # probe the index; the value itself is unused
        return True
    except IndexError:  # narrowed from a bare except
        return False
def __apply_single_override(self, dest, name, value):
    """
    Apply single override

    Walks `dest` along the dot-separated path in `name` (numeric segments
    index lists; negative ones append a new element), then sets — or, when
    the last segment starts with '^', deletes — the addressed entry.

    :type name: str
    :type value: str
    """
    self.log.debug("Applying %s=%s", name, value)
    # Split the path; numeric segments become int list indices.
    parts = [(int(x) if is_int(x) else x) for x in name.split(".")]
    pointer = dest
    # Descend to the container holding the final segment, growing
    # lists/dicts along the way as needed.
    for index, part in enumerate(parts[:-1]):
        self.__ensure_list_capacity(pointer, part, parts[index + 1])
        if isinstance(part, integer_types):
            if part < 0:
                # Negative index: append a fresh container whose type
                # matches the NEXT segment (list for int, dict otherwise).
                if isinstance(parts[index + 1], integer_types):
                    pointer.append([])
                else:
                    pointer.append(BetterDict())
                pointer = pointer[-1]
            else:
                pointer = pointer[part]
        elif isinstance(parts[index + 1], integer_types) and isinstance(
                pointer, dict):
            # Next segment indexes a list, so default-create a list here.
            pointer = pointer.get(part, [], force_set=True)
        else:
            pointer = pointer.get(part, force_set=True)
    self.__ensure_list_capacity(pointer, parts[-1])
    self.log.debug("Applying: [%s]=%s", parts[-1], value)
    if isinstance(parts[-1], string_types) and parts[-1][0] == '^':
        # '^name' / '^index' means delete instead of set.
        item = parts[-1][1:]
        if isinstance(pointer, list):
            item = int(item)
            if -len(pointer) <= item < len(pointer):
                del pointer[item]
            else:
                self.log.debug("No value to delete: %s", item)
        elif isinstance(pointer, dict):
            if item in pointer:
                del pointer[item]
            else:
                self.log.debug("No value to delete: %s", item)
        else:
            raise ValueError(
                "Cannot handle override %s in non-iterable type %s"
                % (item, pointer))
    else:
        # Plain set: parse the value string, wrapping parsed dicts so the
        # result supports the same force_set/merge API as the tree.
        parsed_value = self.__parse_override_value(value)
        self.log.debug("Parsed override value: %r -> %r (%s)", value,
                       parsed_value, type(parsed_value))
        if isinstance(parsed_value, dict):
            parsed_value = BetterDict.from_dict(parsed_value)
        if isinstance(pointer, list) and parts[-1] < 0:
            pointer.append(parsed_value)
        else:
            pointer[parts[-1]] = parsed_value
def unpack_sp_mat(A):
    """Unpack a flat [R, C, nnz, rows..., cols..., data...] vector (as
    emitted by Armadillo, Fortran-ordered triples) into a scipy COO matrix."""
    assert A.size >= 3
    assert is_int(A[:3])
    (R, C, nnz) = map(int, A[:3])
    assert (3 + 3 * nnz) == A.size

    # Armadillo works in Fortran format.
    # Slice from the front: the original A[-3*nnz:] returned the WHOLE
    # array when nnz == 0 (A[0:] == A) and then failed to reshape;
    # A[3:] is always exactly the triples block.
    triples = np.reshape(A[3:], (3, nnz), order='F')
    rows = triples[0, :]
    cols = triples[1, :]
    data = triples[2, :]

    assert is_int(rows)
    assert is_int(cols)
    rows = rows.astype(np.integer)
    cols = cols.astype(np.integer)
    return sps.coo_matrix((data, (rows, cols)), shape=(R, C))
def _resolve_ambiguous(self, ambiguous):
    """Classify a token: '<digits>s' is a time offset, anything else is
    treated as a timestamp."""
    if ambiguous.endswith('s') and is_int(ambiguous[:-1]):
        return self.TIME_OFFSET_TAG
    return self.TIMESTAMP_TAG
async def poll(self, ctx, hours='24', *options):
    ''' Create a poll in which users can vote by reacting. Poll duration can vary from 1 hour to 1 week (168 hours). Options must be separated by commas. '''
    addCommand()

    # If the first argument isn't a number it is really the first option.
    if not is_int(hours):
        options = [hours] + list(options)
        hours = 24
    else:
        hours = int(hours)
        if hours < 1 or hours > 168:
            raise commands.CommandError(
                message=
                f'Invalid argument: `{hours}`. Must be positive and less than 168.'
            )

    # Re-join the words, then split on commas to get the option list.
    options = ' '.join(options).split(',')
    if len(options) < 2:
        raise commands.CommandError(
            message=
            'Error: insufficient options to create a poll. At least two options are required.'
        )
    elif len(options) > 20:
        raise commands.CommandError(
            message=
            'Error: too many options. This command only supports up to 20 options.'
        )

    # One numbered-emoji line per option.
    txt = ''.join(f'\n{num_emoji[idx]} {opt}'
                  for idx, opt in enumerate(options))
    txt += f'\n\nThis poll will be open for {hours} hours!'

    embed = discord.Embed(
        title='**Poll**',
        description=f'Created by {ctx.message.author.mention}\n{txt}',
        timestamp=datetime.utcnow())
    msg = await ctx.send(embed=embed)
    # The message id is only known after sending; add it to the footer.
    embed.set_footer(text=f'ID: {msg.id}')
    await msg.edit(embed=embed)

    for idx in range(len(options)):
        await msg.add_reaction(num_emoji[idx])

    await Poll.create(guild_id=ctx.guild.id,
                      author_id=ctx.author.id,
                      channel_id=ctx.channel.id,
                      message_id=msg.id,
                      end_time=datetime.utcnow() + timedelta(hours=hours))
def datatype_exists(idDatatype):
    """Return True iff exactly one Datatype row has this id."""
    #this has been tested
    if not is_int(idDatatype):
        raise TypeError
    matches = (db.query(Datatype.id).filter(Datatype.id == idDatatype).all())
    # An id lookup can only legitimately match 0 or 1 rows.
    if len(matches) > 1:
        raise Exception("Database is compromised")
    return len(matches) == 1
def most_similar_common_level(level, level_counter, threshold,
                              sentence_abs_levels, index):
    """Snap `level` to the closest "common" level — one whose count in
    level_counter exceeds threshold.

    Positive levels snap among the positive common levels; negative levels
    also consider the top-down '<n>ROOT' encoding and return whichever is
    closest.  A non-integer `level` is returned unchanged.
    """
    if is_int(level):
        level = int(level)
        # Common plain integer levels.
        common_levels = [
            int(l) for l in level_counter
            if is_int(l) and level_counter[l] > threshold
        ]
        # Common top-down levels; bare "ROOT" counts as 1.
        root_common_levels = [
            int(l.replace("ROOT", "")) if l != "ROOT" else 1
            for l in level_counter
            if "ROOT" in l and level_counter[l] > threshold
        ]
        if level > 0:
            # NOTE(review): diffs may be empty when no positive common
            # level exists — diffs[0] would raise IndexError; confirm the
            # caller guarantees a sufficiently populated counter.
            diffs = sorted([(cl, level - cl) for cl in common_levels if cl > 0],
                           key=lambda t: t[1],
                           reverse=False)
            return diffs[0][0]
        else:
            diffs = sorted([(cl, level - cl) for cl in common_levels if cl < 0],
                           key=lambda t: t[1],
                           reverse=True)
            #Computing the differences to see whether the closest level
            #can be encoded using the top-down encoding
            diffs_from_root = sorted(
                [(str(rcl) + "ROOT", int(sentence_abs_levels[index]) - rcl)
                 for rcl in root_common_levels],
                key=lambda t: t[1],
                reverse=False)
            if diffs[0][1] == diffs_from_root[0][1]:
                # Tie: prefer the top-down encoding.
                return diffs_from_root[0][0]
            else:
                return min([diffs[0], diffs_from_root[0]],
                           key=lambda t: abs(t[1]))[0]
    return level
def euler80():
    """Project Euler 80: sum the first 100 decimal digits of the square
    roots of the irrational roots among 2..100."""
    decimal.getcontext().prec = 105  # a little headroom beyond 100 digits
    total = 0
    for i in range(2, 101):
        # Perfect squares have rational roots and are excluded.
        if is_int(math.sqrt(i)):
            continue
        digit_str = str(decimal.Decimal(i).sqrt()).replace(".", "")
        total += sum(digits(digit_str[:100]))
    return total
def get_admin(self):
    """Return the person's role dict: level 1 'Admin' if an admin row
    exists for self.idPerson, otherwise level 0 'Colaborador'."""
    if not is_int(self.idPerson):
        raise TypeError
    if not person_exists(self.idPerson):
        raise Exception("This person id doesn't exist")
    # A non-empty result set means the person is an admin.
    if query_person_admin(self.idPerson):
        return {"level": 1, "name": "Admin"}
    return {"level": 0, "name": "Colaborador"}
def compare_user(their_id, session, param):
    """Fetch the Goodreads compare page for their_id and compute the
    Pearson similarity over books BOTH users have actually rated."""
    param['id'] = their_id
    comparison = session.get('https://www.goodreads.com/user/compare/' +
                             str(their_id), params=param)
    ctree = objectify.fromstring(comparison.content)
    pairs = [(review.your_review.rating, review.their_review.rating)
             for review in ctree.compare.reviews.getchildren()]
    # exclude pairs where either or both have not entered a review (but
    # have put it on a shelf/shelves)
    rated = [(mine, theirs) for (mine, theirs) in pairs
             if is_int(mine) and is_int(theirs)]
    your_reviews = [pair[0] for pair in rated]
    their_reviews = [pair[1] for pair in rated]
    similarity = pearson_def(your_reviews, their_reviews)
    return comparison, their_id, similarity
async def deleteall(self, ctx, channel=''):
    ''' Deletes all messages that will be sent in the given channel. (Admin+)
    Arguments: channel (mention, name, or id)
    '''
    addCommand()

    if not channel:
        raise commands.CommandError(
            message=f'Required argument missing: `channel`.')
    elif ctx.message.channel_mentions:
        channel = ctx.message.channel_mentions[0]
    else:
        # Resolve by id, then exact name, then substring match.
        found = False
        if is_int(channel):
            for c in ctx.guild.text_channels:
                if c.id == int(channel):
                    channel = c
                    found = True
                    break
        if not found:
            for c in ctx.guild.text_channels:
                if c.name.upper() == channel.upper():
                    channel = c
                    found = True
                    break
        if not found:
            for c in ctx.guild.text_channels:
                if channel.upper() in c.name.upper():
                    channel = c
                    found = True
                    break
        if not found:
            raise commands.CommandError(
                message=f'Could not find channel: `{channel}`.')

    guild = await Guild.get(ctx.guild.id)
    if guild.delete_channel_ids:
        if channel.id in guild.delete_channel_ids:
            # BUG FIX: list.remove() returns None, so the original stored
            # None in delete_channel_ids instead of the shortened list.
            ids = list(guild.delete_channel_ids)
            ids.remove(channel.id)
            await guild.update(delete_channel_ids=ids).apply()
            await ctx.send(
                f'Messages in {channel.mention} will no longer be deleted.'
            )
        else:
            await guild.update(
                delete_channel_ids=guild.delete_channel_ids + [channel.id]
            ).apply()
            await ctx.send(
                f'All future messages in {channel.mention} will be deleted.'
            )
    else:
        await guild.update(delete_channel_ids=[channel.id]).apply()
        await ctx.send(
            f'All future messages in {channel.mention} will be deleted.'
        )
def test_is_int(self):
    """is_int accepts plain decimal integer strings only."""
    for valid in ('0', '-1337', '1337'):
        self.assertTrue(is_int(valid))
    # Hex, words, trailing junk, and scientific notation are rejected.
    for invalid in ('0x0', 'Some text', '123a', '1e1'):
        self.assertFalse(is_int(invalid))
def get_row_list_sorting_key(x):
    """
    To be used as the key function in a sort. Puts cc_2 ahead of cc_10, e.g.

    Always returns a (str, int) tuple so keys are mutually comparable —
    the original returned a bare str for non-numeric names, which raises
    TypeError under Python 3 when compared against the tuples.
    """
    name, count = x
    if "_" in name:
        # rpartition splits on the LAST underscore, matching the original
        # split/join logic.
        start, _, end = name.rpartition("_")
        if utils.is_int(end):
            return (start, int(end))
    # No underscore, or a non-numeric suffix: sort by the full name.
    return (name, -1)
def get_queryset(self):
    """Filter the interaction queryset by the whitespace-separated terms on
    InteractionList.query: numeric terms match either id, text terms match
    either alias (case-insensitive substring)."""
    qs = self.queryset
    if InteractionList.query:
        for term in InteractionList.query.split(None):
            if is_int(term):
                qs = qs.filter(Q(id_a=term) | Q(id_b=term))
            else:
                qs = qs.filter(Q(alias_a__icontains=term) |
                               Q(alias_b__icontains=term))
    self.filterset = InteractionFilterSet(qs, self.request.GET)
    return self.filterset.qs
def extract_order_by(self, orig):
    """Strip an ORDER BY clause from a query string.

    Returns (remaining_query, orderables) where orderables is a list of
    ('column'|'similarity', spec) pairs, or (orig, False) when no ORDER BY
    clause is present.
    """
    pattern = r"""
        (order\s+by\s+(?P<orderbyclause>.*?((?=limit)|$)))
    """
    match = re.search(pattern, orig, re.VERBOSE | re.IGNORECASE)
    if not match:
        return (orig, False)
    order_by_clause = match.group('orderbyclause')
    orderables = list()
    for orderable in utils.column_string_splitter(order_by_clause):
        ## Check for DESC
        desc = re.search(r'\s+desc($|\s|,|(?=limit))', orderable,
                         re.IGNORECASE)
        # BUG FIX: re.sub's 4th positional argument is `count`, not
        # `flags` — the original passed re.IGNORECASE (== 2) as count, so
        # it made at most 2 case-SENSITIVE replacements.
        orderable = re.sub(r'\s+desc($|\s|,|(?=limit))', '', orderable,
                           flags=re.IGNORECASE)
        ## Check for similarity
        pattern = r"""
            similarity\s+to\s+(?P<rowid>[^\s]+)
            (\s+with\s+respect\s+to\s+(?P<column>[^\s]+))?
        """
        match = re.search(pattern, orderable, re.VERBOSE | re.IGNORECASE)
        if match:
            rowid = int(match.group('rowid').strip())
            if match.group('column'):
                column = match.group('column').strip()
            else:
                column = None
            orderables.append(('similarity', {'desc': desc,
                                              'target_row_id': rowid,
                                              'target_column': column}))
        else:
            match = re.search(r"""
                similarity_to\s*\(\s*
                (?P<rowid>[^,]+)
                (\s*,\s*(?P<column>[^\s]+)\s*)?
                \s*\)
            """, orderable, re.VERBOSE | re.IGNORECASE)
            if match:
                if match.group('column'):
                    column = match.group('column').strip()
                else:
                    column = None
                rowid = match.group('rowid').strip()
                if utils.is_int(rowid):
                    target_row_id = int(rowid)
                else:
                    target_row_id = rowid
                orderables.append(('similarity',
                                   {'desc': desc,
                                    'target_row_id': target_row_id,
                                    'target_column': column}))
            else:
                orderables.append(('column', {'desc': desc,
                                              'column': orderable.strip()}))
    # NOTE(review): `pattern` here is the LAST similarity pattern assigned
    # in the loop, not the ORDER BY pattern — preserved as-is because the
    # downstream contract is unclear; confirm intent.
    orig = re.sub(pattern, '', orig, flags=re.VERBOSE | re.IGNORECASE)
    return (orig, orderables)
def int_to_argument(self, key, value, gen_arg):
    """Add `value` to gen_arg as an int under `key`.

    None maps to -1; hex strings are parsed base-16, decimal strings
    base-10.  Returns 0 on success, -1 when the value cannot be converted.
    """
    res = 0
    if value is None:  # fixed: was `value == None`
        gen_arg.add_int(key, int(-1))
    elif utils.is_hex(value):
        gen_arg.add_int(key, int(value, 16))
    elif utils.is_int(value):
        gen_arg.add_int(key, int(value))
    else:
        # Single formatted string reproduces the old comma-separated
        # print's exact output, and is valid on both Py2 and Py3.
        print("Value error: value < %s > cannot be converted to int" % (value,))
        res = -1
    return res
def _get_pe_input(self, pe_spec):
    '''Get PE input file from the <number>|<index>|<file> spec.'''
    # Named PE ("pe-" anywhere in the spec) takes priority.
    if re.search('pe-', pe_spec):
        return self._get_pe_byname(pe_spec)
    if utils.is_int(pe_spec):
        n = int(pe_spec)
        # Non-positive numbers address by (negative) index; positive
        # numbers address by 1-based PE number.
        if n <= 0:
            return self._get_pe_byidx(n - 1)
        return self._get_pe_bynum(n)
    # Anything else: fall back to the most recent PE.
    return self._get_pe_byidx(-1)
def parse_delete_chain(self, words, orig):
    """Parse 'delete chain <i> from <table>' or
    'delete ... all chains from <table>'.

    Returns ('delete_chain', kwargs), False on a malformed command (after
    printing help), or None when the words don't form a delete statement.
    """
    if len(words) >= 3:
        if words[0] == 'delete':
            if words[1] == 'chain' and utils.is_int(words[2]):
                chain_index = int(words[2])
                # Length guard added: the original indexed words[3] and
                # words[4] without checking, so e.g. 'delete chain 0'
                # raised IndexError.
                if len(words) >= 5 and words[3] == 'from':
                    tablename = words[4]
                    return 'delete_chain', dict(tablename=tablename,
                                                chain_index=chain_index)
            elif len(words) >= 6 and words[2] == 'all' and words[3] == 'chains' and words[4] == 'from':
                chain_index = 'all'
                tablename = words[5]
                return 'delete_chain', dict(tablename=tablename,
                                            chain_index=chain_index)
            else:
                print(self.help_delete_chain())
                return False
def read_item(self):
    """Consume one token from the buffer and build the matching node.

    An empty buffer yields Null; '(' opens a list, a quote character
    opens a string, backtick quotes the next item, and bare tokens
    become Int, Real, or Symbol atoms — tried in that order.
    """
    if not self.buff:
        return t.Null()
    token = self.buff.pop(0)
    if token == '(':
        return self.read_list(')')
    if token in ['"', "'"]:
        return self.read_str(token)
    if token == '`':
        return t.Quote(self.read_item())
    if is_int(token):
        return t.Int(token)
    if is_float(token):
        return t.Real(token)
    return t.Symbol(token)
def indices_to_oob_indices(self, indices, oob_mask=None):
    """Map global node indices to zero-based out-of-bound indices.

    Subtracts the spatial-node count so the least oob index becomes 0,
    and blanks in-bounds positions with NaN.  A precomputed oob_mask
    may be supplied; otherwise it is derived from the indices.
    """
    assert is_vect(indices)
    assert is_int(indices)
    if oob_mask is None:
        # No mask supplied: identify the oob entries ourselves.
        oob_mask = self.are_indices_oob(indices)
    assert is_vect(oob_mask)
    assert oob_mask.shape == indices.shape
    # Shift so that the smallest oob index maps to 0 ...
    shifted = indices - self.get_num_spatial_nodes()
    # ... then NaN-out every normal (in-bounds) location.
    shifted[~oob_mask] = np.nan
    return shifted
def on_alert_dismiss(): if request.headers['Content-Type'] != 'application/json': bad_request(error='Content-Type must be application/json') alerts = request.get_json(force=False) for a in alerts: if 'id' not in a or not utils.is_int(a['id']): bad_request() if sentrygund.config.debug: print '[debug][views] dismissing:', json.dumps(alerts, indent=4) dismiss_alerts(alerts) data = {'success' : True, 'status' : 200} return Response(data, status=200, mimetype='application/json')
def convert_to_sparse_matrix(self, cell_coords, vertices, weights):
    """Assemble the sparse point-distribution matrix for N points.

    Each in-bounds point spreads its mass over the 2**D vertices of
    its cell with the given weights; each out-of-bound (oob) point
    puts all of its mass on a single node instead.

    cell_coords -- Coordinates for the N points (carries the oob info)
    vertices    -- (N, 2**D) integer matrix of node indices per point
    weights     -- (N, 2**D) float matrix, same shape as vertices

    Returns a CSR matrix of shape (num_total_nodes, N), one column
    per point.
    """
    assert isinstance(cell_coords, Coordinates)
    assert cell_coords.check()
    assert is_mat(vertices)
    assert is_int(vertices)
    assert is_mat(weights)
    assert is_float(weights)
    (N, D) = cell_coords.shape
    assert vertices.shape == weights.shape
    assert (N, 2 ** D) == vertices.shape
    assert D == self.dim
    oob_mask = cell_coords.oob.mask
    num_oob = cell_coords.oob.num_oob()
    num_normal = N - num_oob
    assert num_oob >= 0
    assert num_normal >= 0
    # Point ids split into in-bounds and oob groups.
    normal_idx = np.arange(N)[~oob_mask]
    oob_idx = np.arange(N)[oob_mask]
    m = num_normal * (2 ** D)  # Space for normal points
    M = m + num_oob  # Add on space for oob nodes
    # COO triplet buffers: data[i] lands at (rows[i], cols[i]).
    cols = np.empty(M)
    rows = np.empty(M)
    data = np.empty(M)
    # Add normal weights: each in-bounds point id is repeated 2**D
    # times and paired with its cell's vertex indices and weights.
    cols[:m] = (np.tile(normal_idx, (2 ** D, 1)).T).flatten()
    rows[:m] = (vertices[~oob_mask, :]).flatten()
    data[:m] = (weights[~oob_mask, :]).flatten()
    # Route all oob points to oob node with unit weight.
    # NOTE(review): assumes the oob node index sits in vertex column 0
    # for oob rows — confirm against the caller that fills `vertices`.
    cols[m:] = oob_idx
    rows[m:] = vertices[oob_mask, 0]
    data[m:] = np.ones(num_oob)
    NN = self.grid.get_num_total_nodes()
    point_dist = sps.coo_matrix((data, (rows, cols)), shape=(NN, N))
    point_dist = point_dist.tocsr()
    # Duplicate (row, col) triplets were summed by the COO->CSR
    # conversion; drop any explicit zeros that remain.
    point_dist.eliminate_zeros()
    return point_dist
def build_from_oob_indices(self,indices,D):
    """ Indices should be np.nan if not oob. Max spatial index should be
    already subtracted off, so indices should be integers in [0,2*D).

    Populates this object's dim/num/shape/mask/indices/data fields.
    In the (N, D) sparse `data` matrix, oob index 2*d writes -1 into
    column d and oob index 2*d+1 writes +1.
    """
    assert is_vect(indices)
    assert is_int(indices)
    # ignore nan: NaN entries are in-bounds, so the mask is True only
    # for genuinely out-of-bound rows.
    oob_mask = ~np.isnan(indices)
    assert not np.any(indices[oob_mask] < 0)
    assert not np.any(indices[oob_mask] >= 2*D)
    (N,) = indices.shape
    self.dim = D
    self.num = N
    self.shape = (N,D)
    self.mask = oob_mask # Binary mask
    self.indices = np.empty(N)
    self.indices.fill(np.nan)
    self.indices[oob_mask] = indices[oob_mask] # Cache of the indices
    # Go through the non nan indices, unpack into data.
    # lil_matrix supports cheap element writes; converted to CSC below.
    data = sps.lil_matrix((N,D),dtype=np.integer)
    for d in xrange(D):
        # Even indices: 2*d -> -1 in column d
        mask = (self.indices == 2*d)
        data[mask,d] = -1
        # Odd indices: 2*d+1 -> +1 in column d
        mask = (self.indices == 2*d+1)
        data[mask,d] = 1
    self.data = data.tocsc()
    assert self.check()
def on_deauth(): if request.headers['content-type'] != 'application/json': bad_request(error='content-type must be application/json') targets = request.get_json(force=False) if sentrygund.config.debug: print '[debug][views] received request for deauth attack' for t in targets: if 'id' not in t or not utils.is_int(t['id']): bad_request() t = models.retrieve_alert(t) if sentrygund.config.debug: print '[debug][views] calling launch_deauth() on:', print json.dumps(t, sort_keys=True, indent=4) launch_deauth(t) data = {'success' : True, 'status' : 200} return Response(data, status=200, mimetype='application/json')
def points_to_cell_coords(self,points):
    """ Figure out where points are. Returns the cell coordinate.

    points -- (N, self.dim) matrix of positions.
    For each dimension d, positions in [low, high) map linearly onto
    integer cell ids in [0, num_cells); rows flagged out-of-bounds
    get NaN coordinates.
    """
    assert is_mat(points)
    (N,D) = points.shape
    assert D == self.dim
    # Get the OOB info
    oob = OutOfBounds()
    oob.build_from_points(self,points)
    assert oob.check()
    raw_coords = np.empty((N,D))
    for d in xrange(D):
        (low,high,num_cells) = self.grid_desc[d]
        # Transform: [low,high) |-> [0,n)
        transform = num_cells * (points[:,d] - low) / (high - low)
        # Add a little fuzz to make sure stuff on the boundary is
        # mapped correctly before flooring.
        transform += self.fuzz
        raw_coords[:,d] = np.floor(transform).astype(np.integer)
        # Fuzz top boundary to get [low,high]: counts things just a
        # littttle bit greater than the last cell boundary as part of
        # the last cell.
        fuzz_mask = np.logical_and(high <= points[:,d],
                                   points[:,d] < high + 2*self.fuzz)
        raw_coords[fuzz_mask,d] = num_cells - 1
    # Out-of-bound rows carry no meaningful cell id.
    raw_coords[oob.mask,:] = np.nan
    # NOTE(review): raw_coords is a float array holding integral values
    # (plus NaNs); presumably is_int checks integrality of the values,
    # not the dtype — confirm against the helper.
    assert is_int(raw_coords)
    coords = Coordinates(raw_coords,oob)
    assert coords.check()
    return coords
def add_datauser(request):
    """Create a Citizen (plus training ground, storage and companies)
    from a POSTed form.

    The 'url_or_id' field may be either an erepublik profile URL or a
    bare citizen id; company quality fields are any POST keys
    containing 'food' or 'weapon', with the quality digit as the last
    character of the key.  Collects human-readable problems in
    `errors`.
    """
    errors={}
    if request.POST:
        companies = {}
        storage = ""
        for i in request.POST.keys():
            if ("food" in i) or ("weapon" in i):
                # Key layout: "<company-key><quality-digit>", e.g. the
                # last character is the quality, the rest is the key.
                quality_v = i[len(i)-1:]
                key = i[:len(i)-1]
                util.set_dict(companies,key,quality_v,request.POST[i])
        if "q_storage" in request.POST:
            storage = request.POST["q_storage"]
        name = request.POST["name"]
        er = request.POST["url_or_id"]
        er_id =0
        url_citizen = ""
        error = False
        val = URLValidator()
        try:
            # If it validates as a URL and looks like a profile link,
            # take the trailing path element as the citizen id.
            val(er)
            if er.index("erepublik.com/en/citizen/profile/") > -1:
                url_citizen = er
                er_id = er.split("/").pop()
        except Exception, e:
            # Not a URL: fall back to treating the input as a bare id.
            errors['url'] = "url invalid"
            if util.is_int(er) and int(er) >0:
                er_id = er
                url_citizen = "http://www.erepublik.com/en/citizen/profile/"+er_id
            else:
                errors['id citizen'] = "id invalid"
        # NOTE(review): this condition looks inverted — er_id == 0
        # together with a non-empty url_citizen cannot happen with the
        # logic above, so `error` is effectively always False and
        # `errors` is always cleared; confirm the intended check
        # (possibly `er_id == 0 or len(url_citizen) == 0`).
        if er_id == 0 and len(url_citizen)>0:
            error = True
        else:
            error = False
            errors ={}
        if not error:
            try:
                cit=Citizen(name=name,citizen_id_er=er_id,url_citizen=url_citizen)
                cit.save()
                Tg = TraningGround(owner_citizen=cit)
                Tg.weights_room = request.POST['TG1']
                Tg.climbing_center = request.POST['TG2']
                Tg.shooting_range = request.POST['TG3']
                Tg.special_forces = request.POST['TG4']
                Tg.save()
                sto=Storage(owner_citizen=cit)
                # Only set a quantity when a valid integer was posted;
                # the Storage row is saved either way.
                if(storage <> "" and util.is_int(storage)):
                    sto.quantity=int(storage)
                sto.save()
                for key in companies:
                    company = util.get_company(companies[key])
                    company.owner_citizen = cit
                    company.save()
            except DatabaseError as e:
                transaction.rollback()
                errors[e.errno]=e.strerror
            except ValidationError as e:
                transaction.rollback()
                errors[e.errno]=e.strerror
def test_is_int__valid(self):
    """Both an int and a numeric string evaluate to 1."""
    for value in (1, '1'):
        self.assertEqual(utils.is_int(value), 1)
def get_models(self, tablename, modelid=None):
    """ Return the models dict for the table if modelid is None.
    If modelid is an int, then return the model specified by that id.
    If modelid is a list, then get each individual model specified by each int in that list.

    Models live as pickled files under <data_dir>/<tablename>/models/
    (one 'model_<id>.pkl' per model); when that directory is absent,
    fall back to the legacy single-file 'models.pkl' layout.  Access
    is serialized through self.model_locks.

    Raises utils.BayesDBError on a non-integer modelid.
    """
    models_dir = os.path.join(self.data_dir, tablename, 'models')
    if os.path.exists(models_dir):
        if modelid is not None:
            def get_single_model(modelid):
                # Per-model lock around the single-file read; returns
                # None when the model file does not exist.
                # NOTE(review): a pickle failure here would skip the
                # release() — consider try/finally around the load.
                self.model_locks.acquire(tablename, modelid)
                # Only return one of the models
                full_fname = os.path.join(models_dir, 'model_%d.pkl' % modelid)
                if not os.path.exists(full_fname):
                    self.model_locks.release(tablename, modelid)
                    return None
                f = open(full_fname, 'r')
                m = pickle.load(f)
                f.close()
                self.model_locks.release(tablename, modelid)
                return m
            if type(modelid) == list:
                models = {}
                for i in modelid:
                    if not utils.is_int(i):
                        raise utils.BayesDBError('Invalid modelid: %s' % str(modelid))
                    models[i] = get_single_model(int(i))
                return models
            elif utils.is_int(modelid):
                return get_single_model(int(modelid))
            else:
                raise utils.BayesDBError('Invalid modelid: %s' % str(modelid))
        else:
            # Return all the models under a table-wide lock.
            models = {}
            self.model_locks.acquire_table(tablename)
            fnames = os.listdir(models_dir)
            for fname in fnames:
                if fname.startswith('model_'):
                    model_id = fname[6:] # remove preceding 'model_'
                    model_id = int(model_id[:-4]) # remove trailing '.pkl' and cast to int
                    full_fname = os.path.join(models_dir, fname)
                    f = open(full_fname, 'r')
                    m = pickle.load(f)
                    f.close()
                    models[model_id] = m
            self.model_locks.release_table(tablename)
            return models
    else:
        # Backwards compatibility with old model style.
        self.model_locks.acquire_table(tablename)
        try:
            f = open(os.path.join(self.data_dir, tablename, 'models.pkl'), 'r')
            models = pickle.load(f)
            f.close()
            if modelid is not None:
                ret = models[modelid]
            else:
                ret = models
            self.model_locks.release_table(tablename)
            return ret
        except IOError:
            # No legacy file either: no models exist for this table.
            self.model_locks.release_table(tablename)
            return {}
def execute_statement(self, bql_statement_ast, pretty=True, timing=False, plots=None,
                      yes=False, debug=False, pandas_df=None, pandas_output=True,
                      key_column=None, return_raw_result=False):
    """
    Accepts a SINGLE BQL STATEMENT as input, parses it, and executes it if it was
    parsed successfully.

    If pretty=True, then the command output will be pretty-printed as a string.
    If pretty=False, then the command output will be returned as a python object.

    timing=True prints out how long the command took to execute.

    For commands that have visual results, plots=True will cause those to be displayed
    by matplotlib as graphics rather than being pretty-printed as text. (Note that the
    graphics will also be saved if the user added SAVE TO <filename> to the BQL.)
    """
    if timing:
        start_time = time.time()
    parser_out = None
    # TODO move pyparsing objects out of client into parser
    if debug:
        # In debug mode let parse errors propagate unwrapped.
        parser_out = self.parser.parse_single_statement(bql_statement_ast)
    else:
        try:
            parser_out = self.parser.parse_single_statement(bql_statement_ast)
        except Exception as e:
            raise utils.BayesDBParseError(str(e))
    if parser_out is None:
        print("Could not parse command. Try typing 'help' for a list of all commands.")
        return
    elif not parser_out:
        return
    method_name, args_dict, client_dict = parser_out
    if client_dict is None:
        client_dict = {}
    # Do stuff now that you know the user's command, but before passing it to engine.
    if method_name == 'execute_file':
        return dict(message='execute_file',
                    bql_string=open(args_dict['filename'], 'r').read())
    elif method_name == 'update_codebook':
        _, codebook_rows = data_utils.read_csv(client_dict['codebook_path'],
                                               has_header=True)
        # TODO: require specific codebook_header values? Or don't require a header,
        # and if the first value in the header is actually a data column name, assume
        # the first row is codebook data, not a header.
        # Create a dict indexed by column name
        codebook = dict()
        for codebook_row in codebook_rows:
            codebook[codebook_row[0]] = dict(zip(['short_name', 'description',
                                                  'value_map'],
                                                 codebook_row[1:]))
        args_dict['codebook'] = codebook
    elif (method_name == 'drop_btable') and (not yes):
        # If dropping something, ask for confirmation.
        print("Are you sure you want to permanently delete this btable, and all associated "
              "models, without any way to get them back? Enter 'y' if yes.")
        user_confirmation = raw_input()
        if 'y' != user_confirmation.strip():
            return dict(message="Operation canceled by user.")
    elif (method_name == 'drop_models') and (not yes):
        # If dropping something, ask for confirmation.
        print("Are you sure you want to permanently delete model(s), without any way to get "
              "them back? Enter 'y' if yes.")
        user_confirmation = raw_input()
        if 'y' != user_confirmation.strip():
            return dict(message="Operation canceled by user.")
    elif method_name == 'load_models':
        pklpath = client_dict['pkl_path']
        try:
            model_data = pickle.load(gzip.open(self.parser.get_absolute_path(pklpath), 'rb'))
        except IOError as e:
            # Retry with the expected extensions before giving up.
            if pklpath[-7:] != '.pkl.gz':
                if pklpath[-4:] == '.pkl':
                    model_data = pickle.load(open(self.parser.get_absolute_path(pklpath),
                                                  'rb'))
                else:
                    pklpath = pklpath + ".pkl.gz"
                    model_data = pickle.load(gzip.open(self.parser.get_absolute_path(pklpath),
                                                       'rb'))
            else:
                raise utils.BayesDBError('Models file %s could not be found.' % pklpath)
        # This is the more recent version, where schema is stored with models.
        if 'schema' in model_data.keys():
            args_dict['models'] = model_data['models']
            args_dict['model_schema'] = model_data['schema']
        # This support older saved models, where only the model info was stored.
        else:
            args_dict['models'] = model_data
            args_dict['model_schema'] = None
    elif method_name == 'create_btable':
        if pandas_df is None:
            header, rows = data_utils.read_csv(client_dict['csv_path'])
        else:
            header, rows = data_utils.read_pandas_df(pandas_df)
        args_dict['header'] = header
        args_dict['raw_T_full'] = rows
        args_dict['key_column'] = key_column
        args_dict['subsample'] = False
        if 'codebook_path' in client_dict:
            _, codebook_rows = data_utils.read_csv(client_dict['codebook_path'],
                                                   has_header=True)
            # TODO: require specific codebook_header values? Or don't require a header,
            # and if the first value in the header is actually a data column name, assume
            # the first row is codebook data, not a header.
            # Create a dict indexed by column name
            codebook = dict()
            for codebook_row in codebook_rows:
                codebook[codebook_row[0]] = dict(zip(['short_name', 'description',
                                                      'value_map'],
                                                     codebook_row[1:]))
            args_dict['codebook'] = codebook
        else:
            warning = dedent("""
            WARNING!

            You are creating a btable without a codebook, which will make interpretation
            of results more difficult. Codebooks should be in CSV format with each row
            corresponding to one column of the original data. The codebook should have
            four columns:

            1. actual column name
            2. short column description
            3. long column description
            4. value map (optional, only used for categorical columns - should be in JSON
            format)
            """)
            print(warning)
        # Display warning messages and get confirmation if btable is too large.
        # Ask user if they want to turn on subsampling.
        max_columns = 200
        max_rows = 1000
        max_cells = 100000
        message = None
        if not yes:
            # Later checks overwrite `message`; only the last triggered
            # warning is shown.
            if len(rows[0]) > max_columns:
                message = "The btable you are uploading has %d columns, but BayesDB is " \
                          "currently designed to support only %d columns. If you proceed, " \
                          "performance may suffer unless you set many columns' datatypes to " \
                          "'ignore'. Would you like to continue? Enter 'y' if yes." \
                          % (len(rows[0]), max_columns)
            if len(rows) > max_rows:
                message = "The btable you are uploading has %d rows, but BayesDB is currently "\
                          "designed to support only %d rows. If you proceed, performance may "\
                          "suffer. Would you like to continue? Enter 'y' to continue without "\
                          "subsampling, 'n' to abort, 's' to continue by subsampling %d rows, "\
                          "or a positive integer to specify the number of rows to be "\
                          "subsampled." % (len(rows), max_rows, max_rows)
            if len(rows[0])*len(rows) > max_cells:
                message = "The btable you are uploading has %d cells, but BayesDB is currently"\
                          " designed to support only %d cells. If you proceed, performance may"\
                          " suffer unless you enable subsampling. Enter 'y' to continue "\
                          " without subsampling, 'n' to abort, 's' to continue by subsampling "\
                          "%d rows, or a positive integer to specify the number of rows to be "\
                          "subsampled." % (len(rows)*len(rows[0]), max_cells, max_rows)
            if message is not None:
                print(message)
                user_confirmation = raw_input()
                if 'y' == user_confirmation.strip():
                    pass
                elif 'n' == user_confirmation.strip():
                    return dict(message="Operation canceled by user.")
                elif 's' == user_confirmation.strip():
                    args_dict['subsample'] = min(max_rows, len(rows))
                elif utils.is_int(user_confirmation.strip()):
                    args_dict['subsample'] = int(user_confirmation.strip())
                else:
                    return dict(message="Operation canceled by user.")
    elif method_name in ['label_columns', 'update_metadata']:
        if client_dict['source'] == 'file':
            header, rows = data_utils.read_csv(client_dict['csv_path'])
            args_dict['mappings'] = {key: value for key, value in rows}
    # Call engine.
    result = self.call_bayesdb_engine(method_name, args_dict, debug)
    # If error occurred, exit now.
    if 'error' in result and result['error']:
        if pretty:
            print(result['message'])
            return result['message']
        else:
            return result
    # Do stuff now that engine has given you output, but before printing the result.
    result = self.callback(method_name, args_dict, client_dict, result)
    if return_raw_result:
        raw_result = {
            'result': result,
            'method_name': method_name,
            'client_dict': client_dict}
        print("returning raw result for %s" % (method_name))
        return raw_result
    assert type(result) != int
    if timing:
        end_time = time.time()
        print('Elapsed time: %.2f seconds.' % (end_time - start_time))
    if plots is None:
        plots = 'DISPLAY' in os.environ.keys()
    if 'matrix' in result and (plots or client_dict['filename']):
        # Plot matrices
        plotting_utils.plot_matrix(result['matrix'], result['column_names'],
                                   result['title'], client_dict['filename'])
        if pretty:
            if 'column_lists' in result:
                print(self.pretty_print(dict(column_lists=result['column_lists'])))
            return self.pretty_print(result)
        else:
            return result
    if ('plot' in client_dict and client_dict['plot']):
        if (plots or client_dict['filename']):
            # Plot generalized histograms or scatterplots
            try:
                plotting_M_c = result['metadata_full']['M_c_full']
            except KeyError:
                plotting_M_c = result['M_c']
            plot_remove_key = method_name in ['select', 'infer']
            plotting_utils.plot_general_histogram(result['column_names'], result['data'],
                                                  plotting_M_c, result['schema_full'],
                                                  client_dict['filename'],
                                                  client_dict['scatter'],
                                                  remove_key=plot_remove_key)
            return self.pretty_print(result)
        else:
            if 'message' not in result:
                result['message'] = ""
            result['message'] = "Your query indicates that you would like to make a plot, but "\
                                "in order to do so, you must either enable plotting in a "\
                                "window or specify a filename to save to by appending 'SAVE "\
                                "TO <filename>' to this command.\n" + result['message']
    if pretty:
        pp = self.pretty_print(result)
        print(pp)
    # Print warnings last so they're readable without scrolling backwards.
    if 'warnings' in result:
        """ Pretty-print warnings. """
        for warning in result['warnings']:
            print('WARNING: %s' % warning)
    if pandas_output and 'data' in result and 'column_labels' in result:
        result_pandas_df = data_utils.construct_pandas_df(result)
        return result_pandas_df
    else:
        return result
def execute_statement(
    self,
    bql_statement_ast,
    pretty=True,
    timing=False,
    plots=None,
    yes=False,
    debug=False,
    pandas_df=None,
    pandas_output=True,
    key_column=None,
):
    """
    Accepts a SINGLE BQL STATEMENT as input, parses it, and executes it if it was
    parsed successfully.

    If pretty=True, then the command output will be pretty-printed as a string.
    If pretty=False, then the command output will be returned as a python object.

    timing=True prints out how long the command took to execute.

    For commands that have visual results, plots=True will cause those to be displayed
    by matplotlib as graphics rather than being pretty-printed as text. (Note that the
    graphics will also be saved if the user added SAVE TO <filename> to the BQL.)
    """
    if timing:
        start_time = time.time()
    parser_out = None
    ##TODO move pyparsing objects out of client into parser
    if debug:
        # In debug mode let parse errors propagate unwrapped.
        parser_out = self.parser.parse_single_statement(bql_statement_ast)
    else:
        try:
            parser_out = self.parser.parse_single_statement(bql_statement_ast)
        except Exception as e:
            raise utils.BayesDBParseError(str(e))
    if parser_out is None:
        print "Could not parse command. Try typing 'help' for a list of all commands."
        return
    elif not parser_out:
        return
    method_name, args_dict, client_dict = parser_out
    if client_dict is None:
        client_dict = {}
    ## Do stuff now that you know the user's command, but before passing it to engine.
    if method_name == "execute_file":
        return dict(message="execute_file", bql_string=open(args_dict["filename"], "r").read())
    elif (method_name == "drop_btable") and (not yes):
        ## If dropping something, ask for confirmation.
        print "Are you sure you want to permanently delete this btable, and all associated models, without any way to get them back? Enter 'y' if yes."
        user_confirmation = raw_input()
        if "y" != user_confirmation.strip():
            return dict(message="Operation canceled by user.")
    elif (method_name == "drop_models") and (not yes):
        ## If dropping something, ask for confirmation.
        print "Are you sure you want to permanently delete model(s), without any way to get them back? Enter 'y' if yes."
        user_confirmation = raw_input()
        if "y" != user_confirmation.strip():
            return dict(message="Operation canceled by user.")
    elif method_name == "load_models":
        pklpath = client_dict["pkl_path"]
        try:
            models = pickle.load(gzip.open(self.parser.get_absolute_path(pklpath), "rb"))
        except IOError as e:
            # Retry with the expected extensions before giving up.
            if pklpath[-7:] != ".pkl.gz":
                if pklpath[-4:] == ".pkl":
                    models = pickle.load(open(self.parser.get_absolute_path(pklpath), "rb"))
                else:
                    pklpath = pklpath + ".pkl.gz"
                    models = pickle.load(gzip.open(self.parser.get_absolute_path(pklpath), "rb"))
            else:
                raise utils.BayesDBError("Models file %s could not be found." % pklpath)
        args_dict["models"] = models
    elif method_name == "create_btable":
        if pandas_df is None:
            header, rows = data_utils.read_csv(client_dict["csv_path"])
        else:
            header, rows = data_utils.read_pandas_df(pandas_df)
        args_dict["header"] = header
        args_dict["raw_T_full"] = rows
        args_dict["key_column"] = key_column
        args_dict["subsample"] = False
        # Display warning messages and get confirmation if btable is too large.
        # Ask user if they want to turn on subsampling.
        max_columns = 200
        max_rows = 1000
        max_cells = 100000
        message = None
        if not yes:
            # Later checks overwrite `message`; only the last triggered
            # warning is shown.
            if len(rows[0]) > max_columns:
                message = (
                    "The btable you are uploading has %d columns, but BayesDB is currently designed to support only %d columns. If you proceed, performance may suffer unless you set many columns' datatypes to 'ignore'. Would you like to continue? Enter 'y' if yes."
                    % (len(rows[0]), max_columns)
                )
            if len(rows) > max_rows:
                message = (
                    "The btable you are uploading has %d rows, but BayesDB is currently designed to support only %d rows. If you proceed, performance may suffer. Would you like to continue? Enter 'y' to continue without subsampling, 'n' to abort, 's' to continue by subsampling %d rows, or a positive integer to specify the number of rows to be subsampled."
                    % (len(rows), max_rows, max_rows)
                )
            if len(rows[0]) * len(rows) > max_cells:
                message = (
                    "The btable you are uploading has %d cells, but BayesDB is currently designed to support only %d cells. If you proceed, performance may suffer unless you enable subsampling. Enter 'y' to continue without subsampling, 'n' to abort, 's' to continue by subsampling %d rows, or a positive integer to specify the number of rows to be subsampled."
                    % (len(rows) * len(rows[0]), max_cells, max_rows)
                )
            if message is not None:
                print message
                user_confirmation = raw_input()
                if "y" == user_confirmation.strip():
                    pass
                elif "n" == user_confirmation.strip():
                    return dict(message="Operation canceled by user.")
                elif "s" == user_confirmation.strip():
                    args_dict["subsample"] = min(max_rows, len(rows))
                elif utils.is_int(user_confirmation.strip()):
                    args_dict["subsample"] = int(user_confirmation.strip())
                else:
                    return dict(message="Operation canceled by user.")
    elif method_name in ["label_columns", "update_metadata"]:
        if client_dict["source"] == "file":
            header, rows = data_utils.read_csv(client_dict["csv_path"])
            args_dict["mappings"] = {key: value for key, value in rows}
    ## Call engine.
    result = self.call_bayesdb_engine(method_name, args_dict, debug)
    ## If error occurred, exit now.
    if "error" in result and result["error"]:
        if pretty:
            print result["message"]
            return result["message"]
        else:
            return result
    ## Do stuff now that engine has given you output, but before printing the result.
    result = self.callback(method_name, args_dict, client_dict, result)
    assert type(result) != int
    if timing:
        end_time = time.time()
        print "Elapsed time: %.2f seconds." % (end_time - start_time)
    if plots is None:
        plots = "DISPLAY" in os.environ.keys()
    if "matrix" in result and (plots or client_dict["filename"]):
        # Plot matrices
        plotting_utils.plot_matrix(
            result["matrix"], result["column_names"], result["title"], client_dict["filename"]
        )
        if pretty:
            if "column_lists" in result:
                print self.pretty_print(dict(column_lists=result["column_lists"]))
            return self.pretty_print(result)
        else:
            return result
    if "plot" in client_dict and client_dict["plot"]:
        if plots or client_dict["filename"]:
            # Plot generalized histograms or scatterplots
            plot_remove_key = method_name in ["select", "infer"]
            plotting_utils.plot_general_histogram(
                result["columns"],
                result["data"],
                result["M_c"],
                client_dict["filename"],
                client_dict["scatter"],
                remove_key=plot_remove_key,
            )
            return self.pretty_print(result)
        else:
            if "message" not in result:
                result["message"] = ""
            result["message"] = (
                "Your query indicates that you would like to make a plot, but in order to do so, you must either enable plotting in a window or specify a filename to save to by appending 'SAVE TO <filename>' to this command.\n"
                + result["message"]
            )
    if pretty:
        pp = self.pretty_print(result)
        print pp
    if pandas_output and "data" in result and "columns" in result:
        result_pandas_df = data_utils.construct_pandas_df(result)
        return result_pandas_df
    else:
        return result
def execute_statement(
    self,
    bql_statement_ast,
    pretty=True,
    timing=False,
    plots=None,
    yes=False,
    debug=False,
    pandas_df=None,
    pandas_output=True,
    key_column=None,
    return_raw_result=False,
    force_output=False,
):
    """
    Accepts a SINGLE BQL STATEMENT as input, parses it, and executes it if it was
    parsed successfully.

    If pretty=True, then the command output will be pretty-printed as a string.
    If pretty=False, then the command output will be returned as a python object.
    If force_output=True, then results will be returned regardless of pretty

    timing=True prints out how long the command took to execute.

    For commands that have visual results, plots=True will cause those to be displayed
    by matplotlib as graphics rather than being pretty-printed as text. (Note that the
    graphics will also be saved if the user added SAVE TO <filename> to the BQL.)

    Other parameters (as used below):
      yes:               if True, skip all interactive confirmation prompts
                         (drop btable/models, schema-less model load, large-btable
                         subsampling questions).
      debug:             passed through to the parser and engine; when True, parse
                         errors propagate raw instead of being wrapped in
                         BayesDBParseError.
      pandas_df:         for 'create btable', read data from this DataFrame instead
                         of the CSV at client_dict['csv_path'].
      pandas_output:     if True and the result carries 'data' + 'column_labels',
                         return it converted to a pandas DataFrame.
      key_column:        forwarded to the engine as args_dict['key_column'] for
                         'create btable'.
      return_raw_result: if True, return {'result', 'method_name', 'client_dict'}
                         immediately after the callback, skipping all printing.
    """
    if timing:
        start_time = time.time()

    # --- Parse the single BQL statement into (method_name, args_dict, client_dict).
    parser_out = None
    # TODO move pyparsing objects out of client into parser
    if debug:
        # In debug mode let parser exceptions propagate with their full traceback.
        parser_out = self.parser.parse_single_statement(bql_statement_ast)
    else:
        try:
            parser_out = self.parser.parse_single_statement(bql_statement_ast)
        except Exception as e:
            raise utils.BayesDBParseError(str(e))
    if parser_out is None:
        # None signals an unrecognized command; falsy-but-not-None signals
        # "nothing to do" (handled silently below).
        print ("Could not parse command. Try typing 'help' for a list of all commands.")
        return
    elif not parser_out:
        return

    method_name, args_dict, client_dict = parser_out
    if client_dict is None:
        client_dict = {}

    # --- Pre-engine handling: commands that need client-side file reads or
    # --- interactive confirmation before being passed to the engine.
    if method_name == "execute_file":
        return dict(message="execute_file", bql_string=open(args_dict["filename"], "r").read())
    elif method_name == "update_codebook":
        _, codebook_rows = data_utils.read_csv(client_dict["codebook_path"], has_header=True)
        # TODO: require specific codebook_header values? Or don't require a header,
        # and if the first value in the header is actually a data column name, assume
        # the first row is codebook data, not a header.

        # Create a dict indexed by column name
        codebook = dict()
        for codebook_row in codebook_rows:
            codebook[codebook_row[0]] = dict(zip(["short_name", "description", "value_map"], codebook_row[1:]))
        args_dict["codebook"] = codebook
    elif (method_name == "drop_btable") and (not yes):
        # If dropping something, ask for confirmation.
        print (
            "Are you sure you want to permanently delete this btable, and all associated "
            "models, without any way to get them back? Enter 'y' if yes."
        )
        user_confirmation = raw_input()
        if "y" != user_confirmation.strip():
            return dict(message="Operation canceled by user.")
    elif (method_name == "drop_models") and (not yes):
        # If dropping something, ask for confirmation.
        print (
            "Are you sure you want to permanently delete model(s), without any way to get "
            "them back? Enter 'y' if yes."
        )
        user_confirmation = raw_input()
        if "y" != user_confirmation.strip():
            return dict(message="Operation canceled by user.")
    elif method_name == "load_models":
        # Try the path as given (assumed gzipped pickle); on IOError fall back to
        # plain .pkl, or retry with a .pkl.gz suffix appended.
        pklpath = client_dict["pkl_path"]
        try:
            model_data = pickle.load(gzip.open(self.parser.get_absolute_path(pklpath), "rb"))
        except IOError as e:
            if pklpath[-7:] != ".pkl.gz":
                if pklpath[-4:] == ".pkl":
                    # Uncompressed pickle file.
                    model_data = pickle.load(open(self.parser.get_absolute_path(pklpath), "rb"))
                else:
                    # No recognized extension: assume the user omitted ".pkl.gz".
                    pklpath = pklpath + ".pkl.gz"
                    model_data = pickle.load(gzip.open(self.parser.get_absolute_path(pklpath), "rb"))
            else:
                raise utils.BayesDBError("Models file %s could not be found." % pklpath)
        # This is the more recent version, where schema is stored with models.
        if "schema" in model_data.keys():
            args_dict["models"] = model_data["models"]
            args_dict["model_schema"] = model_data["schema"]
        # This support older saved models, where only the model info was stored.
        else:
            args_dict["models"] = model_data
            args_dict["model_schema"] = None

        # Older versions of model_schema just had a str cctype as the dict items.
        # Newest version has a dict of cctype and parameters. Use this values to
        # test the recency of the models.
        model_schema = args_dict["model_schema"]
        if model_schema:
            model_schema_itemtype = type(model_schema[model_schema.keys()[0]])
        else:
            model_schema_itemtype = None

        if model_schema is None or model_schema_itemtype != dict:
            # Schema missing or in the old string-only format: discard it and warn,
            # since column parameters cannot be validated against the target table.
            args_dict["model_schema"] = None
            if not yes:
                print """WARNING! The models you are currently importing were saved without a schema
                    or without detailed column parameters (probably from a previous version).

                    If you are loading models into the same table from which you created them, problems
                    are unlikely, unless you have dropped models and then updated the schema.

                    If you are loading models into a different table from which you created them, you
                    should verify that the table schemas are the same.

                    Please use "SAVE MODELS FROM <btable> TO <filename.pkl.gz>" to create an updated copy of your models.

                    Are you sure you want to load these model(s)?
                    """
                user_confirmation = raw_input()
                if "y" != user_confirmation.strip():
                    return dict(message="Operation canceled by user.")
    elif method_name == "create_btable":
        # Data may come from a CSV file or directly from a pandas DataFrame.
        if pandas_df is None:
            header, rows = data_utils.read_csv(client_dict["csv_path"])
        else:
            header, rows = data_utils.read_pandas_df(pandas_df)
        args_dict["header"] = header
        args_dict["raw_T_full"] = rows
        args_dict["key_column"] = key_column
        args_dict["subsample"] = False

        if "codebook_path" in client_dict:
            _, codebook_rows = data_utils.read_csv(client_dict["codebook_path"], has_header=True)
            # TODO: require specific codebook_header values? Or don't require a header,
            # and if the first value in the header is actually a data column name, assume
            # the first row is codebook data, not a header.

            # Create a dict indexed by column name
            codebook = dict()
            for codebook_row in codebook_rows:
                codebook[codebook_row[0]] = dict(zip(["short_name", "description", "value_map"], codebook_row[1:]))
            args_dict["codebook"] = codebook
        else:
            warning = dedent(
                """
                WARNING!

                You are creating a btable without a codebook, which will make interpretation
                of results more difficult.

                Codebooks should be in CSV format with each row corresponding to one column
                of the original data. The codebook should have four columns:

                1. actual column name
                2. short column description
                3. long column description
                4. value map (optional, only used for categorical columns - should be in JSON
                   format)
                """
            )
            print (warning)

        # Display warning messages and get confirmation if btable is too large.
        # Ask user if they want to turn on subsampling.
        max_columns = 200
        max_rows = 1000
        max_cells = 100000
        message = None
        if not yes:
            # NOTE: later checks overwrite `message`, so only the last triggered
            # limit (columns -> rows -> cells) is shown to the user.
            if len(rows[0]) > max_columns:
                message = (
                    "The btable you are uploading has %d columns, but BayesDB is "
                    "currently designed to support only %d columns. If you proceed, "
                    "performance may suffer unless you set many columns' datatypes to "
                    "'ignore'. Would you like to continue? Enter 'y' if yes."
                    % (len(rows[0]), max_columns)
                )
            if len(rows) > max_rows:
                message = (
                    "The btable you are uploading has %d rows, but BayesDB is currently "
                    "designed to support only %d rows. If you proceed, performance may "
                    "suffer. Would you like to continue? Enter 'y' to continue without "
                    "subsampling, 'n' to abort, 's' to continue by subsampling %d rows, "
                    "or a positive integer to specify the number of rows to be "
                    "subsampled." % (len(rows), max_rows, max_rows)
                )
            if len(rows[0]) * len(rows) > max_cells:
                message = (
                    "The btable you are uploading has %d cells, but BayesDB is currently"
                    " designed to support only %d cells. If you proceed, performance may"
                    " suffer unless you enable subsampling. Enter 'y' to continue "
                    " without subsampling, 'n' to abort, 's' to continue by subsampling "
                    "%d rows, or a positive integer to specify the number of rows to be "
                    "subsampled."
                    % (len(rows) * len(rows[0]), max_cells, max_rows)
                )
            if message is not None:
                print (message)
                user_confirmation = raw_input()
                if "y" == user_confirmation.strip():
                    pass
                elif "n" == user_confirmation.strip():
                    return dict(message="Operation canceled by user.")
                elif "s" == user_confirmation.strip():
                    # Default subsample size, capped by the actual row count.
                    args_dict["subsample"] = min(max_rows, len(rows))
                elif utils.is_int(user_confirmation.strip()):
                    # User supplied an explicit number of rows to subsample.
                    args_dict["subsample"] = int(user_confirmation.strip())
                else:
                    return dict(message="Operation canceled by user.")
    elif method_name in ["label_columns", "update_metadata"]:
        if client_dict["source"] == "file":
            # Each CSV row is expected to be a (column, label/value) pair.
            header, rows = data_utils.read_csv(client_dict["csv_path"])
            args_dict["mappings"] = {key: value for key, value in rows}

    # Call engine.
    result = self.call_bayesdb_engine(method_name, args_dict, debug)

    # If error occurred, exit now.
    if "error" in result and result["error"]:
        if pretty:
            print (result["message"])
            if force_output:
                return result
            else:
                return result["message"]
        else:
            return result

    # Do stuff now that engine has given you output, but before printing the result.
    result = self.callback(method_name, args_dict, client_dict, result)

    if return_raw_result:
        # Caller wants the unformatted engine/callback output.
        raw_result = {"result": result, "method_name": method_name, "client_dict": client_dict}
        print ("returning raw result for %s" % (method_name))
        return raw_result

    assert type(result) != int

    if timing:
        end_time = time.time()
        print ("Elapsed time: %.2f seconds."
               % (end_time - start_time))

    # Default plotting on iff a display is available.
    if plots is None:
        plots = "DISPLAY" in os.environ.keys()

    if "matrix" in result and (plots or client_dict["filename"]):
        # Plot matrices
        plotting_utils.plot_matrix(
            result["matrix"], result["column_names"], result["title"], client_dict["filename"]
        )
        if pretty:
            if "column_lists" in result:
                print (self.pretty_print(dict(column_lists=result["column_lists"])))
            if force_output:
                return result
            else:
                return self.pretty_print(result)
        else:
            return result
    if "plot" in client_dict and client_dict["plot"]:
        if plots or client_dict["filename"]:
            # Plot generalized histograms or scatterplots
            # Prefer the full metadata when the callback provided it; fall back
            # to the plain M_c otherwise.
            try:
                plotting_M_c = result["metadata_full"]["M_c_full"]
            except KeyError:
                plotting_M_c = result["M_c"]
            plot_remove_key = method_name in ["select", "infer"]
            plotting_utils.plot_general_histogram(
                result["column_names"],
                result["data"],
                plotting_M_c,
                result["schema_full"],
                client_dict["filename"],
                client_dict["scatter"],
                remove_key=plot_remove_key,
            )
            return self.pretty_print(result)
        else:
            # Plot requested but no display and no output filename: explain how
            # to get the plot, prepended to any existing message.
            if "message" not in result:
                result["message"] = ""
            result["message"] = (
                "Your query indicates that you would like to make a plot, but "
                "in order to do so, you must either enable plotting in a "
                "window or specify a filename to save to by appending 'SAVE "
                "TO <filename>' to this command.\n" + result["message"]
            )

    if pretty:
        pp = self.pretty_print(result)
        print (pp)

    # Print warnings last so they're readable without scrolling backwards.
    if "warnings" in result:
        """ Pretty-print warnings. """
        for warning in result["warnings"]:
            print ("WARNING: %s" % warning)

    if pandas_output and "data" in result and "column_labels" in result:
        result_pandas_df = data_utils.construct_pandas_df(result)
        return result_pandas_df
    else:
        return result