def build_binary_expression(node):
    if len(node.children) != 3:
        logging.error('FATAL ERROR: build_binary_expression()')
        sys.exit(1)

    valid_operators = {
        'EqualOperator', 'NotEqualOperator',
        'AndOperator', 'OrOperator',
        'LessThanOperator', 'LessThanEqualOperator',
        'GreaterThanOperator', 'GreaterThanEqualOperator',
        'BinaryAndOperator', 'BinaryOrOperator',
        'AddOperator', 'SubtractOperator',
        'MultiplyOperator', 'DivideOperator', 'ModuloOperator',
    }

    lhs = build_expr(node[0])
    rhs = build_expr(node[2])

    # Check that the operator is valid, and convert it to the corresponding
    # expression node (done by stripping "Operator" and replacing it with
    # "Expression").
    if node[1].name in valid_operators:
        node_name = node[1].name[:-len('Operator')] + 'Expression'
        return ASTNode(node_name, None, [lhs, rhs])
    else:
        logging.error('FATAL ERROR: Invalid operator %s encountered in build_binary_expression()'
                      % node[1].name)
        sys.exit(1)
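# A minimal, self-contained sketch (illustration only, not part of the
# compiler) of the name-mapping trick used above: "FooOperator" becomes
# "FooExpression" by slicing off the "Operator" suffix. This assumes
# operator node names always end in "Operator", which the valid_operators
# set guarantees.
def _operator_to_expression_name(op_name):
    assert op_name.endswith('Operator')
    return op_name[:-len('Operator')] + 'Expression'

# e.g. _operator_to_expression_name('AddOperator') == 'AddExpression'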
def get_arraytype(typ):
    if is_array_type(typ):
        return typ[:-2]
    else:
        logging.error("FATAL ERROR: non-array type %s provided to get_arraytype" % typ)
        sys.exit(-1)
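# Hedged sketch of the expected behaviour: array types are assumed to be
# canonical type names suffixed with "[]", so stripping the last two
# characters yields the element type. The is_array_type stub below is an
# assumption for illustration only (the real helper lives elsewhere in
# the codebase).
def is_array_type(typ):
    return typ.endswith('[]')

# e.g. get_arraytype('java.lang.String[]') == 'java.lang.String'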
def check_method_forwardreference(pkg_index, type_index, cls_idx):
    """Check forward referencing of methods in field initializers."""
    for pkg_name in pkg_index:
        for cu_env in pkg_index[pkg_name]:
            if cu_env['ClassDeclaration'] == None:
                continue
            typedecl_env = cu_env['ClassDeclaration']
            for field_decl in typedecl_env.node.select(['Fields', 'FieldDeclaration']):
                # Get all methods declared after this field.
                methods_obj_blacklist = []
                for m in typedecl_env.methods:
                    for method_decl in typedecl_env.methods[m]:
                        if method_decl.decl_order > field_decl.decl_order:
                            methods_obj_blacklist.append(method_decl.obj)

                # For each field initializer, find method invocations.
                for mi_node in field_decl.find_child('Initializer').select(['MethodInvocation']):
                    # For each method invocation, ensure it's not declared
                    # after this field.
                    if mi_node.decl.obj.declared_in.name == cu_env.canon \
                            and mi_node.decl.obj in methods_obj_blacklist:
                        logging.error("Illegal forward reference to method %s before its declaration"
                                      % (mi_node.decl.obj))
                        sys.exit(42)
def gen_add_expr(info, node, method_obj):
    output = []

    # Numbers: just add.
    if primitives.is_numeric(node[0].typ) and primitives.is_numeric(node[1].typ):
        output.extend(gen_binary_expr_common(info, node, method_obj))
        output.append('add eax, ebx')

    # If they are objects, we do the following:
    # 1. Use String.valueOf() on each operand.
    # 2. Do a LHS.concat(RHS), which returns a new string.
    elif node[0].typ == 'java.lang.String' or node[1].typ == 'java.lang.String':
        # Convert LHS to a string.
        output.extend(gen_string_valueof(info, node[0], method_obj))
        output.append('push eax')

        # Convert RHS to a string.
        output.extend(gen_string_valueof(info, node[1], method_obj))
        output.append('push eax')

        # Receiver is LHS, argument is RHS (already pushed); just need to
        # call the correct method.
        output.append('call METHOD~java.lang.String.concat~java.lang.String')

        # Pop the two arguments off the stack.
        output.append('add esp, 8')
    else:
        logging.error('FATAL ERROR: invalid add')
        sys.exit(1)

    return output
def typecheck_add(node, c, method, t_i, c_i):
    expected_node = ['AddExpression', 'SubtractExpression']
    if node.name not in expected_node:
        logging.error("FATAL ERROR: expected one of %s" % expected_node)
        sys.exit(1)

    if len(node.children) == 0:
        logging.error("FATAL ERROR: %s has no children" % node.name)
        sys.exit(1)
    elif len(node.children) != 2:
        logging.error('FATAL ERROR: typecheck_add() on expression %s with %d children'
                      % (node.name, len(node.children)))
        sys.exit(1)
    else:
        t1 = typecheck_expr(node[0], c, method, t_i, c_i)
        t2 = typecheck_expr(node[1], c, method, t_i, c_i)
        if node.name == 'AddExpression' and \
                (t1 == "java.lang.String" or t2 == "java.lang.String"):
            if t1 != "Void" and t2 != "Void":
                node.typ = 'java.lang.String'
                return node.typ
            else:
                logging.error("typecheck failed: string add void")
                sys.exit(42)
        elif primitives.is_numeric(t1) and primitives.is_numeric(t2):
            node.typ = 'Int'
            return node.typ
        else:
            logging.error("typecheck failed: Add: %s, %s" % (t1, t2))
            sys.exit(42)
def replace(m1, list_of_m):
    if isinstance(m1, Field):
        return
    for m2 in list_of_m:
        # A nonstatic method must not replace a static method, and vice
        # versa. (JLS 8.4.6.1, dOvs well-formedness constraint 5)
        if "Static" not in m1.mods and "Static" in m2.mods:
            logging.error("a nonstatic method (%s) replaced a static method (%s)" % (m1, m2))
            sys.exit(42)
        if "Static" in m1.mods and "Static" not in m2.mods:
            logging.error("a static method (%s) replaced a nonstatic method (%s)" % (m1, m2))
            sys.exit(42)

        # A method must not replace a method with a different return type.
        # (JLS 8.1.1.1, 8.4, 8.4.2, 8.4.6.3, 8.4.6.4, 9.2, 9.4.1,
        # dOvs well-formedness constraint 6)
        if m1.type != m2.type:
            logging.error("a method (%s) replaced a method (%s) with a different return type" % (m1, m2))
            sys.exit(42)

        # A protected method must not replace a public method.
        # (JLS 8.4.6.3, dOvs well-formedness constraint 7)
        if "Protected" in m1.mods and "Public" in m2.mods:
            logging.error("a protected method (%s) replaced a public method (%s)" % (m1, m2))
            sys.exit(42)

        # A method must not replace a final method.
        # (JLS 8.4.3.3, dOvs well-formedness constraint 9)
        if "Final" in m2.mods:
            logging.error("a method (%s) replaced a final method (%s)" % (m1, m2))
            sys.exit(42)
def on_status(self, status):
    if self.storage == 'stdout':
        print(self.to_string(status))
    elif self.storage == 'sql':
        try:
            self.to_sql(status)
        except db.DataError as ex:
            logging.error(ex)
            raise
def get_field_offset(self, node):
    if node.name != "FieldAccess":
        logging.error("get_field_offset")
        sys.exit(1)

    receiver_type = node.find_child("FieldReceiver")[0].typ
    field_name = node.find_child("FieldName")[0].value.value
    return self.get_field_offset_from_field_name(receiver_type, field_name)
def on_status(self, status):
    if 'stdout' in self.storage:
        print(self.status_to_string(status))
    if 'sql' in self.storage:
        try:
            self.to_sql(status)
        except db.DataError as ex:
            logging.error(ex)
            raise
def build_method_params(method_node):
    params = {}
    for p in method_node.select(['Parameters', 'Parameter']):
        param_name = p[1].value.value
        if param_name in params:
            logging.error('Two parameters cannot have the same name: %s' % param_name)
            sys.exit(42)
        params[param_name] = p
    return params
def cycle_detection(c, seen_so_far):
    l = list(c.implements)
    if c.extends != None:
        l.append(c.extends)
    for cc in l:
        if cc in seen_so_far:
            logging.error("Cycle detected in super class hierarchy for %s" % c.name)
            sys.exit(42)
        cycle_detection(cc, list(seen_so_far) + [cc])
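# A minimal, self-contained sketch of the same walk (illustration only;
# the Klass stub below is an assumption, not the project's real class
# object). Each class contributes its superclass and interfaces to the
# worklist, and revisiting any type already on the path from the root
# means a cycle.
class Klass(object):
    def __init__(self, name, extends=None, implements=()):
        self.name = name
        self.extends = extends
        self.implements = list(implements)

a = Klass('A')
b = Klass('B', extends=a)
a.extends = b  # A extends B extends A: a cycle
# cycle_detection(a, []) would now log the error and exit(42)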
def call_remote(self, url):
    req = urllib2.Request(url)
    try:
        response = urllib2.urlopen(req)
    except urllib2.URLError:
        result = None
        logging.error("Error while requesting: " + url)
        self.flash(_("Cannot connect to server"))
    else:
        json_data = response.read()
        result = json.loads(json_data)
    return result
def typecheck_while(node, c, method, t_i, c_i):
    if node.name != 'WhileStatement':
        logging.error('FATAL ERROR: typecheck_while')
        sys.exit(1)

    expr_type = typecheck_expr(node[0], c, method, t_i, c_i)
    if expr_type != 'Boolean':
        logging.error("Type of expression for 'while' must be a Boolean")
        sys.exit(42)

    return None
def typecheck_mult(node, c, method, t_i, c_i):
    expected_node = ['MultiplyExpression', 'DivideExpression', 'ModuloExpression']
    if node.name not in expected_node:
        logging.error("FATAL ERROR: expected one of %s" % expected_node)
        sys.exit(1)

    if len(node.children) == 0:
        logging.error("FATAL ERROR: %s has no children in typecheck_mult()" % node.name)
        sys.exit(1)
    elif len(node.children) != 2:
        logging.error('FATAL ERROR: typecheck_mult got %s node with %d children'
                      % (node.name, len(node.children)))
        sys.exit(1)
    else:
        t1 = typecheck_expr(node[0], c, method, t_i, c_i)
        t2 = typecheck_expr(node[1], c, method, t_i, c_i)
        if primitives.is_numeric(t1) and primitives.is_numeric(t2):
            node.typ = 'Int'
            return node.typ
        else:
            logging.error("typecheck failed: mult/div/mod operand not numeric")
            sys.exit(42)
def sell(branch, isdn, amount):
    result = None
    try:
        xml = (
            '<request>'
            '<branch>%(branch)s</branch>'
            '<customer system="ISDN">%(isdn)s</customer>'
            '<point>%(point)s</point>'
            '<smsPrefix>my.ubedn.mn</smsPrefix>'
            '<smsSuffix>UBEDN</smsSuffix>'
            '<product>tsaxilgaanii tulbur</product>'
            '<productType></productType>'
            '<description>tsaxilgaanii tulbur tulugdsun.</description>'
            '</request>' % {"branch": branch, "isdn": isdn, "point": amount}
        )
        result = api_request("sell", xml)
    except Exception as ex:
        logging.error("[candy api] [sell] error: %s" % ex)
    return result
def typecheck_relational(node, c, method, t_i, c_i):
    expected_node = ['LessThanExpression', 'LessThanEqualExpression',
                     'GreaterThanExpression', 'GreaterThanEqualExpression']
    if node.name not in expected_node:
        logging.error("FATAL ERROR: typecheck_relational() expected one of %s" % expected_node)
        sys.exit(1)

    if len(node.children) == 0:
        logging.error("FATAL ERROR: %s has no children" % node.name)
        sys.exit(1)
    elif len(node.children) != 2:
        logging.error('FATAL ERROR: typecheck_relational() on expression %s with %d children'
                      % (node.name, len(node.children)))
        sys.exit(1)
    else:
        t1 = typecheck_expr(node[0], c, method, t_i, c_i)
        t2 = typecheck_expr(node[1], c, method, t_i, c_i)
        if primitives.is_numeric(t1) and primitives.is_numeric(t2):
            node.typ = 'Boolean'
            return node.typ
        else:
            logging.error("typecheck failed: Relational: %s, %s" % (t1, t2))
            sys.exit(42)
def get_method_offset(self, node):
    if node.name != "MethodInvocation":
        logging.error("get_method_offset")
        sys.exit(1)

    method_name = node.find_child("MethodName")[0].value.value
    params = []
    for child in node.find_child("Arguments").children:
        params.append(child.typ)

    method = class_hierarchy.Temp_Method(method_name, params)
    offset = self.method_index.index(method) * 4  # 4 bytes per vtable slot
    return offset
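# Illustration only: the vtable offset is the method's position in the
# class-wide method index times 4 (one 32-bit pointer per slot, consistent
# with the x86 codegen elsewhere in this codebase). The list below is a
# stand-in for self.method_index.
method_index = ['equals', 'hashCode', 'toString']
assert method_index.index('toString') * 4 == 8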
def build_field_index(class_index):
    field_index = {}
    for c in class_index.values():
        field_index[c.name] = []
        for m in get_all_fields(c):
            if isinstance(m, class_hierarchy.Field):
                field_index[c.name].append(m)
            else:
                logging.error("build_field_index")
                sys.exit(1)
    return field_index
def typecheck_equality(node, c, method, t_i, c_i):
    expected_node = ['EqualExpression', 'NotEqualExpression']
    if node.name not in expected_node:
        logging.error("FATAL ERROR: expected one of %s" % expected_node)
        sys.exit(1)

    if len(node.children) == 0:
        logging.error("FATAL ERROR: %s has no children" % node.name)
        sys.exit(1)
    elif len(node.children) != 2:
        logging.error('FATAL ERROR: typecheck_equality on %d children' % len(node.children))
        sys.exit(1)
    else:
        t1 = typecheck_expr(node[0], c, method, t_i, c_i)
        t2 = typecheck_expr(node[1], c, method, t_i, c_i)
        if primitives.is_numeric(t1) and primitives.is_numeric(t2):
            node.typ = "Boolean"
            return node.typ
        elif t1 == "Boolean" and t2 == "Boolean":
            node.typ = "Boolean"
            return node.typ
        elif is_assignable(t1, t2, c_i) or is_assignable(t2, t1, c_i):
            node.typ = "Boolean"
            return node.typ
        else:
            logging.error("typecheck failed: equality between %s and %s" % (t1, t2))
            sys.exit(42)
def get_parse_tree(tokens, filename, options):
    global parse_table
    parse_tree = parser.parse(tokens, parse_table)
    if parse_tree == False:
        logging.error("Could not parse file %s" % filename)
        sys.exit(42)

    if options.stage == 'parser':
        if options.include_stdlib == False or filename not in opts.stdlib_files or \
                options.print_stdlib == True:
            print("Unweeded parse tree for %s:" % filename)
            parse_tree.pprint()

    return parse_tree
def typecheck_cast_expression(node, c, method, t_i, c_i):
    if node.name != 'CastExpression':
        logging.error('FATAL: Invalid node %s for typecheck_cast_expression' % node.name)
        sys.exit(1)

    expr_type = typecheck_expr(node[1], c, method, t_i, c_i)
    if (primitives.is_numeric(expr_type) and primitives.is_numeric(node[0].canon)) \
            or is_assignable(expr_type, node[0].canon, c_i) \
            or is_assignable(node[0].canon, expr_type, c_i):
        return node[0].canon
    else:
        logging.error('Cast expression of type %s into %s' % (expr_type, node[0].canon))
        sys.exit(42)
def get_balance(isdn):
    result = None
    try:
        xml = ('<request><customer>%(isdn)s</customer>'
               '<customer.system>ISDN</customer.system></request>'
               % {"isdn": isdn})
        result = api_request("customer", xml)
        if result and not result.has_error:
            result.balance = None
            root = xml_et.fromstring(result.body)
            result.balance = int(float(root.find("balance").text))
    except Exception as ex:
        logging.error("[candy api] [get balance] error: %s" % ex)
    return result
def typecheck_postfix(node, c, method, t_i, c_i):
    expected_node = 'PostfixExpression'
    if node.name != expected_node:
        logging.error("FATAL ERROR: typecheck_postfix() got %s" % node.name)
        sys.exit(1)

    if len(node.children) == 0:
        logging.error("FATAL ERROR: %s has no children" % node.name)
        sys.exit(1)

    if node[0].name == 'Name':
        node.typ = typecheck_name(node[0])
    else:
        node.typ = typecheck_expr(node[0], c, method, t_i, c_i)
    return node.typ
def typecheck_for(node, c, method, t_i, c_i):
    if node.name != 'ForStatement':
        logging.error('FATAL ERROR: typecheck_for')
        sys.exit(1)

    # If there's no 'ForCondition', we don't need to do anything.
    if len(node[1].children) == 0:
        return None
    else:
        expr_type = typecheck_expr(node[1][0], c, method, t_i, c_i)
        if expr_type != 'Boolean':
            logging.error("Type of expression for 'for' must be a Boolean")
            sys.exit(42)
        return None
def typecheck_creation(node, c, method, t_i, c_i):
    expected_node = 'CreationExpression'
    if node.name != expected_node:
        logging.error("FATAL ERROR: expected %s" % expected_node)
        sys.exit(1)

    creation_type = node[0].canon
    if is_array_type(creation_type):
        if len(node[1].children) > 1:
            logging.error('Too many args to array creation')
            sys.exit(42)
        if len(node[1].children) == 1:
            expr_type = typecheck_expr(node[1][0], c, method, t_i, c_i)
            if not primitives.is_numeric(expr_type):
                logging.error('Invalid array creation argument')
                sys.exit(42)
        node.typ = creation_type
        return node.typ
    else:
        cons_name = creation_type.split('.')[-1]

        # Check that we don't call the constructor of an abstract class.
        if 'Abstract' in c_i[creation_type].mods:
            logging.error('Cannot call constructor of abstract class')
            sys.exit(42)

        arg_types = []
        for arg_expr in node[1].children:
            arg_types.append(typecheck_expr(arg_expr, c, method, t_i, c_i))

        cons_decl = name_resolve.constructor_accessable(c_i, t_i, creation_type,
                                                        cons_name, arg_types,
                                                        c.name, False)
        cons = class_hierarchy.Temp_Constructor(cons_name, arg_types)

        # TODO: Check that cons is not protected, and if it is, that we have
        # access to call it.
        if cons_decl != None and cons in c_i[creation_type].declare:
            node.typ = creation_type
            return node.typ
        else:
            logging.error('Invalid constructor call')
            sys.exit(42)
def build_statement(node):
    if_statements = [
        'IfThenStatement',
        'IfThenElseStatement',
        'IfThenElseStatementNoShortIf',
    ]

    if node[0].name == 'StatementWithoutTrailingSubstatement':
        return build_statement_wts(node[0])
    elif node[0].name in if_statements:
        return build_if_statement(node[0])
    elif node[0].name in ['WhileStatement', 'WhileStatementNoShortIf']:
        return build_while_statement(node[0])
    elif node[0].name in ['ForStatement', 'ForStatementNoShortIf']:
        return build_for_statement(node[0])
    else:
        logging.error('AST: invalid node in build_statement()')
        sys.exit(1)
def __make_station(self, s):
    try:
        prefix, name, bitrate, mimetype, location, genre = File.unpack_path(s)
    except:
        logging.error("error decoding iradio path: %s\n%s",
                      s, logging.stacktrace())
        return None

    f = File(self)
    f.name = name
    f.info = "Bitrate: %s kb" % bitrate
    f.resource = location
    f.path = s
    f.mimetype = mimetype
    f.icon = theme.shoutcast_station.get_path()
    return f
def is_widening_conversion(type1, type2):
    if type1 == type2:
        return True
    elif type2 == 'Byte':
        return type1 == 'Short' or type1 == 'Int'
    elif type2 == 'Short':
        return type1 == 'Int'
    elif type2 == 'Char':
        return type1 == 'Int'
    elif type2 == 'Int':
        return type1 == 'Int'
    else:
        logging.error('FATAL ERROR: Invalid type (%s) for is_widening_conversion' % type2)
        sys.exit(1)
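# Quick sanity checks for the widening table above (illustration only):
# is_widening_conversion(type1, type2) answers whether type2 widens to
# type1. A Byte widens to Short or Int, a Short or Char only to Int, and
# any type trivially "widens" to itself.
assert is_widening_conversion('Int', 'Byte')
assert is_widening_conversion('Short', 'Byte')
assert not is_widening_conversion('Byte', 'Short')
assert is_widening_conversion('Char', 'Char')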
def sell_confirm(branch, isdn, tan):
    result = None
    try:
        xml = (
            '<request>'
            '<branch>%(branch)s</branch>'
            '<customer system="ISDN">%(isdn)s</customer>'
            '<tancode>%(tan)s</tancode>'
            '</request>' % {"branch": branch, "isdn": isdn, "tan": tan}
        )
        result = api_request("sell/confirm", xml)
        if result and not result.has_error:
            result.trans_id = None
            root = xml_et.fromstring(result.body)
            result.trans_id = root.find("transactionId").text
    except Exception as ex:
        logging.error("[candy api] [sell confirm] error: %s" % ex)
    return result
def __init__(self, location, dom):
    """
    Creates a new DeviceDescription object from the given DOM tree of a
    device description.
    @since: 0.96

    @param location: URL of the device location
    @param dom: the DOM tree of the description XML (see L{utils.MiniXML})
    """
    self.__dom = dom

    # location of the device description
    self.__location = location

    # prefix for accessing relative URLs
    self.__url_base = ""

    self.__device_type = ""
    self.__udn = ""
    self.__friendly_name = ""
    self.__manufacturer = ""
    self.__model_name = ""
    self.__model_number = ""
    self.__model_description = ""
    self.__presentation_url = ""

    # mapping of service type to (SCPDURL, controlURL, eventSubURL)
    self.__services = {}

    # mapping of service type to a SOAP proxy
    self.__service_proxies = {}

    self.__icon_url = ""

    try:
        self.__parse_description(dom)
    except:
        import traceback
        traceback.print_exc()
        logging.error("[upnp] invalid device description:\n%s", str(dom))
def __on_receive_xml(self, data, amount, total, xml, category, query,
                     is_toc, cb, *args):
    if data == None and not xml:
        # error
        logging.error("error downloading XML data")
        self.__current_folder.message = "content not available"
        cb(None, *args)
    elif not data:
        # finished loading
        #self.__cache_search_result(url, xml[0])
        if is_toc:
            gobject.timeout_add(0, self.__parse_toc_xml, xml[0],
                                category, query, cb, *args)
        else:
            gobject.timeout_add(0, self.__parse_page_xml, xml[0], cb, *args)
    else:
        xml[0] += data
def _set_thumbnail(self, f, pbuf):
    """
    Saves the given pixbuf as thumbnail for the given file. Thumbnailers
    may use this method for caching thumbnails.
    """
    # always scale down large thumbnails
    if pbuf.get_width() > 200 or pbuf.get_height() > 200:
        _PBUF.fill(0x00000000)
        pixbuftools.fit_pbuf(_PBUF, pbuf, 0, 0, 200, 200, True)
        pbuf = _PBUF
    #end if

    path = self.__get_thumbnail_path(f)
    try:
        pbuf.save(path, "jpeg")
        return path
    except:
        logging.error("cannot save thumbnail:\n%s", logging.stacktrace())
        return ""
def parse(tokens, parse_table):
    tree = None
    stack = []
    node_stack = []  # keep the nodes in a parallel stack for convenience

    for a in tokens:
        # Reduce
        production = parse_reduce(parse_table, stack, a)
        while production != False:
            children = []
            # Remove the RHS from the stack
            for p in reversed(production[1]):
                if stack.pop()[0] != p:
                    logging.error("PARSE ERROR: Stack did not match production ('%s')" % p)
                    sys.exit(1)
                children.insert(0, node_stack.pop())
            stack.append((production[0], None))
            node_stack.append(node.Node(production[0], None, children))
            production = parse_reduce(parse_table, stack, a)

        # Reject?
        if parse_reject(parse_table, stack + [a]):
            return False

        # Shift
        stack.append(a)
        node_stack.append(node.Node(a[0], a, []))

    # Accept
    return node.Node("ROOT", children=node_stack)
def load_bbox(depth_dir, view):
    """
    Internal helper function trying to load bounding box information
    from file.
    """
    base_filename = os.path.join(depth_dir, "%05d" % view)
    if os.path.exists(base_filename + ".npz"):
        npz_dict = np.load(base_filename + ".npz")
        if 'bbox' in npz_dict:
            crop = npz_dict['bbox']
        else:
            crop = None
    else:
        crop = None

    if crop is None:
        crop_files = glob(base_filename + "_bbox*")
        if len(crop_files) == 1:
            crop = np.load(crop_files[0])
        elif len(crop_files) > 1:
            error("Crop file base '%s_bbox' matches multiple files" % base_filename)
    return crop
def typecheck_local_var_decl(node, c, method, t_i, c_i):
    type_node = node.find_child('Type')
    init_node = node.find_child('Initializer')
    if type_node == None or init_node == None:
        logging.error('FATAL ERROR: typecheck_var_decl')
        sys.exit(1)

    # Extract type from the Type node.
    var_type = type_node.canon

    initializer_type = var_type
    if len(init_node.children) == 1:
        initializer_type = typecheck_expr(init_node[0], c, method, t_i, c_i)

    if is_assignable(var_type, initializer_type, c_i):
        node.typ = var_type
        return node.typ
    else:
        logging.error('Invalid initializer: cannot assign type %s to variable of type %s'
                      % (initializer_type, var_type))
        sys.exit(42)
def get_file(self, path):
    try:
        name, location = self.__decode_path(path)
    except:
        logging.error("error decoding iradio path: %s\n%s",
                      path, logging.stacktrace())
        return None

    f = File(self)
    f.name = name
    f.info = location
    f.path = path
    f.resource = location
    if location.endswith(".ram") or location.endswith(".rm"):
        f.mimetype = "application/vnd.rn-realmedia"
    else:
        f.mimetype = "audio/x-unknown"
    f.icon = theme.iradio_device
    return f
def __get_video(self, f):
    """
    Returns the video URL, or an empty string on failure.
    """
    if f.resource == _REGION_BLOCKED:
        self.emit_message(msgs.UI_ACT_SHOW_INFO,
                          "This video is not available in your country.")
        return ""

    try:
        fmts = self.__get_flv(f.resource)
    except:
        logging.error("could not retrieve video\n%s", logging.stacktrace())
        return ""

    #if (not 18 in fmts): fmts.append(18)
    f_ids = fmts.keys()
    f_ids.sort(formats.comparator)

    # filter out incompatible formats
    if platforms.MAEMO5:
        f_ids = [x for x in f_ids if x in _N900_FORMAT_WHITELIST]
    elif platforms.MAEMO4:
        f_ids = [x for x in f_ids if x in _N8x0_FORMAT_WHITELIST]

    # retrieve high-quality version, if desired
    if len(f_ids) > 1:
        qtype = self.__ask_for_quality(f_ids)
    elif len(f_ids) == 1:
        qtype = f_ids[0]
    else:
        qtype = 5

    logging.info("requested video quality: %s", qtype)

    flv = fmts[qtype]
    ext = "." + formats.get_container(qtype)
    logging.info("found FLV: %s", flv)
    return flv + "&ext=" + ext
def delete_cluster(proxy):
    """Idempotently delete the cluster defined in [cluster_section].

    Section of the configuration is 'cluster' by default. Use the
    cluster_section option on the cluster subcommand to specify a different
    section from the config. E.g.:

        $> cumulus cluster --cluster_section 'other_section' delete
    """
    try:
        _id = proxy.cluster['_id']
        logging.info('Deleting cluster "%s"' % proxy.cluster_name)
        try:
            del proxy.cluster
            logging.info('Finished deleting cluster "%s" (%s)' %
                         (proxy.cluster_name, _id))
        except RuntimeError as e:
            logging.error(e.message)
    except TypeError:
        logging.info('No cluster "%s" found. Skipping' % proxy.cluster_name)
def delete_profile(proxy):
    """Idempotently delete the profile defined in [profile_section].

    Section of the configuration is 'profile' by default. Use the
    profile_section option on the profile subcommand to specify a different
    section from the config. E.g.:

        $> cumulus profile --profile_section 'other_section' delete
    """
    try:
        _id = proxy.profile['_id']
        logging.info('Deleting profile "%s"' % proxy.profile_name)
        try:
            del proxy.profile
            logging.info('Finished deleting profile "%s" (%s)' %
                         (proxy.profile_name, _id))
        except RuntimeError as e:
            logging.error(e.message)
    except TypeError:
        logging.info('Found no profile "%s". Skipping.' % proxy.profile_name)
def __parse_genres(self, data):
    """
    Parses the XML list of genres.
    """
    genres = []
    try:
        dtree = ElementTree.parse(StringIO(data))
        for i in dtree.getiterator():
            if i.tag == "genre":
                for j, n in i.items():
                    if j == "name":
                        genres.append(n)
    except:
        self.call_service(msgs.UI_ACT_SHOW_INFO,
                          "An error occurred while loading the list of genres.\n"
                          "Check your internet connection and try again.")
        logging.error("SHOUTcast genre listing download failed:\n%s",
                      logging.stacktrace())

    genres.sort()
    return genres
def evaluate(self, simulations, observations, experiment_state, data_adapter):
    if not hasattr(experiment_state.materials, "base_weights"):
        error("Calculating weight smoothness on a material representation "
              "that doesn't have weights")

    N = experiment_state.locations.get_point_count()
    locations = experiment_state.locations.location_vector()  # Nx3
    albedo = experiment_state.materials.get_brdf_parameters()['diffuse']  # Nx3
    bilateral_positions = torch.cat((locations / 0.1, albedo / 0.01),
                                    dim=1)  # Nx6
    weights = experiment_state.materials.base_weights
    smoothed_weights = permutohedral_filter(
        weights[None], bilateral_positions[None],
        torch.ones(1, N, dtype=torch.float, device=locations.device),
        False)[0]

    return (smoothed_weights - weights).abs()
def __load_index(self):
    """
    Deserializes the index from file.
    """
    try:
        import cPickle
        fd = open(_INDEX_FILE, "rb")
    except:
        return

    try:
        magic, data = cPickle.load(fd)
    except:
        logging.error(logging.stacktrace())
        return
    finally:
        fd.close()

    # ignore the file if it isn't compatible
    if magic == _MAGIC:
        self.__current_id, self.__entries, self.__indices = data
        self.__is_dirty = False
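# A hedged sketch of the matching serializer (the real save routine is not
# shown in this collection; this is an assumption inferred from the loader
# above): the index is stored as a (magic, data) tuple so that incompatible
# files can be detected and ignored on load.
def __save_index(self):
    try:
        import cPickle
        fd = open(_INDEX_FILE, "wb")
        cPickle.dump((_MAGIC,
                      (self.__current_id, self.__entries, self.__indices)),
                     fd)
        fd.close()
        self.__is_dirty = False
    except:
        logging.error(logging.stacktrace())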
def load(self, f):
    self.__lyrics = ""
    self.__have_cover = False

    stopwatch = logging.stopwatch()
    self.__player = self.call_service(msgs.MEDIA_SVC_GET_OUTPUT)
    self.__player.connect_status_changed(self.__on_change_player_status)
    self.__player.connect_volume_changed(self.__on_change_player_volume)
    self.__player.connect_position_changed(self.__on_update_position)
    self.__player.connect_tag_discovered(self.__on_discovered_tags)
    self.__player.connect_error(self.__on_error)
    logging.profile(stopwatch, "[audioplayer] connected audio output")

    try:
        stopwatch = logging.stopwatch()
        self.__context_id = self.__player.load_audio(f)
        logging.profile(stopwatch, "[audioplayer] loaded media file: %s", f)
    except:
        logging.error("error loading media file: %s\n%s",
                      f, logging.stacktrace())

    stopwatch = logging.stopwatch()
    self.__current_file = f
    logging.profile(stopwatch, "[audioplayer] loaded track info")

    # load bookmarks
    self.__progress.set_bookmarks(media_bookmarks.get_bookmarks(f))

    self.emit_message(msgs.MEDIA_EV_LOADED, self, f)

    t = threading.Thread(target=self.__load_track_info, args=[f])
    t.setDaemon(True)
    gobject.idle_add(lambda *x: t.start() and False)

    if self.__offscreen_buffer:
        self.render_buffered(self.__offscreen_buffer)
def load(self, f):
    self.render()
    self.__wait_for_dsp()

    self.__player = self.call_service(msgs.MEDIA_SVC_GET_OUTPUT)
    self.__player.connect_status_changed(self.__on_status_changed)
    self.__player.connect_position_changed(self.__on_update_position)
    self.__player.connect_volume_changed(self.__on_change_player_volume)
    self.__player.connect_aspect_changed(self.__on_change_player_aspect_ratio)
    self.__player.connect_error(self.__on_error)
    self.__player.set_window(self.__screen.get_xid())

    # loading may fail, so we need to set up a handler that frees the DSP
    # semaphore after some time
    if self.__load_failed_handler:
        gobject.source_remove(self.__load_failed_handler)
    self.__load_failed_handler = gobject.timeout_add(30000,
                                                     self.__on_load_failed)

    try:
        self.__progress.set_message("Loading")
        self.__context_id = self.__player.load_video(f)
        self.__current_file = f

        # load bookmarks
        self.__progress.set_bookmarks(media_bookmarks.get_bookmarks(f))

        self.emit_message(msgs.MEDIA_EV_LOADED, self, f)
    except:
        self.__progress.set_message("Error")
        logging.error("[videoplayer] error loading media file: %s\n%s",
                      f, logging.stacktrace())
def handle_HTTPSERVER_SVC_BIND(self, owner, addr, port):
    if (addr, port) in self.__listeners:
        return "address already in use"

    try:
        sock = socket.socket(socket.AF_INET)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((addr, port))
        sock.listen(1)
    except:
        logging.error("[httpserv] error binding to %s:%d\n%s",
                      addr, port, logging.stacktrace())
        return "could not bind to address"

    iowatch = gobject.io_add_watch(sock, gobject.IO_IN,
                                   self.__on_new_client, owner)
    self.__listeners[(addr, port)] = _Listener(owner, sock, iowatch)
    logging.info("[httpserv] bound to TCP %s:%d", addr, port)
    return ""
def _get_info_set(self, criterion=lambda x: True):
    """
    Format the image indices and related lighting information as prescribed
    by general_settings.batch_size.

    Outputs:
        indices, light_infos    python lists containing, in parallel:
            batch_indices       (long) torch.tensor containing indices into
                                the observation list
            batch_light_infos   (long) torch.tensor containing information
                                required for the lighting model. Typically
                                indices into the light list
    """
    device = torch.device(general_settings.device_name)
    training_indices = []
    training_light_infos = []
    for image_index, image in enumerate(self.images):
        if criterion(image):
            training_indices.append(image_index)
            training_light_infos.append(image.light_info)

    training_indices = torch.tensor(training_indices,
                                    dtype=torch.long, device=device)
    training_light_infos = torch.tensor(training_light_infos,
                                        dtype=torch.long, device=device)

    if training_light_infos.min() < 0:
        error("Trying to reconstruct an image without a light source.")

    batch_size = general_settings.batch_size
    training_indices = [
        training_indices[i0:min(i0 + batch_size, len(training_indices))]
        for i0 in range(0, len(training_indices), batch_size)
    ]
    training_light_infos = [
        training_light_infos[i0:min(i0 + batch_size, len(training_light_infos))]
        for i0 in range(0, len(training_light_infos), batch_size)
    ]

    return training_indices, training_light_infos
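# A minimal, self-contained sketch of the batching pattern used above
# (illustration only): a 1-D tensor is cut into contiguous chunks of at
# most batch_size elements. torch.split does the same thing in one call.
import torch

x = torch.arange(10)
batch_size = 4
batches = [x[i0:min(i0 + batch_size, len(x))]
           for i0 in range(0, len(x), batch_size)]
# batches == [tensor([0, 1, 2, 3]), tensor([4, 5, 6, 7]), tensor([8, 9])]
assert all((a == b).all() for a, b in zip(batches, torch.split(x, batch_size)))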
def higo_baseline(experiment_state, data_adapter, cache_path, higo_settings):
    """
    Reimplementation of the Higo et al. 2009 paper
        Tomoaki Higo, Yasuyuki Matsushita, Neel Joshi, and Katsushi Ikeuchi
        A hand-held photometric stereo camera for 3-d modeling
        ICCV 2009

    Uses the PyMaxflow library for the graph-cut problem:
        http://pmneila.github.io/PyMaxflow/
    This library is installed as a python package by the installEnv.sh script.
    """
    if not isinstance(experiment_state.locations, DepthMapParametrization):
        error("Higo et al. 2009 requires a depth map parametrization.")
    os.makedirs(cache_path, exist_ok=True)
    device = torch.device(general_settings.device_name)

    with torch.no_grad():
        step_size = higo_settings['step_size']
        step_radius = higo_settings['step_radius']
        depth_range = step_size * step_radius  # 2.5cm
        nr_steps = 2 * step_radius + 1
        eta = higo_settings['eta'] * general_settings.intensity_scale
        lambda_n = higo_settings['lambda_n']
        lambda_s = higo_settings['lambda_s'] * step_size * 1000
        lambda_1 = higo_settings['lambda_1']
        lambda_2 = higo_settings['lambda_2']

        surface_constraint_threshold = 0.005  # 5mm
        surface_constraint_threshold = surface_constraint_threshold / (depth_range / nr_steps)
        surface_constraint_penalization = 0.010  # 1cm

        ## 1) calculate the photometric loss volume, and the depth/normal
        ##    hypothesis volume
        N_pixels = experiment_state.locations.get_point_count()
        photo_loss_volume = torch.zeros(N_pixels, nr_steps).to(device)
        depth_volume = torch.zeros(N_pixels, nr_steps, 1).to(device)
        normal_volume = torch.zeros(N_pixels, nr_steps, 3).to(device)
        diffuse_volume = torch.zeros(N_pixels, nr_steps, 3).to(device)

        # we need multiples of the step size for the graph cut later on
        initial_depth = experiment_state.locations.implied_depth_image().clone()
        initial_depth.div_(step_size).round_().mul_(step_size)

        for offset_idx in tqdm(range(nr_steps),
                               desc="Solving all RANSAC problems (photometric loss volume)"):
            depth_offset = -depth_range + offset_idx * step_size
            cache_file = os.path.join(cache_path, "%8.6f.npz" % depth_offset)
            depth = initial_depth + depth_offset
            if os.path.exists(cache_file):
                cached = np.load(cache_file)
                normals = to_torch(cached['normals'])
                inliers_N = to_torch(cached['inliers_N'])
                inlier_photometric_error = to_torch(cached['inlier_photometric_error'])
                albedo = to_torch(cached['diffuse'])
            else:
                spoofed_experiment_state = ExperimentState.copy(experiment_state)
                spoofed_experiment_state.locations = DepthMapParametrization()
                spoofed_experiment_state.locations.initialize(
                    depth,
                    experiment_state.locations.mask,
                    experiment_state.locations.invK,
                    experiment_state.locations.invRt,
                )
                normals, albedo, inliers, residuals = closed_form_lambertian_solution(
                    spoofed_experiment_state,
                    data_adapter,
                    sample_radius=0,
                    shadows_occlusions=False,
                    verbose=False)
                inlier_photometric_error = (residuals * inliers).abs().sum(dim=1).sum(dim=1)
                inliers_N = inliers.squeeze().sum(dim=1)

                np.savez_compressed(
                    cache_file,
                    normals=to_numpy(normals),
                    diffuse=to_numpy(albedo),
                    inliers_N=to_numpy(inliers_N),
                    inlier_photometric_error=to_numpy(inlier_photometric_error),
                )

            depth_volume[:, offset_idx, 0] = experiment_state.locations.create_vector(depth)
            normal_volume[:, offset_idx] = normals
            diffuse_volume[:, offset_idx] = albedo.squeeze()
            photo_loss_volume[:, offset_idx] = \
                eta * inlier_photometric_error / inliers_N - inliers_N

        # precalculation of neighbour relationships
        mask = experiment_state.locations.mask
        py, px = torch.meshgrid([torch.arange(0, mask.shape[0]),
                                 torch.arange(0, mask.shape[1])])
        pixels = torch.stack((
            px[mask.squeeze()],
            py[mask.squeeze()],
        ), dim=0).to(device)
        indices = torch.zeros(*mask.shape[:2], 1).long().to(device) - 1
        indices[mask] = torch.arange(N_pixels).to(device)[:, None]
        indices = torch.nn.functional.pad(indices, pad=(0, 0, 1, 1, 1, 1), value=-1)
        neighbours = []
        for offset in [[-1, 0], [1, 0], [0, -1], [0, 1]]:
            offset_pixels = pixels + 1  # because of the padding
            for c in range(2):
                offset_pixels[c, :] += offset[c]
            offset_linidces = offset_pixels[0, :] + offset_pixels[1, :] * indices.shape[1]
            neighbours.append(indices.flatten()[offset_linidces])
        neighbours = torch.stack(neighbours, dim=1)

        surface_constrain_cachefile = os.path.join(cache_path,
                                                   "surface_normal_constraint.npz")

        ## 2) calculate the surface normal constraint loss volume
        if not os.path.exists(surface_constrain_cachefile):
            # we add in a nonsense 'neighbour' that will never be able to
            # win, for implementational cleanliness
            surface_constraint_volume = torch.zeros(N_pixels, nr_steps).to(device)
            neighbours_n = neighbours.clone()
            neighbours_n[neighbours_n < 0] = N_pixels
            depth_volume_n = torch.cat(
                (depth_volume, torch.zeros(1, nr_steps, 1).to(device)))
            normal_volume_n = torch.cat(
                (normal_volume, torch.ones(1, nr_steps, 3).to(device)))
            pixel_locs = torch.cat(
                (pixels.float(), torch.ones(1, pixels.shape[1]).to(device)), dim=0)
            pixel_locs_n = torch.cat(
                (pixel_locs, torch.zeros(pixel_locs.shape[0], 1).to(device)), dim=1)

            for offset_idx in tqdm(range(nr_steps),
                                   desc="Generating the surface constraint loss volume"):
                hypothesis_points = experiment_state.locations.invK @ (
                    pixel_locs * depth_volume[None, :, offset_idx, 0])
                hypothesis_normals = normal_volume[:, offset_idx].transpose(0, 1)

                for n_idx in range(4):
                    these_neighbours = neighbours_n[:, n_idx]
                    n_pixel_locs = pixel_locs_n[:, these_neighbours]
                    best_label_points = torch.zeros(3, N_pixels).to(device)
                    best_label_normals = torch.zeros(3, N_pixels).to(device)
                    best_label_offsets = torch.zeros(N_pixels).to(device)
                    best_label_pdists = torch.zeros(N_pixels).to(device) + np.inf
                    for n_offset_idx in range(nr_steps):
                        n_hypothesis_points = experiment_state.locations.invK @ (
                            n_pixel_locs * depth_volume_n[None, these_neighbours, n_offset_idx, 0])
                        n_hypothesis_normals = normal_volume_n[
                            these_neighbours, n_offset_idx].transpose(0, 1)
                        n_hypothesis_pdists = (
                            hypothesis_normals *
                            (hypothesis_points - n_hypothesis_points)).abs().sum(dim=0)
                        better_matches = n_hypothesis_pdists < best_label_pdists
                        best_label_offsets[better_matches] = n_offset_idx
                        best_label_pdists[better_matches] = n_hypothesis_pdists[better_matches]
                        best_label_points[:, better_matches] = \
                            n_hypothesis_points[:, better_matches]
                        best_label_normals[:, better_matches] = \
                            n_hypothesis_normals[:, better_matches]
                    hypothesis_ldists = (best_label_offsets - offset_idx).abs()
                    valid_best_labels = hypothesis_ldists < surface_constraint_threshold

                    hypothesis_pdists = (
                        best_label_normals *
                        (hypothesis_points - best_label_points)).abs().sum(dim=0)
                    # we don't have parallel depth planes, however!
                    surface_constraint_volume[valid_best_labels, offset_idx] += \
                        hypothesis_pdists[valid_best_labels]
                    surface_constraint_volume[valid_best_labels == False, offset_idx] = \
                        surface_constraint_penalization

            np.savez_compressed(
                surface_constrain_cachefile,
                surface_constraint_volume=to_numpy(surface_constraint_volume),
            )
        else:
            cached = np.load(surface_constrain_cachefile)
            surface_constraint_volume = to_torch(cached['surface_constraint_volume'])

        # at this point we can calculate the unary result, i.e. without
        # TV-1 depth smoothness
        unary_loss_volume = photo_loss_volume + lambda_n * surface_constraint_volume
        winners_unary = torch.argmin(unary_loss_volume, dim=1)

        graphcut_cache_file = os.path.join(cache_path, "higo_graphcut.npz")
        if not os.path.exists(graphcut_cache_file):
            ## 3) Graph-cut magic. TV-1 optimization on the depth.
            # Because we now have discretized depth values, there is only a
            # finite number of labels to optimize over. As such, we can use
            # the graph construction from "Stereo Without Epipolar Lines:
            # A Maximum-Flow Formulation".
            depth_min = depth_volume.min()
            depth_max = depth_volume.max()
            n_hyps = round((depth_max - depth_min).item() / step_size) + 1
            depth_hypotheses = depth_min + (depth_max - depth_min) * \
                torch.arange(n_hyps).float().to(device) / (n_hyps - 1)
            depth_hypotheses.div_(step_size).round_().mul_(step_size)

            # make it amenable to graphcut optimization, i.e. all positive values
            safe_unary_loss_volume = unary_loss_volume.clone()
            safe_unary_loss_volume = safe_unary_loss_volume - \
                (safe_unary_loss_volume.min() - 1)
            # a value definitely higher than the optimal solution's loss
            cost_upper_bound = safe_unary_loss_volume.sum(dim=0).sum().item() + 1

            # create the bigger volume of unary weights; because of the way
            # graphcut imposes smoothness cost this is the easiest to implement
            full_unary_loss_volume = torch.zeros(
                len(unary_loss_volume), n_hyps).to(device) + cost_upper_bound
            for step in range(nr_steps):
                # fill in these unary losses in the correct position in the
                # full volume
                full_idces = ((depth_volume[:, step] - depth_min) / step_size).round().long()
                full_values = safe_unary_loss_volume[:, step, None]
                full_unary_loss_volume.scatter_(dim=1, index=full_idces, src=full_values)
            full_offsets = (depth_volume[:, 0, 0] - depth_min).div_(step_size).round_()

            import maxflow
            graph = maxflow.GraphFloat()
            node_ids = graph.add_grid_nodes((N_pixels, n_hyps + 1))
            for hyp in tqdm(range(1, n_hyps + 1),
                            desc="Building optimization graph - unary weights"):
                nodepairs = node_ids[:, hyp - 1:hyp + 1]
                edgeweights = to_numpy(full_unary_loss_volume[:, hyp - 1:hyp])
                graph.add_grid_edges(nodepairs,
                                     weights=edgeweights,
                                     structure=np.array([[0, 0, 0],
                                                         [0, 0, 1],
                                                         [0, 0, 0]]),
                                     symmetric=1)

            # build terminal edges
            for x in tqdm(range(N_pixels),
                          desc="Building optimization graph - terminal edges"):
                graph.add_tedge(node_ids[x, 0], cost_upper_bound, 0)
                graph.add_tedge(node_ids[x, n_hyps], 0, cost_upper_bound)

            # debug test: not including the smoothness loss *should* mean
            # that we get exactly the unary winners
            # print("Starting unary maxflow calculation...")
            # tic = time.time()
            # unary_max_flow = graph.maxflow()
            # print("Finished in %ss" % (time.time() - tic))
            # unary_min_cut = graph.get_grid_segments(node_ids)
            # winners_unary_test = np.nonzero(unary_min_cut[:,1:] != unary_min_cut[:,:-1])[1]
            # assert np.all(winners_unary_test == to_numpy(winners_unary) + to_numpy(full_offsets)), \
            #     "Issue building the graph: unary solution does not match unary WTA"

            no_neighbour_node = graph.add_nodes(1)
            neighbours_g = to_numpy(neighbours)
            neighbours_g[neighbours_g < 0] = len(neighbours)
            node_ids_g = np.concatenate(
                (node_ids,
                 np.ones((1, n_hyps + 1), dtype=node_ids.dtype) * no_neighbour_node),
                axis=0)
            for n_idx in range(4):
                neighbour_ids = np.take(node_ids_g[:, :-1],
                                        indices=neighbours_g[:, n_idx],
                                        axis=0)
                nodepairs = np.stack((node_ids[:, :-1], neighbour_ids), axis=2)
                edgeweights = lambda_s
                candidates = nodepairs[:, :, 1] != no_neighbour_node
                candidates = to_numpy(
                    (depth_volume[:, 0] - step_size * 3 <= depth_hypotheses[None]) *
                    (depth_volume[:, -1] + step_size * 3 >= depth_hypotheses[None])) * candidates
                graph.add_grid_edges(nodepairs[candidates].reshape(-1, 2),
                                     weights=edgeweights,
                                     structure=np.array([[0, 0, 0],
                                                         [0, 0, 1],
                                                         [0, 0, 0]]),
                                     symmetric=0)

            print("Starting full maxflow calculation...")
            tic = time.time()
            max_flow = graph.maxflow()
            print("Finished in %ss" % (time.time() - tic))

            min_cut = graph.get_grid_segments(node_ids)
            nonzeroes = np.nonzero(min_cut[:, 1:] != min_cut[:, :-1])
            unique_nonzeroes = np.unique(nonzeroes[0], return_index=True)
            winners_graphcut = nonzeroes[1][unique_nonzeroes[1]]
            winners_graphcut = winners_graphcut - to_numpy(full_offsets)

            np.savez_compressed(graphcut_cache_file,
                                winners_graphcut=winners_graphcut)
        else:
            cached = np.load(graphcut_cache_file)
            winners_graphcut = cached['winners_graphcut']
        winners_graphcut = to_torch(winners_graphcut).long()

    ## 4) Depth refinement step
    def tangent_vectors(depth_image, invK, inv_extrinsics=None):
        """
        Given an HxWx1 depth map, return quick-and-dirty the HxWx3 LR and
        UP tangents.

        Takes the weighted left-right and up-down neighbour points as
        spanning the local plane.
        """
        assert len(depth_image.shape) == 3, "Depth map should be H x W x 1"
        assert depth_image.shape[2] == 1, "Depth map should be H x W x 1"
        H = depth_image.shape[0]
        W = depth_image.shape[1]
        data_shape = list(depth_image.shape)

        world_coords = depth_map_to_locations(depth_image, invK, inv_extrinsics)
        depth1 = depth_image[:-2, 1:-1] * depth_image[1:-1, 1:-1] * \
            depth_image[2:, 1:-1] * depth_image[1:-1, :-2] * depth_image[1:-1, 2:]
        depth2 = depth_image[:-2, :-2] * depth_image[:-2, 2:] * \
            depth_image[2:, :-2] * depth_image[2:, 2:]
        depth_mask = (depth1 * depth2 == 0).float()
        ud_vectors = (world_coords[:-2, 1:-1, :] * 2 + world_coords[:-2, 0:-2, :] + world_coords[:-2, 2:, :]) \
            - (world_coords[2:, 1:-1, :] * 2 + world_coords[2:, 0:-2, :] + world_coords[2:, 2:, :])
        ud_vectors = ud_vectors / (depth_mask + ud_vectors.norm(dim=2, keepdim=True))
        lr_vectors = (world_coords[1:-1, :-2, :] * 2 + world_coords[0:-2, :-2, :] + world_coords[2:, :-2, :]) \
            - (world_coords[1:-1, 2:, :] * 2 + world_coords[0:-2, 2:, :] + world_coords[2:, 2:, :])
        lr_vectors = lr_vectors / (depth_mask + lr_vectors.norm(dim=2, keepdim=True))

        repad = lambda x: torch.nn.functional.pad(x, pad=(0, 0, 1, 1, 1, 1))
        return repad(lr_vectors), repad(ud_vectors), repad((depth_mask == 0).float())

    def get_laplacian(depth_image, mask):
        kernel = to_torch(np.array([
            [-0.25, -0.50, -0.25],
            [-0.50, 3.00, -0.50],
            [-0.25, -0.50, -0.25],
        ])).float()
        laplacian = torch.nn.functional.conv2d(
            depth_image[None, None, :, :, 0], kernel[None, None])[0, 0, :, :, None]
        laplacian_mask = torch.nn.functional.conv2d(
            mask[None, None, :, :, 0].float(),
            torch.ones_like(kernel)[None, None])[0, 0, :, :, None] == 9
        repad = lambda x: torch.nn.functional.pad(x, pad=(0, 0, 1, 1, 1, 1))
        return repad(laplacian * laplacian_mask.float())

    depth_estimate = torch.gather(depth_volume, dim=1,
                                  index=winners_graphcut[:, None, None])
    depth_estimate.requires_grad_(True)
    center_mask = mask.view(*mask.shape[:2], 1)
    with torch.no_grad():
        normal_estimate = torch.gather(
            normal_volume, dim=1,
            index=winners_graphcut[:, None, None].expand(-1, -1, 3))
        normal_image = torch.zeros(*center_mask.shape[:2], 3).to(device)
        normal_image.masked_scatter_(center_mask, normal_estimate)

        depth_estimate_initial = depth_estimate.clone()
        depth_image_initial = torch.zeros(*center_mask.shape[:2], 1).to(device)
        depth_image_initial.masked_scatter_(center_mask, depth_estimate_initial)

    pixel_locs = torch.cat((pixels.float(),
                            torch.ones(1, pixels.shape[1]).to(device)), dim=0)

    loop = tqdm(range(1000), desc="Depth refinement")
    loss_evolution = []
    optimizer = torch.optim.Adam([depth_estimate], eps=1e-5, lr=0.0001,
                                 betas=[0.9, 0.99])
    for iteration in loop:
        # term 1: position error
        initial_points = experiment_state.locations.invK @ (
            pixel_locs * depth_estimate_initial.view(1, -1))
        current_points = experiment_state.locations.invK @ (
            pixel_locs * depth_estimate.view(1, -1))
        position_diff = (initial_points - current_points).abs()
        position_error = (position_diff**2).sum()

        # term 2: the normal error
        depth_image = torch.zeros(*center_mask.shape[:2], 1).to(device)
        depth_image.masked_scatter_(center_mask, depth_estimate)
        lr_img, ud_img, mask_img = tangent_vectors(
            depth_image,
            experiment_state.locations.invK,
            experiment_state.locations.invRt)
        lr = lr_img[center_mask.expand_as(lr_img)].view(-1, 1, 3)
        ud = ud_img[center_mask.expand_as(ud_img)].view(-1, 1, 3)
        mask = mask_img[center_mask].view(-1, 1)
        normal_error = (mask * (((lr * normal_estimate).sum(dim=2)**2) +
                                ((ud * normal_estimate).sum(dim=2)**2))).sum()

        # term 3: smoothness constraint
        laplacian = get_laplacian(depth_image, center_mask)
        laplacian_masked = laplacian  # * (laplacian.abs() < 5 * step_size).float()
        smoothness_constraint = laplacian_masked.abs().sum()

        # the backprop
        total_loss = 1e5 * position_error + 10 * normal_error + \
            3000 * smoothness_constraint
        loss_evolution.append(total_loss.item())
        loop.set_description("Depth refinement | loss %8.6f" % loss_evolution[-1])

        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

    plt.clf()
    plt.plot(loss_evolution)
    plt.savefig(os.path.join(cache_path, "higo_refinement_loss.png"))
    plt.close()

    # now return a new experiment_state
    new_experiment_state = ExperimentState.copy(experiment_state)
    new_experiment_state.locations = DepthMapParametrization()
    new_experiment_state.locations.initialize(
        experiment_state.locations.create_image(depth_estimate[:, 0]).squeeze(),
        experiment_state.locations.mask,
        experiment_state.locations.invK,
        experiment_state.locations.invRt,
    )
    winner_normals = torch.gather(
        normal_volume, dim=1,
        index=winners_graphcut[:, None, None].expand(-1, -1, 3)).squeeze()
    winner_diffuse = torch.gather(
        diffuse_volume, dim=1,
        index=winners_graphcut[:, None, None].expand(-1, -1, 3)).squeeze()
    new_experiment_state.normals = experiment_state.normals.__class__(
        new_experiment_state.locations)
    new_experiment_state.normals.initialize(winner_normals)
    new_experiment_state.materials = experiment_state.materials.__class__(
        new_experiment_state.brdf)
    new_experiment_state.materials.initialize(winner_diffuse.shape[0],
                                              winner_diffuse,
                                              winner_diffuse.device)
    new_experiment_state.materials.brdf_parameters['specular']['albedo'].data.zero_()
    new_experiment_state.materials.brdf_parameters['specular']['roughness'].data.fill_(0.1)
    return new_experiment_state
async def on_message(self, message):
    try:
        def check(m):
            flag = False
            m_split = m.content.translate(
                {ord(i): None for i in '",.?!;:-_`*\''}).upper().split(" ")
            banned_words = [
                "NIGGER", "N***A", "AUTISTIC", "RETARD", "RETARDED",
                "AIDS", "SPASTIC", "SPAZ", "RETARDS"
            ]
            for word in banned_words:
                if word in m_split:
                    flag = True
            return flag

        if check(message):
            await message.delete()
            return 0

        # crappy fix for the mention inconsistency between platforms
        message.content = message.content.replace("@!", "@")

        if len(message.content) == 0:
            return

        if isinstance(message.channel, discord.DMChannel):
            log.info("Ignoring Direct Message From %s: %s" %
                     (str(message.author), str(message.content)))
            return

        mSplit = message.content.split()
        mList = []
        for word in mSplit:
            user = discord.utils.get(
                message.guild.members,
                mention=word.replace("@", "@!").replace("!!", "!"))
            if not user is None:
                if not user.nick is None:
                    word = word.replace("@", "@!").replace("!!", "!")
            mList.append(word)
        message.content = " ".join(mList)

        if message.content.upper().startswith("V!UPDATE"):
            await message.channel.send("Syncing DB")
            for member in message.guild.members:
                insert_db_user(member)
            await message.channel.send("Completed DB Sync")

        args = message.content.split(" ")

        if message.content[0] == "$":
            message.content = message.content.replace("$", "!tag ")
            await self.client.process_commands(message)

        if args[0].lower() in self.client.disabled_commands:
            await error("[423] This command is currently disabled",
                        message.channel)
            return False

        channel = message.channel

        if message.content.upper().startswith("!ENTER"):
            if "Manager" in [role.name for role in message.author.roles] \
                    and self.client.raffles:
                embed = discord.Embed(
                    title="Raffle",
                    description="Sorry %s, you are not allowed to enter raffles."
                                % message.author.mention,
                    color=colour.reds)
                await message.channel.send(embed=embed)
            elif self.client.raffles and not message.author.name in self.client.enteries:
                self.client.enteries.append(message.author.name)
                embed = discord.Embed(
                    title="Raffle",
                    description="**%s** has been entered!" % message.author.name,
                    color=0x00ff73)
                await message.channel.send(embed=embed)
            elif self.client.raffles:
                embed = discord.Embed(
                    title="Raffle",
                    description="Hey %s! You can only enter into the same raffle once!"
                                % message.author.mention,
                    color=colour.reds)
                await message.channel.send(embed=embed)

        if message.content.upper().startswith("!VOTE"):
            if self.client.polls and not message.author.name in self.client.polls_enteries:
                args = message.content.split(" ")
                try:
                    choice = int(args[1])
                    self.client.polls_votes[choice - 1] += 1
                except (IndexError, TypeError, ValueError):
                    embed = discord.Embed(
                        title="Poll",
                        description="Hey %s! That is not a valid option!"
                                    % message.author.mention,
                        color=colour.reds)
                    await message.channel.send(embed=embed)
                    return
                self.client.polls_enteries.append(message.author.name)
                embed = discord.Embed(
                    title="Poll",
                    description="**%s** you have voted for %s"
                                % (message.author.name,
                                   self.client.polls_options[choice - 1]),
                    color=0x00ff73)
                await message.channel.send(embed=embed)
            elif self.client.polls:
                embed = discord.Embed(
                    title="Poll",
                    description="Hey %s! You can only vote once"
                                % message.author.mention,
                    color=colour.reds)
                await message.channel.send(embed=embed)

        if message.content.upper().startswith("!SUDO"):
            if message.author.id == 345514405775147023:
                args = message.content.split()
                target = discord.utils.get(message.guild.members, mention=args[1])
                channel = self.client.get_channel(int(args[2]))
                contents = " ".join(args[3:])
                message.content = contents
                message.author = target
                message.channel = channel
                await self.client.process_commands(message)

        if message.guild == None:
            return

        words = message.content.split()
        if not str(message.author.id) in self.client.ignore_list \
                and not str(message.channel.id) in self.client.ignore_list \
                and not message.author.id in self.client.cooldown \
                and len(words) > 4 and len(message.content) > 16:
            multiplier = sql.db_query(
                "SELECT Level FROM Members WHERE UserID = 1")[0][0]
            if "Supporter" in [role.name for role in message.author.roles]:
                multiplier = multiplier + 0.5
            bal = sql.db_query(
                "SELECT Balance FROM Members WHERE UserID = %s"
                % str(message.author.id))[0][0]
            if not "Manager" in [role.name for role in message.author.roles]:
                currentWeeklyPoints = sql.db_query(
                    "SELECT weeklyActivity from Members WHERE UserID = %s"
                    % str(message.author.id))[0][0]
                newWeeklyPoints = currentWeeklyPoints + 1
                sql.execute_query(
                    "UPDATE Members set weeklyActivity = %s WHERE UserID = %s"
                    % (str(newWeeklyPoints), str(message.author.id)))
            level = get_profile(message.author.id)[1]
            self.client.cooldown.append(message.author.id)
            exp_add = int(round(random.randint(15, 25) * multiplier, 0))
            add_exp(message.author.id, exp_add)
            channel = self.client.get_channel(547120498568331267)
            await check_level_up(message.author.id, message.guild,
                                 message.channel)
            await asyncio.sleep(60)
            try:
                self.client.cooldown.remove(message.author.id)
            except ValueError:
                # some people aren't always on cooldown
                pass

        def check(m):
            flag = False
            m_split = m.content.translate(
                {ord(i): None for i in '",.?!;:-_`*\''}).upper().split(" ")
            banned_words = [
                "F**K", "F*****G", "DICK", "BOLLOCK", "F***S", "AIDS",
                "BOLLOCKS", "F****D", "W***E", "BASTARD", "SHIT", "S******G",
                "C**T", "WANKER", "BASTARD", "BELLEND", "N***A", "NIGGER",
                "PISS", "PISSING", "C**T", "CUNTS", "WANKERS", "RETARDS",
                "RETARD", "RETARDED", "F****T", "FKING", "FK", "CRAP",
                "PUSSY", "PENIS", "C**K"
            ]
            for word in banned_words:
                if word in m_split:
                    flag = True
            return flag
    except Exception as e:
        log.error("Error Processing Message From %s - Error: %s" %
                  (str(message.author), str(e)))
def initialize(self, data_adapter, initialization_settings):
    device = torch.device(general_settings.device_name)

    self.locations.initialize(data_adapter.center_depth,
                              data_adapter.center_depth > 0,
                              data_adapter.center_invK,
                              data_adapter.center_invRt)

    self.observation_poses.initialize(
        torch.stack([image.original_extrinsics[:3, :3]
                     for image in data_adapter.images]).to(device),
        torch.stack([image.original_extrinsics[:3, 3:]
                     for image in data_adapter.images]).to(device),
    )

    if initialization_settings['lights'] == "precalibrated":
        with open(initialization_settings['light_calibration_files']['positions'],
                  "rb") as fh:
            light_positions = pickle.load(fh)[0]
        light_intensities = np.load(
            initialization_settings['light_calibration_files']['intensities'])
        light_attenuations = np.load(
            initialization_settings['light_calibration_files']['attenuations'])
        self.light_parametrization.initialize(
            torch.tensor(light_positions, dtype=torch.float32, device=device),
            torch.tensor(light_intensities, dtype=torch.float32, device=device),
            torch.tensor(light_attenuations, dtype=torch.float32, device=device),
        )
    else:
        error("Only precalibrated lights are supported.")

    if any(["closed_form" in initialization_settings[entry]
            for entry in initialization_settings]):
        closed_form_normals, closed_form_diffuse, _, _ = \
            closed_form_lambertian_solution(self, data_adapter)
        points = self.locations.location_vector()
        closed_form_normals = permutohedral_filter(
            closed_form_normals[None], points[None] / 0.003,
            torch.ones(points.shape[0]).to(points.device), False)[0]

    if initialization_settings['diffuse'] == "from_closed_form":
        self.materials.initialize(self.locations.get_point_count(),
                                  closed_form_diffuse,
                                  self.locations.device())
    else:
        error("Only closed-form diffuse initialization is supported.")

    if initialization_settings['specular'] == "hardcoded":
        self.materials.brdf_parameters['specular']['albedo'].data.fill_(0.5)
        roughness = self.materials.brdf_parameters['specular']['roughness']
        roughness.data[:] = (
            0.1 + 0.3 * torch.arange(start=0, end=roughness.shape[0],
                                     step=1).view(-1, 1) / roughness.shape[0])
        if 'eta' in self.materials.brdf_parameters['specular']:
            self.materials.brdf_parameters['specular']['eta'].data.fill_(1.5)
    else:
        error("Only hardcoded specular initialization is supported.")

    if initialization_settings['normals'] == "from_closed_form":
        self.normals.initialize(closed_form_normals)
    elif initialization_settings['normals'] == "from_depth":
        self.normals.initialize(self.locations.implied_normal_vector())
def on_resource_error(err, sender):
    logging.error(err)
def __parse_stations(self, data, genre):
    """
    Parses the list of stations.
    """
    stations = []
    next_page_url = ""
    soup = BeautifulSoup(data)
    resulttable = soup.find("div", {"id": "content"})

    if resulttable:
        for entry in resulttable.findAll("tr"):
            station = File(self)
            try:
                station.name = entry.find("span", {"class": "name"}).a.contents[0]
            except:
                continue

            try:
                now_playing = entry.find("p", {"class": "stream-onair"}).contents[1]
            except:
                now_playing = ""

            station.resource = _ICECAST_BASE + \
                entry.find("td", {"class": "tune-in"}).find("a")["href"]

            try:
                bitrate = entry.find("td", {"class": "tune-in"}) \
                               .findAll("p", {"class": "format"})[1]["title"]
            except:
                bitrate = "-"

            try:
                typename = entry.find("a", {"class": "no-link"}).contents[0].strip()
            except:
                typename = ""

            if typename == "MP3":
                station.mimetype = "audio/mpeg"
            elif typename == "AAC+":
                station.mimetype = "audio/mp4"
            else:
                station.mimetype = "audio/x-unknown"

            station.path = "/" + genre + "/" + \
                self.__encode_station(station.name, bitrate,
                                      station.mimetype, station.resource,
                                      genre)
            station.info = "Bitrate: %s\n" \
                           "Now playing: %s" % (bitrate, now_playing)
            station.icon = theme.icecast_station.get_path()
            stations.append(station)
        #end for

        pager_tag = resulttable.find("ul", {"class": "pager"})
        if pager_tag:
            link = pager_tag.findAll("a")[-1]
            if not link.contents[0].isdigit():
                # must be an arrow
                next_page_url = link["href"]
        #end if
    #end if

    if not stations:
        self.__current_folder.message = "station list not available"
        logging.error("icecast station listing download failed\n%s",
                      logging.stacktrace())

    return (stations, next_page_url)
def on_error(self, status):
    logging.error(status)
def optimize(experiment_state,
             data_adapter,
             optimization_settings,
             output_path_structure=None):
    """
    Optimize the current experiment_state, given the observations in
    data_adapter and the optimization_settings.

    Optionally saves optimization progress plots to the output_path_structure,
    which is formatted as output_path_structure % plot_name.
    """
    parameter_dictionary, learning_rate_dictionary, visualizer_dictionary = \
        experiment_state.get_parameter_dictionaries()
    device = torch.device(general_settings.device_name)

    # Build the optimizer's parameter groups, checking in a single pass that
    # every requested parameter exists in the chosen parametrization and that
    # it is a leaf variable the optimizer can actually update.
    parameter_groups = []
    for parameter in optimization_settings['parameters']:
        if not parameter in parameter_dictionary:
            error("Cannot optimize over %s, as it is not part of the chosen parametrization." % parameter)
        parameters = parameter_dictionary[parameter]
        if not isinstance(parameters, list):
            parameters = [parameters]
        for idx, x in enumerate(parameters):
            if not x.is_leaf:
                error("Cannot optimize over '%s[%d]': not a leaf variable." % (parameter, idx))
        parameter_groups.append({
            'params': parameter_dictionary[parameter],
            'lr': learning_rate_dictionary[parameter],
        })

    optimizer = torch.optim.Adam(parameter_groups, betas=[0.9, 0.9], eps=1e-3)
    iterations = optimization_settings['iterations']
    # MultiStepLR expects integer milestones
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[iterations // 2, iterations * 3 // 4],
        gamma=0.1)

    losses = []
    for loss_name, weight in optimization_settings['losses'].items():
        losses.append([loss_name, LossFunctionFactory(loss_name)(), weight])

    shadow_cache = {}
    occlusion_cache = {}

    loss_evolutions = {'Total': []}
    loss_evolutions.update(
        dict([(losses[loss_idx][0], []) for loss_idx in range(len(losses))]))
    parameter_evolutions = defaultdict(lambda: [])

    if optimization_settings['target_set'] == "training":
        training_indices_batches, training_light_infos_batches = \
            data_adapter.get_training_info()
    elif optimization_settings['target_set'] == "testing":
        training_indices_batches, training_light_infos_batches = \
            data_adapter.get_testing_info()
        if any([
            x != "observation_poses"
            for x in optimization_settings['parameters']
        ]):
            log("Warning: optimizing non-pose parameters over the testing set.")
    else:
        error("optimization_settings['target_set'] should be one of ('training', 'testing')")

    total_training_views = sum([
        len(training_indices) for training_indices in training_indices_batches
    ])
    ctr_index = data_adapter.get_center_index()

    optimization_loop = tqdm(range(iterations))
    for iteration in optimization_loop:
        optimizer.zero_grad()
        iteration_losses = defaultdict(lambda: 0)
        for training_indices, training_light_infos in zip(
                training_indices_batches, training_light_infos_batches):
            simulations = experiment_state.simulate(training_indices,
                                                    training_light_infos,
                                                    shadow_cache=shadow_cache)
            observations = experiment_state.extract_observations(
                data_adapter,
                training_indices,
                occlusion_cache=occlusion_cache)
            total_loss = 0.0
            for loss_index in range(len(losses)):
                loss_name, loss_fcn, loss_weight = losses[loss_index]
                this_loss = loss_fcn.evaluate(
                    simulations, observations, experiment_state,
                    data_adapter).sum() * loss_weight * len(
                        training_indices) / total_training_views
                total_loss += this_loss
                iteration_losses[loss_name] += this_loss.item()
            total_loss.backward()
            iteration_losses["Total"] += total_loss.item()
            experiment_state.clear_parametrization_caches()
            del simulations, observations, total_loss
        for loss in iteration_losses:
            loss_evolutions[loss].append(iteration_losses[loss])

        # zero the pose gradient of the center view so it stays fixed
        if ctr_index is not None:
            for parameter in parameter_dictionary['observation_poses']:
                if parameter.grad is not None:
                    parameter.grad[ctr_index].zero_()

        optimizer.step()
        experiment_state.enforce_parameter_bounds()

        if "photoconsistency L1" in loss_evolutions:
            desc_prefix = "Photometric L1 loss: %8.4f " % iteration_losses[
                "photoconsistency L1"]
        else:
            desc_prefix = ""
        optimization_loop.set_description(desc_prefix +
                                          "Total loss: %8.4f" %
                                          iteration_losses["Total"])

        with torch.no_grad():
            for parameter in optimization_settings['parameters']:
                if visualizer_dictionary[parameter] is None:
                    continue
                visualized = visualizer_dictionary[parameter](
                    parameter_dictionary[parameter])
                if isinstance(visualized, dict):
                    for x in visualized:
                        parameter_evolutions[x].append(visualized[x].view(1, -1))
                else:
                    parameter_evolutions[parameter].append(visualized.view(1, -1))

            if (iteration + 1) % general_settings.evolution_plot_frequency == 0 \
                    and output_path_structure is not None:
                plt.figure("Losses")
                plt.clf()
                loss_names = []
                loss_values = []
                for loss_name in loss_evolutions:
                    loss_values.append(loss_evolutions[loss_name])
                    loss_names.append(loss_name)
                xvalues = np.arange(1, len(loss_values[0]) + 1).reshape(
                    len(loss_values[0]), 1)
                plt.semilogy(xvalues.repeat(len(loss_names), 1),
                             np.array(loss_values).T)
                plt.ylim([0.9e3, 1.1e6])
                plt.legend(loss_names)
                plt.savefig(output_path_structure % "loss")

                for parameter in parameter_evolutions:
                    plt.figure(parameter)
                    plt.clf()
                    plt.plot(
                        xvalues.repeat(
                            parameter_evolutions[parameter][0].shape[1], 1),
                        torch.cat(parameter_evolutions[parameter],
                                  dim=0).cpu().numpy(),
                    )
                    plt.savefig(output_path_structure % parameter)
        scheduler.step()
    plt.close("Losses")
    for parameter in parameter_evolutions:
        plt.close(parameter)
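# A minimal sketch of the optimizer/scheduler pattern used in optimize()
# above: Adam over explicit parameter groups, plus a MultiStepLR that decays
# the learning rate at 1/2 and 3/4 of the run. The toy quadratic objective
# is an illustrative assumption; only torch is required.
import torch

x = torch.tensor([5.0], requires_grad=True)  # a leaf variable, as checked above
optimizer = torch.optim.Adam([{'params': [x], 'lr': 0.1}],
                             betas=[0.9, 0.9], eps=1e-3)
iterations = 100
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer,
    milestones=[iterations // 2, iterations * 3 // 4],  # integer milestones
    gamma=0.1)

for iteration in range(iterations):
    optimizer.zero_grad()
    loss = (x ** 2).sum()
    loss.backward()
    optimizer.step()
    scheduler.step()  # one scheduler step per iteration, as in optimize()

print(float(x), optimizer.param_groups[0]['lr'])  # x near 0, lr decayed to 0.001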
def DataAdapterFactory(name):
    if name == "XIMEA":
        return XimeaAdapter
    else:
        error("Data type '%s' is not known." % name)
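# An equivalent, table-driven sketch of DataAdapterFactory above: a registry
# dict turns the if/elif chain into a lookup, so adding an adapter is a
# one-line change. The placeholder XimeaAdapter class and the error() helper
# below are stand-ins for the project's own definitions.
class XimeaAdapter:  # placeholder for the project's real adapter class
    pass

def error(message):
    raise ValueError(message)  # the project's error() presumably aborts

_ADAPTERS = {"XIMEA": XimeaAdapter}

def data_adapter_factory(name):
    adapter = _ADAPTERS.get(name)
    if adapter is None:
        error("Data type '%s' is not known." % name)
    return adapter

print(data_adapter_factory("XIMEA"))  # <class '__main__.XimeaAdapter'>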
def __init__(self, brdf_parametrization=None):
    super().__init__()
    if brdf_parametrization is None:
        error("MaterialParametrization must be created with a BrdfParametrization")
    self.brdf_parametrization = brdf_parametrization
def handle_COM_EV_APP_STARTED(self):
    if (self.__outputs):
        self.__current_output = self.__outputs[0]
    else:
        logging.error("no output device available")
async def connect() -> None:  # ran before server startup, used to do things like connecting to mysql :D
    info(f"Asahi v{glob.version} starting")
    glob.web = ClientSession()  # aiohttp session for external web requests

    from lists.players import PlayerList
    glob.players = PlayerList()  # init player list

    try:
        glob.db = await fatFawkSQL.connect(**glob.config.sql)  # connect to db using config :p
        debug("Asahi connected to MySQL")
    except Exception:
        error(f"Asahi failed to connect to MySQL\n\n{traceback.format_exc()}")
        raise SystemExit(1)

    try:
        glob.redis = await aioredis.create_redis_pool(
            f"redis://{glob.config.redis['host']}",
            db=glob.config.redis["db"],
            password=glob.config.redis["password"] or None,
        )
        debug("Asahi connected to Redis")
    except Exception:
        error(f"Asahi failed to connect to Redis\n\n{traceback.format_exc()}")
        raise SystemExit(1)

    from objects.player import Player

    botinfo = await glob.db.fetchrow(
        "SELECT name, pw, country FROM users WHERE id = 1",
    )
    if not botinfo:
        error(
            "Bot account not found. "
            "Please insert the bot account with user ID 1 and start Asahi again",
        )
        raise SystemExit(1)

    glob.bot = Player(
        id=1,
        name=botinfo["name"],
        offset=1,
        country_iso=botinfo["country"],
        country=country_codes[botinfo["country"].upper()],
    )

    await glob.bot.set_stats()
    glob.players.append(glob.bot)
    debug(f"Added bot {glob.bot.name} to player list")

    async for ach_row in glob.db.iter("SELECT * FROM achievements"):
        ach_row["cond"] = eval(f'lambda s: {ach_row["cond"]}')
        ach_row["desc"] = ach_row.pop("descr")
        glob.achievements.append(Achievement(**ach_row))

    init_customs()  # set custom achievements list for assets proxy

    # add all channels to cache
    from objects.channel import Channel
    async for chan_row in glob.db.iter("SELECT * FROM channels"):
        chan_row["desc"] = chan_row.pop("descr")
        channel = Channel(**chan_row)
        glob.channels[channel.name] = channel
        debug(f"Added channel {channel.name} to channel list")

    # add announce channel to cache
    announce = Channel(
        name="#announce",
        desc="#1 scores and public announcements will be posted here",
        auto=True,
        perm=True,
    )
    glob.channels[announce.name] = announce
    debug("Added channel #announce to channel list")

    # add lobby channel to cache
    lobby = Channel(name="#lobby", desc="Multiplayer lobby", auto=False, perm=True)
    glob.channels[lobby.name] = lobby
    debug("Added channel #lobby to channel list")

    # add all clans to cache
    async for clan_row in glob.db.iter("SELECT * FROM clans"):
        clan = Clan(**clan_row)
        clan_chan = Channel(
            name="#clan",
            desc=f"Clan chat for clan {clan.name}",
            auto=False,
            perm=True,
        )
        clan.chan = clan_chan  # uwu
        glob.clans[clan.id] = clan

        clan.country = await glob.db.fetchval(
            "SELECT country FROM users WHERE id = %s",
            [clan.owner],
        )

        async for member_row in glob.db.iter(
            "SELECT id FROM users WHERE clan = %s",
            [clan.id],
        ):
            clan.members.append(member_row["id"])

        await glob.redis.zadd("asahi:clan_leaderboard", clan.score, clan.id)
        await glob.redis.zadd(
            f"asahi:clan_leaderboard:{clan.country}",
            clan.score,
            clan.id,
        )

        r = await glob.redis.zrevrank("asahi:clan_leaderboard", clan.id)
        cr = await glob.redis.zrevrank(
            f"asahi:clan_leaderboard:{clan.country}",
            clan.id,
        )
        # zrevrank returns None for a missing member and 0 for the top entry,
        # so compare against None instead of relying on truthiness
        clan.rank = r + 1 if r is not None else 0
        clan.country_rank = cr + 1 if cr is not None else 0

        debug(f"Added clan {clan.name} to clan list")

    await prepare_tasks()  # make new db conn for donor/freeze tasks

    glob.app.add_task(expired_donor)
    glob.app.add_task(freeze_timers)

    info(f"Asahi v{glob.version} started")
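# A small sketch of the zrevrank pitfall fixed above. Redis ranks are
# 0-based and a missing member yields None, so a truthiness test ("if r")
# wrongly treats the #1 entry (rank 0) the same as a missing one. The
# fake_zrevrank helper is an illustrative stand-in for the aioredis call.
def fake_zrevrank(leaderboard, member):
    ordered = sorted(leaderboard, key=leaderboard.get, reverse=True)
    return ordered.index(member) if member in leaderboard else None

board = {"clan_a": 300, "clan_b": 200}

r = fake_zrevrank(board, "clan_a")           # 0: top of the leaderboard
assert (r + 1 if r else 0) == 0              # buggy: top clan shown as unranked
assert (r + 1 if r is not None else 0) == 1  # fixed: top clan is rank 1

r = fake_zrevrank(board, "clan_c")           # None: not on the leaderboard
assert (r + 1 if r is not None else 0) == 0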
def __init__(self):
    self.__is_eof = False

    # reference to the MAFW registry gobject
    self.__registry = None
    # reference to the MAFW renderer gobject
    self.__renderer = None
    # current state of the renderer
    self.__current_state = _MAFW_STATE_TRANSITIONING
    # current position in the stream (used during retrieving the position)
    self.__current_position = -1
    # track duration
    self.__duration = -1
    self.__to_seek = 0
    # sound volume
    self.__volume = 50
    # time of loading for profiling
    self.__load_time = 0
    # time when MediaBox has last changed the sound volume
    self.__last_volume_change_time = 0

    # references to the ctypes callback wrappers, so that they don't get
    # garbage collected too early
    self.__playback_cb = _MAFW_PLAYBACK_CB(self.__playback_cb)
    self.__position_cb = _MAFW_POSITION_CB(self.__position_cb)
    self.__property_cb = _MAFW_EXTENSION_PROPERTY_CB(self.__property_cb)

    # MAFW libraries
    self.__mafw = ctypes.CDLL("libmafw.so.0")
    self.__mafw_shared = ctypes.CDLL("libmafw-shared.so.0")

    AbstractBackend.__init__(self)

    # retrieve and initialise registry
    registry_p = self.__mafw.mafw_registry_get_instance()
    if (not registry_p):
        logging.error("could not get MAFW registry")
        return
    #end if

    self.__registry = c_gobject.wrap(registry_p)
    err_p = ctypes.POINTER(_GError)()
    self.__mafw_shared.mafw_shared_init(registry_p, ctypes.byref(err_p))
    if (err_p):
        logging.error("GError occurred: %s", err_p[0].message)
        return
    #end if

    # listen for incoming renderers (this should be how we find
    # the gst-renderer)
    self.__registry.connect("renderer_added", self.__on_renderer_added)

    # some renderers could be loaded already (not really...). look for them
    list_p = self.__mafw.mafw_registry_get_renderers(registry_p)
    while (list_p):
        item = _GList(list_p[0])
        logging.info("found preloaded MAFW renderer")
        renderer_p = item.data
        self.__register_renderer(c_gobject.wrap(renderer_p))
        list_p = item.next
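# A minimal sketch of the ctypes pattern used above: the CFUNCTYPE wrapper
# object must stay referenced on the Python side (here, as an attribute),
# otherwise it can be garbage collected while the C library still holds the
# function pointer, crashing on the next callback. The callback signature
# below is an illustrative assumption.
import ctypes

_CALLBACK = ctypes.CFUNCTYPE(None, ctypes.c_int)  # void (*cb)(int)

class Backend:
    def __init__(self):
        # keep a reference for the lifetime of the object; passing
        # _CALLBACK(self.__on_event) directly to C code would leave the
        # wrapper with no owner and make it collectable
        self.__event_cb = _CALLBACK(self.__on_event)

    def __on_event(self, value):
        print("callback invoked with", value)

    def fire(self):
        self.__event_cb(7)  # stands in for the C library invoking the pointer

Backend().fire()  # callback invoked with 7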
def on_exception(self, exception): logging.error(exception)
def evaluate_state(evaluation_name, object_name, gt_scan_folder, experiment_state):
    """
    Evaluate the current experiment_state with the relevant ground truth.

    Performs registration refinement on the point clouds, and then calculates
    average geometric accuracy and normal angle (in the image domain).
    """
    gt_scan_file = os.path.join(gt_scan_folder, "%s_manual.ply" % object_name)
    if not os.path.exists(gt_scan_file):
        error("GT scan for %s not available:\n\t%s" % (object_name, gt_scan_file))
    gt_scan_mesh = o3d.io.read_triangle_mesh(gt_scan_file)  # stored in m
    gt_scan = gt_scan_mesh.sample_points_uniformly(int(1e6))
    # objects covering 1m^2, that's 1mm^2 per point
    gt_scan.estimate_normals()

    estimated_cloud = o3d.geometry.PointCloud()
    estimated_cloud.points = to_o3d(to_numpy(experiment_state.locations.location_vector()))
    estimated_cloud.normals = to_o3d(to_numpy(experiment_state.normals.normals()))
    estimated_cloud.colors = to_o3d(to_numpy(experiment_state.materials.get_brdf_parameters()['diffuse']))

    registration_transform = refine_registration(
        gt_scan,
        estimated_cloud,
        distance_threshold=0.005  # m
    )
    gt_mesh_aligned = copy.deepcopy(gt_scan_mesh)
    gt_mesh_aligned.transform(registration_transform)
    # o3d.visualization.draw_geometries([gt_mesh_aligned, estimated_cloud])

    # now project the gt_scan onto our image plane and calculate depth and
    # normal errors there
    estimated_depth = to_numpy(experiment_state.locations.implied_depth_image())
    estimated_normals = to_numpy(experiment_state.locations.create_image(experiment_state.normals.normals()))
    K_proj = to_numpy(experiment_state.locations.invK.inverse())
    Rt = to_numpy(experiment_state.locations.invRt.inverse())

    image_based_transform = image_based_alignment(
        np.array(gt_mesh_aligned.vertices),
        np.array(gt_mesh_aligned.vertex_normals),
        Rt,
        K_proj,
        estimated_depth,
        estimated_normals,
        verbose=False
    )
    gt_mesh_aligned.transform(np.concatenate(
        (image_based_transform, np.array([0, 0, 0, 1]).reshape(1, 4)),
        axis=0))

    gt_normals_aligned, gt_depth_aligned = render_depth_normals(
        np.array(gt_mesh_aligned.vertices),
        np.array(gt_mesh_aligned.triangles),
        np.array(gt_mesh_aligned.vertex_normals),
        K_proj,
        Rt,
        estimated_depth.shape[:2],
    )

    depth_diff = np.abs(estimated_depth - gt_depth_aligned)
    # pixels near depth discontinuities are unreliable; detect them with a
    # morphological gradient on the ground-truth depth
    dilated_depth = cv2.dilate(gt_depth_aligned, np.ones((7, 7)))
    eroded_depth = cv2.erode(gt_depth_aligned, np.ones((7, 7)))
    edges = (dilated_depth - eroded_depth) > 0.02
    # mask before excluding edge pixels (kept for reference)
    valid_pixels = (estimated_depth > 0) * (gt_depth_aligned > 0) * (depth_diff < 0.02)
    edgevalid_pixels = (estimated_depth > 0) * (gt_depth_aligned > 0) * (depth_diff < 0.02) * (edges == 0)

    normal_dotprods = (gt_normals_aligned * estimated_normals).sum(axis=2).clip(min=-1., max=1.)
    normal_anglediff = np.arccos(normal_dotprods) / np.pi * 180.0

    average_accuracy = (depth_diff * edgevalid_pixels).sum() / edgevalid_pixels.sum()
    average_angle_error = (edgevalid_pixels * normal_anglediff).sum() / edgevalid_pixels.sum()

    log("Evaluating %s - %s: depth accuracy %6.4fmm normal accuracy %10.4f degrees" % (
        evaluation_name, object_name, average_accuracy * 1000, average_angle_error))
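# A minimal numpy sketch of the masked error metrics computed at the end of
# evaluate_state: a morphological gradient flags depth discontinuities, and
# the mean depth error / normal angle error are averaged only over pixels
# that are valid in both maps and away from those edges. The synthetic
# depth and normal maps below are illustrative assumptions.
import numpy as np
import cv2

h = w = 32
gt_depth = np.ones((h, w), np.float32)
gt_depth[:, w // 2:] = 1.5                  # a depth step -> an edge region
est_depth = gt_depth + 0.001                # 1mm of uniform depth error
gt_normals = np.zeros((h, w, 3), np.float32)
gt_normals[..., 2] = 1.0                    # all normals facing the camera
est_normals = gt_normals.copy()

kernel = np.ones((7, 7), np.uint8)
edges = (cv2.dilate(gt_depth, kernel) - cv2.erode(gt_depth, kernel)) > 0.02
depth_diff = np.abs(est_depth - gt_depth)
mask = (est_depth > 0) & (gt_depth > 0) & (depth_diff < 0.02) & ~edges

angle_deg = np.degrees(np.arccos(
    (gt_normals * est_normals).sum(axis=2).clip(-1.0, 1.0)))

print("depth error: %.4f mm" % (1000 * depth_diff[mask].mean()))  # ~1.0000 mm
print("normal error: %.4f deg" % angle_deg[mask].mean())          # ~0.0000 deg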