def read(self, xmlfile, root=None):
    try:
        doc = etree.parse(xmlfile)
    except etree.XMLSyntaxError as e:
        debug.dprint("Invalid XML.")
        debug.dprint(e)
        return None

    self.lost_eles = []
    self.added_eles = []
    self.lost_attrs = []
    self.added_attrs = []

    if root is None:
        datatree = self.valid_children(":start")[0]
    else:
        datatree = self.valid_node(root)

    xmlnode = doc.getroot()
    self.xml_read_merge(datatree, xmlnode)
    self.xml_read_core(datatree, xmlnode, doc)

    if len(self.lost_eles) != 0:
        debug.deprint("WARNING: Lost XML elements:\n" + str(self.lost_eles))
    if len(self.added_eles) != 0:
        debug.deprint("WARNING: Added XML elements:\n" + str(self.added_eles))
    if len(self.lost_attrs) != 0:
        debug.deprint("WARNING: Lost XML attributes:\n" + str(self.lost_attrs))
    if len(self.added_attrs) != 0:  # fixed: previously re-tested added_eles here
        debug.deprint("WARNING: Added XML attributes:\n" + str(self.added_attrs))

    return datatree
def verify_genome(self, g):
    if debug.is_set(DEBUG_INTEGRITY):
        if not g.verify():
            dcallstack(DEBUG_INTEGRITY)
            dprint(DEBUG_INTEGRITY, "g.verify() failed.")
            dprint(DEBUG_INTEGRITY, "g:\n", g.deep_string())
    return
def adjust_fitness(self):
    age_debt = (self.age - self.age_of_last_improvement + 1) - neat.dropoff_age
    if age_debt == 0:
        age_debt = 1

    for curorg in self.organisms:
        curorg.orig_fitness = curorg.fitness
        # penalize stagnant (or obliterated) species heavily
        if age_debt >= 1 or self.obliterate:
            curorg.fitness = curorg.fitness * 0.01
        # boost young species
        if self.age <= 10:
            curorg.fitness = curorg.fitness * neat.age_significance
        if curorg.fitness < 0.0:
            curorg.fitness = 0.0001
        # share fitness across the species
        curorg.fitness = curorg.fitness / len(self.organisms)

    self.organisms.sort(key=order_orgs_key)

    if debug.is_set(DEBUG_CHECK):
        for i in range(1, len(self.organisms)):
            if self.organisms[i-1].fitness < self.organisms[i].fitness:
                dprint(DEBUG_ERROR, "sorted organisms out of order %7.5f < %7.5f." %
                       (self.organisms[i-1].fitness, self.organisms[i].fitness))

    if self.organisms[0].orig_fitness > self.max_fitness_ever:
        self.max_fitness_ever = self.organisms[0].orig_fitness
        self.age_of_last_improvement = self.age

    num_parents = int(math.floor(neat.survival_thresh * float(len(self.organisms)) + 1.0))
    self.organisms[0].champion = True
    if len(self.organisms) > num_parents:
        for i in range(num_parents, len(self.organisms)):
            self.organisms[i].eliminate = True
    return
def update(self, eventMgr):
    """
    Update the snake's body.

    During the update:
    * If the movement succeeds, emit a SNAKE_MOVE GameEvent.
    * If the snake eats a food, emit a SNAKE_EAT GameEvent.
    * If the snake dies, emit a SNAKE_DIE GameEvent.

    @eventMgr: An eventMgr for emitting events.
    """
    if self._mark_die:
        dprint('die. "Uuuuaaahhhh!!"', "length: ", len(self.body))
        self.set_body([])
        self._mark_die = False
        self.alive = False
    if not self.alive:
        return
    nextPos = self.next_head_pos()
    grid = self.world.field.get_grid_at(*nextPos)
    if grid is None:
        eventMgr.emit(SnakeDie(
            reason="block by field border",
            snake=self,
            pos=nextPos,
        ))
        self.die()
    else:
        self._nextGrid = grid
        self.eventMgr = eventMgr
        # dprint('try go forward')
        grid.lock.acquire(self, self.on_acquire_succeed,
                          self.on_acquire_fail('blocked by others', grid.pos))
def F_score(predict: torch.Tensor, labels: torch.Tensor, beta: int,
            threshold: float = 0.5) -> float:
    if not isinstance(predict, torch.Tensor):
        predict = torch.tensor(predict)
    if not isinstance(labels, torch.Tensor):
        labels = torch.tensor(labels)
    if predict.shape != labels.shape:
        dprint(predict.shape)
        dprint(labels.shape)
        assert False

    predict = predict > threshold
    labels = labels > threshold

    TP = (predict & labels).sum(1).float()
    TN = ((~predict) & (~labels)).sum(1).float()
    FP = (predict & (~labels)).sum(1).float()
    FN = ((~predict) & labels).sum(1).float()

    precision = TP / (TP + FP + 1e-12)
    recall = TP / (TP + FN + 1e-12)
    F2 = (1 + beta**2) * precision * recall / (beta**2 * precision + recall + 1e-12)
    return F2.mean(0).item()
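# A minimal usage sketch for F_score (not from the original source), assuming a
# multi-label setting: rows are samples, columns are classes; beta=2 weights
# recall over precision (the usual F2 metric).
import torch

preds = torch.tensor([[0.9, 0.2, 0.7],
                      [0.1, 0.8, 0.4]])
labels = torch.tensor([[1.0, 0.0, 1.0],
                       [0.0, 1.0, 1.0]])
print(F_score(preds, labels, beta=2))  # ~0.78 for this toy batch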
def load_dylib(path, **kwargs):
    '''Function for loading a dynamic library with ctypes

    Args:
        path: Path to the user-defined library
        usr_path: If set to True (the default), pyobjus loads the library from
            the path provided by the user -> path arg.
            Otherwise it looks in the objc_classes/test dir, which is in the
            pyobjus root dir.

    Note:
        Work in progress
    '''
    # LOADING USER DEFINED CLASS (dylib) FROM /objc_classes/test/ DIR
    usr_path = kwargs.get('usr_path', True)
    if not usr_path:
        if os.getcwd().split('/')[-1] != 'pyobjus':
            os.chdir('..')
            while os.getcwd().split('/')[-1] != 'pyobjus':
                os.chdir('..')
        root_pyobjus = os.getcwd()
        objc_test_dir = os.path.join(root_pyobjus, 'objc_classes', 'test')
        ctypes.CDLL(os.path.join(objc_test_dir, path))
    else:
        ctypes.CDLL(path)
    dprint("Dynamic library {0} loaded".format(path))
def verify(self):
    verification = True
    for curorg in self.organisms:
        if not curorg.verify():
            dprint(DEBUG_ERROR, "Organism verification failed.")
            verification = False
    return verification
def merge_results(index1: np.ndarray, distances1: np.ndarray,
                  index2: np.ndarray, distances2: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """ Returns the top-K results of two result sets. """
    print("merging results")
    assert index1.shape == distances1.shape and index2.shape == distances2.shape
    assert index1.shape[1] == index2.shape[1]

    joint_indices = np.hstack((index1, index2))
    joint_distances = np.hstack((distances1, distances2))
    print("joint_indices", joint_indices.shape, "joint_distances", joint_distances.shape)
    assert joint_indices.shape == joint_distances.shape

    best_indices = np.zeros((index1.shape[0], K), dtype=int)
    best_distances = np.zeros((index1.shape[0], K), dtype=np.float32)

    for sample in range(joint_indices.shape[0]):
        if not USE_COSINE_DIST:
            # L2 distance: smaller is better
            closest_indices = np.argsort(joint_distances[sample])
        else:
            # cosine similarity: larger is better
            closest_indices = np.argsort(-joint_distances[sample])
        closest_indices = closest_indices[:K]
        best_indices[sample] = joint_indices[sample, closest_indices]
        best_distances[sample] = joint_distances[sample, closest_indices]

    print("best_indices", best_indices.shape, "best_distances", best_distances.shape)
    dprint(best_indices)
    dprint(best_distances)
    return best_indices, best_distances
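# A toy check for merge_results (not from the original source); K and
# USE_COSINE_DIST are the module-level constants the function reads.
import numpy as np

K, USE_COSINE_DIST = 2, False
# Two shards of a 1-sample kNN search, two L2 neighbours each.
idx1, dist1 = np.array([[10, 11]]), np.array([[0.5, 0.9]], dtype=np.float32)
idx2, dist2 = np.array([[20, 21]]), np.array([[0.1, 0.7]], dtype=np.float32)
best_idx, best_dist = merge_results(idx1, dist1, idx2, dist2)
# best_idx -> [[20, 10]], best_dist -> [[0.1, 0.5]]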
def camera_movement_handler(self, task):
    '''Gets the position of the ship and moves the camera.'''
    if self.debug_cam:
        base.acceptOnce('q', self.__setattr__, ["debug_cam", False])
    else:
        base.acceptOnce('q', self.__setattr__, ["debug_cam", True])

    if (self.ship.getPos() > self.cameraNode.getPos()
            or self.ship.getPos() < self.cameraNode.getPos()):
        cameraPosLerp = LerpPosInterval(
            self.cameraNode, 0, self.ship.getPos(), bakeInStart=0)
        cameraPosLerp.start()

    if (not self.ship.getHpr() == self.cameraNode.getHpr()
            or self.ship.getHpr() < self.cameraNode.getHpr()):
        #Camera freaks out sometimes, must find solution
        #It has something to do with the hpr switching between
        # -180 to 180, 180 to -180, -1 to 1, and 1 to -1
        #The lerp seems to flip the camera completely around rather than
        #going directly to the next position; this only happens with the lerp!
        #Make the 3rd param 0 to fix it, but then you have ugly turning.
        #I'm sure this can be fixed without a lerp
        dprint("SHIP HPR: ", self.cameraNode.getHpr())
        cameraHprLerp = LerpHprInterval(
            self.cameraNode, 0.00, self.ship.getHpr(), bakeInStart=0)
        cameraHprLerp.start()
    return task.cont
def print_to_filename_by_species(self, filename):
    # open() raises on failure rather than returning a falsy value,
    # so catch the error instead of testing the handle
    try:
        outFile = open(filename, "w")
    except IOError:
        dprint(DEBUG_ERROR, "Can't open %s for output." % (filename,))
        return False
    self.print_to_file_by_species(outFile)
    outFile.close()
    return True
def log_data(self, device, ts, num, temp, light):
    self.__correct_summary_log(device, ts, num, temp, light)
    if self.__save_logname is None:
        dprint('cannot log to empty file')
    else:
        with open(self.__save_logname, 'a') as logf:
            logf.write('{} {} {} {}\n'.format(ts, num, temp, light))
def __init__(self, world, name, player, follow_ship, ship_to_follow):
    self.world = world
    self.name = name
    self.player = player
    self.follow_ship = follow_ship
    self.ship_to_follow = ship_to_follow
    super(BasicEnemyShip, self).__init__(world, name, player)
    dprint("Computer ship " + self.getName() + " initialized.")
def _upgrade_042_to_052(self, config_parser):
    try:
        # upgrade strongs_blocked
        blocked = config_parser.getboolean("Options", "strongs_blocked")
        position = "underneath" if blocked else "inline"
        config_parser.set("Options", "strongs_position", position)
    except (NoSectionError, NoOptionError), e:
        dprint(WARNING, "Error on upgrading", e)
def __sync_log_to_gsheets(self):
    try:
        sh = hamstersheet.HamsterSheet(self.__date, self.__event_log,
                                       self.__summary_log)
        url = sh.get_url()
        dprint('uploaded sheet {}'.format(url))
        self.__save_sheet_address(self.__date, url)
    except hamstersheet.SheetException as e:
        dprint('error while uploading sheet: {}'.format(e))
def SetFromString(self, argline):
    dprint(DEBUG_FILEINPUT, "Trait.SetFromString:argline:", argline)
    values = argline.split()
    if len(values) < neat.num_trait_params + 1:
        dprint(DEBUG_ERROR, "Trait.SetFromString:argline:", argline,
               "too few items:", len(values), "should have", neat.num_trait_params + 1)
        return  # bail out instead of indexing past the end of values
    self.trait_id = int(values[0])
    for i in range(neat.num_trait_params):
        self.params[i] = float(values[i + 1])
    return
def __check_day(self):
    dt = datetime.datetime.now()
    if self.__prev_date is not None:
        tt1 = self.__prev_date.timetuple()
        tt2 = dt.timetuple()
        # a "day" rolls over at noon; index 3 is tm_hour
        if tt1[3] < 12 and tt2[3] >= 12:
            dprint('-' * 80)
            dprint('starting new day {:02d}.{:02d}'.format(tt2[2], tt2[1]))
            self.logger.newday()
    self.__prev_date = dt
def stop(self):
    if self.state == self.ST_RECORDING:
        self.clips[self.clip.name] = self.clip
    elif self.state == self.ST_PLAYING:
        game = self.game
        # player = game.world.players[0] if game.world.players else None
        # game.eventMgr.emit(events.GameEnd(player))
        game.quit()
    self.state = self.ST_STOP
    dprint('recorder stopped')
def upgrade(self, config_parser, version_from):
    dprint(MESSAGE, "Upgrading from", version_from.getText())
    if version_from <= SW.Version("0.3"):
        self._upgrade_03_to_04(config_parser)
    if version_from < SW.Version("0.4.1"):
        self._upgrade_04_to_041(config_parser)
    if version_from < SW.Version("0.4.2"):
        self._upgrade_041_to_042(config_parser)
    if version_from < SW.Version("0.5.2"):
        self._upgrade_042_to_052(config_parser)
def ValidateOptionFile(self, schemafile, filename, xmlRootNode=None,
                       ignoreValidXMLCheck=False):
    debug.dprint("Validating options file against schema: " + schemafile, 1)
    schemafile = os.path.join(self._rootDir, schemafile)
    self.sch = schema.Schema(schemafile)
    self._TestSingle_file(filename, ignoreValidXMLCheck)
def movement_handler(self, task):
    '''Updates the position of the ship and handles acceleration'''
    force = self.Movement + (0, self.FORWARD_SPEED, 0)
    #torque = Vec3(0, 0, self.FORWARD_SPEED)
    if self.Movement == self.Forward or self.Movement == self.Stop:
        if self.Movement == self.Forward:
            print("Forward")
            self.FORWARD_SPEED += self.ACCEL
            if self.FORWARD_SPEED >= self.MaxSpeed:
                # Caps the speed at the maximum
                self.FORWARD_SPEED = self.MaxSpeed
        if self.Movement == self.Stop:
            print("Stopping!")
            if self.FORWARD_SPEED <= 0:
                self.FORWARD_SPEED = 0
                self.ship.node().clearForces()
            else:
                self.FORWARD_SPEED -= self.DECEL
        print self.ship.node().getLinearDamping()
        print("FORWARD_SPEED:", self.FORWARD_SPEED)
        self.ship.node().setActive(True)
        self.ship.node().applyCentralForce(
            render.getRelativeVector(self.ship, (force)))

    #Begin rotation code
    pointer = base.win.getPointer(0)
    pointerX = pointer.getX()
    pointerY = pointer.getY()
    if base.win.movePointer(0, base.win.getXSize() / 2, base.win.getYSize() / 2):
        torque = render.getRelativeVector(self.ship,
            # Pitch
            (-(pointerY - base.win.getYSize() / 2) * 0.1,
            # Roll
            self.ROLL,
            # Heading
            -(pointerX - base.win.getXSize() / 2) * 0.1))
        dprint("TORQUE: ", torque)
        self.ship.node().applyTorque(torque * self.TORQUE)

    #Slows down the ship when not turning, only affects torque
    self.ship.node().setAngularDamping(0.6)
    #Slows down the ship when not moving, only affects force
    self.ship.node().setLinearDamping(0.5)
    return task.cont
def _upgrade_04_to_041(self, config_parser):
    try:
        # upgrade strongs headwords
        headwords = config_parser.getboolean("Filter", "strongs_headwords")
        mod = "HeadwordsTransliterated"
        if not headwords:
            mod = ""
        config_parser.set("Filter", "headwords_module", mod)
    except (NoSectionError, NoOptionError), e:
        dprint(WARNING, "Error on upgrading", e)
def make_network(q, ci_network_name, index=0):
    networks = q.list_networks()
    if ci_network_name not in [network["name"] for network in networks["networks"]]:
        dprint("q.create_network({'network': {'name':" + ci_network_name +
               ", 'admin_state_up': True}})['network']")
        test_net = q.create_network(
            {"network": {"name": ci_network_name, "admin_state_up": True}})["network"]
    else:
        for net in networks["networks"]:
            if net["name"] == ci_network_name:
                test_net = net
    return test_net
def _upgrade_041_to_042(self, config_parser):
    import config
    try:
        config_parser = confparser.config()
        config_parser.read(config.sword_paths_file)
        if not config_parser.has_section("Install"):
            config_parser.add_section("Install")
        config_parser.set("Install", "LocalePath", "locales\\dummy")
        config_parser.write(open(config.sword_paths_file, "w"))
    except EnvironmentError, e:
        dprint(WARNING, "Error on upgrading - is sword.conf writable?", e)
def play(self, clipName=None):
    self.state = self.ST_PLAYING
    if not clipName:
        clip = self.clip
    else:
        clip = self.clips[clipName]
    # TODO: test if game and display restart normally
    game = self.game
    display = game.display
    configData = clip.configData  # fixed: use the selected clip, not self.clip
    game.setup_stage(configData, display)
    game.world.forbidGenFood = True
    # game.mainloop()
    dprint('recorder play')
def get_char(grid):
    if grid.type == SNAKE:
        if grid.lock.owner is None:
            dprint(grid)
        assert grid.lock.owner is not None
        return str(grid.content.secID % 10)
        # return 'SH'[grid.content.secID == 0]
    elif grid.type == BLANK:
        # dprint('grid', grid)
        # return u'\u25A1'.encode('utf8')
        assert grid.lock.owner is None
        return '_'
    elif grid.type == FOOD:
        return 'F'
    else:
        return '?'
def load_framework(framework):
    '''Function for loading frameworks

    Args:
        framework: Framework to load

    Raises:
        ObjcException if it can't load the framework
    '''
    NSBundle = pyobjus.autoclass('NSBundle')
    ns_framework = pyobjus.autoclass('NSString').stringWithUTF8String_(framework)
    bundle = NSBundle.bundleWithPath_(ns_framework)
    try:
        if bundle.load():
            dprint("Framework {0} successfully loaded!".format(framework))
    except:
        raise pyobjus.ObjcException('Error while loading {0} framework'.format(framework))
def init_ship(self, model="models/ship.x"):
    """Initializes a ship model node and a basic Bullet rigid body node.

    Extend this method if needed. The default rigid body node shape is
    a box with the approximate bounds of the model.

    When extending:
    Access the ship's node with self.ship.
    Access the Bullet rigid body node with self.ship.node().
    Access the ship's model with self.ship.getChild(0).
    For basic transforms only modify self.ship!

    Keyword arguments:
    model -- the path to the ship's model (default "models/ship.x")
    """
    #Debug
    dprint("Initializing ship: ", self.getName())
    #Load the visual ship model
    ship_model = loader.loadModel(model)
    ship_model.setName(self.name + "_playerShip")
    #Roughly determine the size for the collision shape
    min, max = ship_model.getTightBounds()
    bounds_size = (max - min) / 2
    #Set up the shape for the rigid body
    shape = BulletBoxShape(Vec3(bounds_size))
    #Make the rigid body node and assign it
    self.ship = render.attachNewNode(
        BulletRigidBodyNode(self.name + "_Ship"))
    #Set up some bullet stuff
    self.ship.node().addShape(shape)
    self.ship.node().setMass(3)
    self.ship.setCollideMask(BitMask32.allOn())
    self.ship.setPos(0, 0, 0)
    #Attach the rigid body to the bullet world
    self.world.attachRigidBody(self.ship.node())
    #Parent the visual ship model to the ship node
    ship_model.reparentTo(self.ship)
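# A hypothetical extension sketch (not from the original source), following the
# docstring's advice; BasicShip stands in for whatever class defines init_ship.
class HeavyShip(BasicShip):
    def init_ship(self, model="models/ship.x"):
        super(HeavyShip, self).init_ship(model)
        self.ship.node().setMass(9)  # tweak the rigid body via self.ship.node()
        self.ship.setPos(0, 50, 0)   # basic transforms only through self.ship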
def verify_baby(self, baby, mom, dad):
    if debug.is_set(DEBUG_INTEGRITY):
        if not baby.verify():
            dcallstack(DEBUG_INTEGRITY)
            dprint(DEBUG_INTEGRITY, "baby.verify() failed.")
            dprint(DEBUG_INTEGRITY, "baby:\n", baby.deep_string())
            if mom:
                dprint(DEBUG_INTEGRITY, "mom:\n", mom.deep_string())
            if dad:
                dprint(DEBUG_INTEGRITY, "dad:\n", dad.deep_string())
    return
def load(self, paths=()):
    config_parser = RawConfigParser()
    loaded = config_parser.read(self.write_path)
    loaded += config_parser.read(paths)

    if config_parser.has_option("Internal", "version"):
        version = config_parser.get("Internal", "version")
    elif loaded:
        version = "0.3"
    else:
        version = CONFIG_VERSION

    if SW.Version(version) < SW.Version(CONFIG_VERSION):
        self.upgrade(config_parser, SW.Version(version))

    for section_name in config_parser.sections():
        if section_name not in self.sections:
            dprint(WARNING, "Skipping unknown section '%s'" % section_name)
            continue
        section = self.sections[section_name]
        for option in config_parser.options(section_name):
            if option not in section.items:
                dprint(WARNING, "Skipping unknown item '%s.%s'" %
                       (section_name, option))
                continue
            if option in section.lazy_items:
                section.lazy_items.remove(option)
            type_reader = {
                str: config_parser.get,
                bool: config_parser.getboolean,
                int: config_parser.getint,
                float: config_parser.getfloat,
                "pickle": lambda x, y: pickle.loads(config_parser.get(x, y)),
            }[section.item_types[option]]
            section[option] = type_reader(section_name, option)

    self["Internal"]["version"] = CONFIG_VERSION
def eat(self, grid):
    """
    Eat the food on the grid. Clear the grid and place the new head there.

    @grid: The grid containing food
    """
    grid.type = grids.BLANK
    grid.content = self.head
    # create the tail section
    tail = BodySection(self.body[-1].pos)
    tail.secID = len(self.body)
    self.move_forward(eat=True)
    self.body.append(tail)
    # allocate a grid for the tail
    tailGrid = self.world.field.get_grid_at(*tail.pos)
    tailGrid.type = grids.SNAKE
    tailGrid.content = tail
    dprint('eat')
    self.world.gen_food()
def verify(self):
    # Network.verify() calls Genome.verify(), so try the network first, but not both
    if self.net:
        if not self.net.verify():
            dprint(DEBUG_ERROR, "Network failed to verify.")
            dprint(DEBUG_ERROR, "Network:", self.net.deep_string())
            return False
    elif self.gnome:  # 'gnome' is the legacy NEAT attribute name for the genome
        if not self.gnome.verify():
            dprint(DEBUG_ERROR, "Genome failed to verify.")
            dprint(DEBUG_ERROR, "Genome:", self.gnome.deep_string())
            return False
    return True
def __correct_summary_log(self, device, ts, num, temp, light):
    local_ts = time.time()
    if device not in self.__summary_log:
        self.__summary_log[device] = [(local_ts, ts, num, temp, light)]
    else:
        sumlog = self.__summary_log[device]
        assert sumlog
        prev_ts = 0
        for i, log in enumerate(sumlog):
            remote_ts = log[1]
            if ts == remote_ts:
                return
            if prev_ts < ts < remote_ts:
                dprint('correcting log at position {} < {} < {}'.format(
                    prev_ts, ts, remote_ts))
                sumlog.insert(i, (log[0], ts, num, temp, light))
                return
            prev_ts = remote_ts
        dprint('correcting log - appending data ts {}'.format(ts))
        sumlog.append((local_ts, ts, num, temp, light))
def __create_sheet(self, date):
    name = self.SHEET_NAME_FORMAT.format(date)
    scope = ['https://www.googleapis.com/auth/spreadsheets',
             'https://www.googleapis.com/auth/drive']
    creds = ServiceAccountCredentials.from_json_keyfile_name(self.SECRET_FILE, scope)
    self.__client = gspread.authorize(creds)
    try:
        self.__sheet = self.__client.open(name)
    except gspread.SpreadsheetNotFound:
        pass
    except gspread.exceptions.GSpreadException as e:
        raise SheetException('error while creating sheet: {}'.format(e))
    except google.auth.exceptions.TransportError as e:
        raise SheetException('sheets connection error: {}'.format(e))
    else:
        # a sheet with this name already exists: drop it and recreate below
        self.__client.del_spreadsheet(self.__sheet.id)
    self.__sheet = self.__client.create(name)
    self.__sheet_id = self.__sheet.id
    dprint('created {}'.format(self.__sheet_id))
    self.__sheet = self.__sheet.sheet1
    self.__sheet.update_title(date)
def GAP(predicts: torch.Tensor, confs: torch.Tensor, targets: torch.Tensor) -> float:
    ''' Computes GAP@1 '''
    if len(predicts.shape) != 1:
        dprint(predicts.shape)
        assert False
    if len(confs.shape) != 1:
        dprint(confs.shape)
        assert False
    if len(targets.shape) != 1:
        dprint(targets.shape)
        assert False
    assert predicts.shape == confs.shape and confs.shape == targets.shape

    sorted_confs, indices = torch.sort(confs, descending=True)
    confs = sorted_confs.cpu().numpy()  # fixed: keep confs aligned with the sort
    predicts = predicts[indices].cpu().numpy()
    targets = targets[indices].cpu().numpy()

    res, true_pos = 0.0, 0
    for i, (c, p, t) in enumerate(zip(confs, predicts, targets)):
        rel = int(p == t)
        true_pos += rel
        res += true_pos / (i + 1) * rel

    res /= targets.shape[0]  # FIXME: incorrect, not all test images depict landmarks
    return res
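# A hand-checked toy example of GAP@1 (not from the original source). With the
# confidence-sorted predictions below, hits land at ranks 1 and 3, so
# GAP = (1/1 + 2/3) / 3 ~= 0.556.
import torch

p = torch.tensor([3, 0, 7])        # predicted labels
c = torch.tensor([0.9, 0.8, 0.7])  # confidences (already descending here)
t = torch.tensor([3, 1, 7])        # ground-truth labels
print(GAP(p, c, t))                # ~0.5556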
def search_against_fragment(train_features: np.ndarray, test_features: np.ndarray) \
        -> Tuple[np.ndarray, np.ndarray]:
    if USE_GPU:
        # build a flat index (CPU)
        if USE_COSINE_DIST:
            index_flat = faiss.IndexFlat(DIMS, faiss.METRIC_INNER_PRODUCT)
        else:
            index_flat = faiss.IndexFlatL2(DIMS)
        # make it into a GPU index
        index_flat = faiss.index_cpu_to_gpu(res, 0, index_flat)
    else:
        index_flat = faiss.IndexFlatIP(DIMS)

    index_flat.add(train_features)
    print("total size of the database:", index_flat.ntotal)

    # print("sanity search...")
    # distances, index = index_flat.search(train_features[:10], K)  # actual search
    # print(index[:10])
    # print(distances[:10])

    print("searching")
    distances, index = index_flat.search(test_features, K)  # actual search
    dprint(index)
    dprint(distances)
    dprint(describe(distances.flatten()))
    return index, distances
def GAP(predicts: np.ndarray, confs: np.ndarray, targets: np.ndarray) -> float:
    ''' Computes GAP@1 '''
    if len(targets.shape) != 1:
        dprint(targets.shape)
        assert False
    assert predicts.shape == confs.shape

    res, true_pos = 0.0, 0
    num_predicts_per_sample = confs.shape[1]
    num_targets = len(targets)

    predicts = predicts.flatten()
    confs = confs.flatten()
    targets = np.repeat(targets.reshape(-1, 1), num_predicts_per_sample, axis=1)
    dprint(targets)
    targets = targets.flatten()
    dprint(targets)

    sorting_idx = np.argsort(-confs)
    predicts = predicts[sorting_idx]
    targets = targets[sorting_idx]

    for i, (p, t) in enumerate(zip(tqdm(predicts), targets)):
        rel = int(p == t)
        true_pos += rel
        res += true_pos / (i + 1) * rel

    res /= num_targets  # TODO: incorrect, not all test images depict landmarks
    return res
def replace_amp(groups):
    ent = groups.group('amps')
    if not do_replace_specials and ent in ["lt", "gt", "amp"]:
        return groups.group()
    if ent in htmlentitydefs.name2codepoint:
        return unichr(htmlentitydefs.name2codepoint[ent])
    if ent == "apos":
        return "'"
    if ent[0] == "#":
        try:
            if ent[1] == "x":
                return unichr(int(ent[2:], 16))  # fixed: skip the "#x" prefix
            return unichr(int(ent[1:]))
        except ValueError:
            from debug import dprint, WARNING
            dprint(WARNING, "Invalid int in html escape", groups.group(0))
    return ent
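# A minimal usage sketch (not from the original source): replace_amp is a
# re.sub callback over entity matches with a named group 'amps'; it relies on
# the module-level do_replace_specials flag and the Python 2 stdlib used above.
import re

do_replace_specials = True
text = "Caf&eacute; &amp; bar &#x41;"
print(re.sub(r"&(?P<amps>#?\w+);", replace_amp, text))  # Café & bar A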
def spawn(self, g, size):
    #//Create size copies of the Genome
    #//Start with perturbed linkweights
    for count in range(1, size + 1):
        dprint(DEBUG_INFO, "Creating organism %d" % (count,))
        new_genome = g.duplicate(count)
        new_genome.mutate_link_weights(1.0, 1.0, Genome.COLDGAUSSIAN)
        new_genome.randomize_traits()
        new_organism = Organism()
        new_organism.SetFromGenome(0.0, new_genome, 1)
        self.organisms.append(new_organism)

    #//Keep a record of the innovation and node number we are on
    self.cur_node_id = new_genome.get_last_node_id()
    self.cur_innov_num = new_genome.get_last_gene_innovnum()

    #//Separate the new Population into species
    self.speciate()
    return True
def read(self, xmlfile, root=None, stub=False):
    try:
        doc = etree.parse(xmlfile)
    except etree.XMLSyntaxError as e:
        debug.dprint("Invalid XML.")
        debug.dprint(e)
        return None

    self.lost_eles = []
    self.added_eles = []
    self.lost_attrs = []
    self.added_attrs = []

    if root is None:
        datatree = self.valid_children(":start")[0]
    else:
        if stub:
            datatree = self.valid_node_stub(root)
        else:
            datatree = self.valid_node(root)

    xmlnode = doc.getroot()
    self.xml_read_merge(datatree, xmlnode)
    self.xml_read_core(datatree.get_current_tree(), xmlnode, doc)

    if len(self.lost_eles) != 0:
        debug.deprint("WARNING: Lost XML elements:\n" + str(self.lost_eles))
    if len(self.added_eles) != 0:
        debug.deprint("WARNING: Added XML elements:\n" + str(self.added_eles))
    if len(self.lost_attrs) != 0:
        debug.deprint("WARNING: Lost XML attributes:\n" + str(self.lost_attrs))
    if len(self.added_attrs) != 0:  # fixed: previously re-tested added_eles here
        debug.deprint("WARNING: Added XML attributes:\n" + str(self.added_attrs))

    return datatree
def _upgrade_03_to_04(self, config_parser):
    try:
        # upgrade font
        font = config_parser.get("Html", "font_name")
        size = config_parser.getint("Html", "base_text_size")
        # only upgrade if it wasn't the same
        if font != "Arial" or size != 10:
            if not config_parser.has_section("Font"):
                config_parser.add_section("Font")
            config_parser.set("Font", "default_fonts",
                              pickle.dumps((font, int(size), False)))
        layout = config_parser.get("BPBible", "layout")
        if layout is not None:
            obj = pickle.loads(layout)
            d = {"en": obj}
            config_parser.set("BPBible", "layout", pickle.dumps(d))
    except (NoSectionError, NoOptionError), e:
        dprint(WARNING, "Error on upgrading", e)
def update(self):
    """
    @return: True if something updated. False if still blocking.
    """
    if len(self.waitingList) > 1:
        self.fail()
    if self._fail:
        self._fail = False
        dprint(self, 'failed')
        for target, on_succeed, on_fail in self.waitingList:
            if on_fail:
                on_fail()
        self.waitingList = []
        return True
    if self.owner is None:
        if len(self.waitingList) == 1:
            target, on_succeed, on_fail = self.waitingList.pop()
            if config.PRINT_SYNC:
                dprint('give', self, 'to', target)
            if on_succeed:
                on_succeed()
            self.owner = target
            return True
    return False
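# A hypothetical driver sketch (not from the original source). It assumes
# acquire(target, on_succeed, on_fail) merely queues a waiter; update() then
# resolves the queue once per tick: a single waiter wins the lock, while
# competing waiters all fail (the len(self.waitingList) > 1 branch above).
lock = Lock()
lock.acquire('snake_a', lambda: print('snake_a got the grid'), None)
lock.update()  # -> True; owner is now 'snake_a'
lock.update()  # -> False; nothing left to resolve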
def send_time(self, timeout=None):
    t = time.time()
    dprint('sending time {}'.format(int(t)))
    self.sport.send('{} {}'.format(self.SYSTEM_TIME, int(t)))
    time.sleep(1)
    line = None
    start_t = t = time.time()
    while timeout is None or (t - start_t < timeout):
        line = self.sport.readline()
        if line:
            if line.strip() == self.SYSTEM_OK:
                dprint('time ok')
                return True
            else:
                dprint('bad line: {}'.format(line))
        t = time.time()
    dprint('protocol timeout')
    return False
def TestXmlFiles(self, testDir, depth):
    debug.dprint("Checking xml files:", 1)
    for filename in self._TestFiles("xml", testDir, depth):
        try:
            xmlParse = xml.dom.minidom.parse(filename)
            debug.dprint(filename + " : Pass", 1)
            self._passes += 1
        except xml.parsers.expat.ExpatError as e:
            debug.dprint(filename + " : Fail", 1)
            self._optionErrors[filename] = e  # store the error, not the class
    return
def _TestSingle_file(self, filename, ignoreValidXMLCheck=False):
    optionsTree = self.sch.read(filename)
    lost_eles, added_eles, lost_attrs, added_attrs = self.sch.read_errors()
    if ignoreValidXMLCheck:
        if len(lost_eles) + len(lost_attrs) == 0:
            debug.dprint(filename + " : Pass", 1)
            self._passes += 1
        else:
            debug.dprint(filename + " : Fail", 1)
            self._optionErrors[filename] = (lost_eles, added_eles,
                                            lost_attrs, added_attrs)
    else:
        if (len(lost_eles) + len(added_eles) + len(lost_attrs) +
                len(added_attrs) == 0 and optionsTree.valid):
            debug.dprint(filename + " : Pass", 1)
            self._passes += 1
        else:
            debug.dprint(filename + " : Fail", 1)
            self._optionErrors[filename] = (lost_eles, added_eles,
                                            lost_attrs, added_attrs)
def print_recursively(self, indent=""):
    s = self.__str__()
    debug.dprint(indent + ' ' + s.replace('\n', '\n' + indent + ' '), 0, newline=False)
    debug.dprint("", 0)
    for i in range(len(self.children)):
        if isinstance(self.children[i], Tree):
            self.children[i].print_recursively(indent + ">>")
        elif isinstance(self.children[i], choice.Choice):
            ref = self.children[i].get_current_tree()
            ref.print_recursively(indent + ">>")
        if i < len(self.children) - 1:
            debug.dprint("", 0)
    return
def generate_submission(val_loader: Any, test_loader: Any, model: Any,
                        epoch: int, model_path: Any) -> None:
    score, threshold = validate(val_loader, model, epoch)
    predicts, _ = inference(test_loader, model)
    dprint(predicts.shape)

    labels = [" ".join([str(i) for i, p in enumerate(pred) if p > threshold])
              for pred in tqdm(predicts)]
    dprint(len(labels))
    dprint(np.array(labels))

    sub = test_loader.dataset.df
    sub['attribute_ids'] = labels
    sub_name = f'submissions_{os.path.basename(model_path)[:-4]}.csv'
    sub.to_csv(sub_name, index=False)
1. define level-2 train and validation sets
2. for every sample from validation set, find K nearest samples from the train set
3. make a prediction about classes
4. calculate the metric
5. take full train set
6. for every sample from the test set, find K nearest samples from the full train set
7. make a prediction
8. generate submission
'''
predict_test = sys.argv[1] == '--test'

# load distances info
dist_file = np.load(sys.argv[2], allow_pickle=True)
distances, indices = dist_file['distances'], dist_file['indices']
dprint(distances.shape)
dprint(indices.shape)
dprint(np.max(indices.flatten()))

# load dataframe
full_train_df = pd.read_csv('../data/train.csv')
full_train_df.drop(columns='url', inplace=True)

train_df = pd.read_csv('../data/splits/50_samples_18425_classes_fold_0_train.csv')
train_mask = ~full_train_df.id.isin(train_df.id)
knn_train_df = full_train_df.loc[train_mask]
dprint(knn_train_df.shape)

if predict_test:
    df = pd.read_csv('../data/test.csv')
    df = df.loc[df.id.apply(lambda img: os.path.exists(
def run() -> float:
    np.random.seed(0)
    model_dir = config.experiment_dir
    logger.info('=' * 50)

    train_loader, val_loader, test_loader = load_data(args.fold)

    logger.info(f'creating a model {config.model.arch}')
    model = create_model(config, pretrained=args.weights is None).cuda()
    criterion = get_loss(config)

    if args.summary:
        torchsummary.summary(model, (3, config.model.input_size, config.model.input_size))

    if args.lr_finder:
        optimizer = get_optimizer(config, model.parameters())
        lr_finder(train_loader, model, criterion, optimizer)
        sys.exit()

    if args.weights is None and config.train.head_only_warmup:
        logger.info('-' * 50)
        logger.info(f'doing warmup for {config.train.warmup.steps} steps')
        logger.info(f'max_lr will be {config.train.warmup.max_lr}')

        optimizer = get_optimizer(config, model.parameters())
        warmup_scheduler = get_warmup_scheduler(config, optimizer)

        freeze_layers(model)
        train_epoch(train_loader, model, criterion, optimizer, 0,
                    warmup_scheduler, None, config.train.warmup.steps)
        unfreeze_layers(model)

    if args.weights is None and config.train.enable_warmup:
        logger.info('-' * 50)
        logger.info(f'doing warmup for {config.train.warmup.steps} steps')
        logger.info(f'max_lr will be {config.train.warmup.max_lr}')

        optimizer = get_optimizer(config, model.parameters())
        warmup_scheduler = get_warmup_scheduler(config, optimizer)
        train_epoch(train_loader, model, criterion, optimizer, 0,
                    warmup_scheduler, None, config.train.warmup.steps)

    optimizer = get_optimizer(config, model.parameters())

    if args.weights is None:
        last_epoch = -1
    else:
        last_checkpoint = torch.load(args.weights)
        model_arch = last_checkpoint['arch'].replace('se_', 'se')

        if model_arch != config.model.arch:
            dprint(model_arch)
            dprint(config.model.arch)
        assert model_arch == config.model.arch

        model.load_state_dict(last_checkpoint['state_dict'])
        if 'optimizer' in last_checkpoint.keys():
            optimizer.load_state_dict(last_checkpoint['optimizer'])
        logger.info(f'checkpoint loaded: {args.weights}')

        last_epoch = last_checkpoint['epoch'] if 'epoch' in last_checkpoint.keys() else 99
        logger.info(f'loaded the model from epoch {last_epoch}')

    if args.lr != 0:
        set_lr(optimizer, float(args.lr))
    elif 'lr' in config.optimizer.params:
        set_lr(optimizer, config.optimizer.params.lr)
    elif 'base_lr' in config.scheduler.params:
        set_lr(optimizer, config.scheduler.params.base_lr)

    if not args.cosine:
        lr_scheduler = get_scheduler(
            config.scheduler, optimizer,
            last_epoch=(last_epoch if config.scheduler.name != 'cyclic_lr' else -1))
        assert config.scheduler2.name == ''
        lr_scheduler2 = get_scheduler(config.scheduler2, optimizer, last_epoch=last_epoch) \
            if config.scheduler2.name else None
    else:
        epoch_size = min(len(train_loader), config.train.max_steps_per_epoch) \
                     * config.train.batch_size

        set_lr(optimizer, float(config.cosine.start_lr))
        lr_scheduler = CosineLRWithRestarts(
            optimizer,
            batch_size=config.train.batch_size,
            epoch_size=epoch_size,
            restart_period=config.cosine.period,
            period_inc=config.cosine.period_inc,
            max_period=config.cosine.max_period)
        lr_scheduler2 = None

    if args.predict_oof or args.predict_test:
        print('inference mode')
        assert args.weights is not None

        if args.predict_oof:
            gen_train_prediction(val_loader, model, last_epoch, args.weights)
        else:
            gen_test_prediction(test_loader, model, args.weights)
        sys.exit()

    logger.info(f'training will start from epoch {last_epoch + 1}')

    best_score = 0.0
    best_epoch = 0
    last_lr = get_lr(optimizer)
    best_model_path = args.weights

    for epoch in range(last_epoch + 1, config.train.num_epochs):
        logger.info('-' * 50)

        if not is_scheduler_continuous(lr_scheduler) and lr_scheduler2 is None:
            # if we have just reduced LR, reload the best saved model
            lr = get_lr(optimizer)

            if lr < last_lr - 1e-10 and best_model_path is not None:
                logger.info(f'learning rate dropped: {lr}, reloading')
                last_checkpoint = torch.load(best_model_path)

                assert last_checkpoint['arch'] == config.model.arch
                model.load_state_dict(last_checkpoint['state_dict'])
                optimizer.load_state_dict(last_checkpoint['optimizer'])
                logger.info(f'checkpoint loaded: {best_model_path}')
                set_lr(optimizer, lr)

            last_lr = lr

        if config.train.lr_decay_coeff != 0 and epoch in config.train.lr_decay_milestones:
            n_cycles = config.train.lr_decay_milestones.index(epoch) + 1
            total_coeff = config.train.lr_decay_coeff ** n_cycles
            logger.info(f'artificial LR scheduler: made {n_cycles} cycles, '
                        f'decreasing LR by {total_coeff}')

            set_lr(optimizer, config.scheduler.params.base_lr * total_coeff)
            lr_scheduler = get_scheduler(config.scheduler, optimizer,
                                         coeff=total_coeff, last_epoch=-1)
            # (last_epoch if config.scheduler.name != 'cyclic_lr' else -1))

        if isinstance(lr_scheduler, CosineLRWithRestarts):
            restart = lr_scheduler.epoch_step()
            if restart:
                logger.info('cosine annealing restarted, resetting the best metric')
                best_score = min(config.cosine.min_metric_val, best_score)

        train_epoch(train_loader, model, criterion, optimizer, epoch,
                    lr_scheduler, lr_scheduler2, config.train.max_steps_per_epoch)
        score, _, _ = validate(val_loader, model, epoch)

        if type(lr_scheduler) == ReduceLROnPlateau:
            lr_scheduler.step(metrics=score)
        elif not is_scheduler_continuous(lr_scheduler):
            lr_scheduler.step()

        if type(lr_scheduler2) == ReduceLROnPlateau:
            lr_scheduler2.step(metrics=score)
        elif lr_scheduler2 and not is_scheduler_continuous(lr_scheduler2):
            lr_scheduler2.step()

        is_best = score > best_score
        best_score = max(score, best_score)
        if is_best:
            best_epoch = epoch

        if is_best:
            best_model_path = os.path.join(
                model_dir, f'{config.version}_f{args.fold}_e{epoch:02d}_{score:.04f}.pth')

            data_to_save = {
                'epoch': epoch,
                'arch': config.model.arch,
                'state_dict': model.state_dict(),
                'score': score,
                'optimizer': optimizer.state_dict(),
                'config': config
            }

            torch.save(data_to_save, best_model_path)
            logger.info(f'a snapshot was saved to {best_model_path}')

    logger.info(f'best score: {best_score:.04f}')
    return -best_score
import sys
import pickle

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
from scipy.stats import describe

from debug import dprint

with open('imagenet1000.txt') as f:
    imagenet = eval(f.read())

categories = list(imagenet.values())

with open('imagenet_classes.pkl', 'rb') as ff:
    predicts = pickle.load(ff)

predicts = np.vstack(predicts)
dprint(predicts.shape)
dprint(describe(predicts.flatten()))

classes = np.argmax(predicts, axis=1)
# confs = predicts[:, classes]  # this hangs my PC
dprint(classes)

imagenet_classes = [categories[classes[i]] for i in tqdm(range(predicts.shape[0]))]
confs = [predicts[i, classes[i]] for i in tqdm(range(predicts.shape[0]))]

assert len(sys.argv) == 2
sub = pd.read_csv(sys.argv[1])

fig = plt.figure(figsize=(16, 16))