def details(name):
    """Render the details page for book *name*.

    Looks up the current account's read history to decide which chapters
    are new, then lists every chapter directory under the book's image
    root together with the name of its first page image.

    :param name: book directory name under ``constants.BOOK_IMGS_PATH``.
    :return: rendered ``books/details.html`` template.
    """
    # dict.get avoids the LBYL double lookup on the session mapping
    account_name = session.get('account_name', '')
    history = ReadHistory.query.filter(
        ReadHistory.account == account_name,
        ReadHistory.book == name).first()
    latest_read_chapter = history.latest_chapter if history else ''
    root = os.path.join(constants.BOOK_IMGS_PATH, name)
    chapters = [{
        'name': chapter_name,
        # first image file (sorted order) inside the chapter dir, '_' fallback
        'first_page': utils.first_or_default([
            img_name for img_name in sorted(
                os.listdir(os.path.join(root, chapter_name)))
            if os.path.isfile(os.path.join(root, chapter_name, img_name))
        ], '_'),
        # chapters are compared lexicographically against the last read one —
        # assumes chapter names sort in reading order; TODO confirm
        'is_new': chapter_name > latest_read_chapter
    } for chapter_name in reversed(sorted(os.listdir(root)))
        if os.path.isdir(os.path.join(root, chapter_name))]
    return render_template('books/details.html', book={
        'name': name,
        'chapters': chapters
    })
async def add_role(self, server, role_name, member):
    """Give *member* the role named *role_name* and drop the "Wanderers" role.

    :param server: server/guild whose role list is searched by name.
    :param role_name: name of the role to add to the member.
    :param member: member whose role set is replaced.
    """
    server_roles = list(server.roles)
    added_role = first(server_roles, lambda x: x.name == role_name)
    wanderer = first_or_default(server_roles, lambda x: x.name == "Wanderers")
    # Keep every current role except the "Wanderers" placeholder.
    roles = [x for x in member.roles if x != wanderer]
    # Guard: if no role matches role_name the lookup can presumably yield
    # None — don't let a None leak into the role list passed to replace_roles.
    if added_role is not None:
        roles.insert(0, added_role)
    await self.replace_roles(member, *roles)
def kick_session(self, session_id, sender):
    """Forcefully log out the connected session identified by *session_id*.

    :param session_id: id of the websocket session to terminate.
    :param sender: entity requesting the kick; its user name is reported
        to the kicked session.
    :raises RpcException: ENOENT when no such session is connected.
    """
    connections = self.dispatcher.ws_server.connections
    target = first_or_default(
        lambda s: s.session_id == session_id,
        connections)
    if not target:
        raise RpcException(
            errno.ENOENT,
            'Session {0} not found'.format(session_id))
    target.logout('Kicked out by {0}'.format(sender.user.name))
def split_contour(contour, max_angle):
    """Split *contour* into polygon fragments at sharp corner points.

    Points whose approximated angle is >= *max_angle* act as cut points;
    the contour is sliced between consecutive cut points (plus one
    wrap-around fragment), fragments of 2 or fewer points are dropped, and
    near-duplicate fragments are collapsed, keeping the larger one.
    """
    # compare directly against the reference ellipse right away???
    pts = contour.measurements().approx_points
    angles = contour.measurements().approx_points_angles
    # boolean mask of the "sharp" points where the contour will be cut
    mask = max_angle <= angles
    indexes = np.where(mask)[0]
    if indexes.size == 0:
        # no sharp corners: the whole contour stays a single polygon
        return [Polygon(pts)]
    parts = []
    for i in range(len(indexes) - 1):
        # todo: maybe build the list of unique contours inside this loop??
        index = indexes[i]
        next_index = indexes[i + 1]
        if (next_index - index) < 2:
            # skip fragments of 2 or fewer points
            continue
        part_pts = pts[index:next_index + 1]
        parts.append(Polygon(part_pts))
    # wrap-around fragment: from the last cut point back around to the first
    part_pts = np.append(pts[indexes[-1]:], pts[:indexes[0] + 1], axis=0)
    if len(part_pts) > 2:
        # skip fragments of 2 or fewer points
        parts.append(Polygon(part_pts))
    if len(parts) == 0:
        return []
    # DEBUG: uncomment
    # _parts = []
    # for p in parts:
    #     if len(p.points) < 4:
    #         _parts.append(p)
    #     else:
    #         splits = ContourSplitter.split_by_rotation_change_points(p.points)
    #         _parts.extend([Polygon(pts) for pts in splits])
    # parts = _parts
    ############################
    # collapse fragments that are equivalent to an already-kept fragment
    unique_parts = [parts[0]]
    for i in range(1, len(parts)):
        part = parts[i]
        # NOTE(review): this utils.first_or_default returns a (value, index)
        # pair, unlike the single-value helpers used elsewhere — confirm
        uq_part, uq_part_index = \
            utils.first_or_default(unique_parts,
                                   lambda uniq_part: uniq_part.is_equivalent(part, 4))
        if uq_part is None:
            unique_parts.append(part)
        elif part.arc_len > uq_part.arc_len:
            # compare sizes and use bigger part
            unique_parts[uq_part_index] = part
    return unique_parts
def get_connected_clients(self, share_name):
    """Return the clients currently connected to the AFP service.

    Scans running processes for ``afpd`` and reports one entry per process
    that has an inet connection on the AFP port (548).

    :param share_name: accepted for interface compatibility; the visible
        code does not filter by it — every entry carries ``share: None``.
    :return: list of dicts with ``host``, ``share`` and ``user`` keys.
    """
    result = []
    for proc in psutil.process_iter():
        if proc.name() != 'afpd':
            continue
        conns = filter(lambda c: c.pid == proc.pid,
                       psutil.net_connections('inet'))
        # AFP listens on TCP port 548
        conn = first_or_default(lambda c: c.laddr[1] == 548, conns)
        if not conn:
            continue
        # NOTE(review): laddr is the local endpoint; the client's address is
        # normally raddr — confirm which side 'host' should report.
        result.append({
            'host': conn.laddr[0],
            'share': None,
            'user': proc.username()
        })
    # BUG FIX: the original built `result` but never returned it, so the
    # function always returned None.
    return result
def details(name):
    """Show the chapter list for book *name*, flagging unread chapters."""
    account_name = session['account_name'] if 'account_name' in session else ''
    history = ReadHistory.query.filter(
        ReadHistory.account == account_name,
        ReadHistory.book == name).first()
    latest_read_chapter = history.latest_chapter if history else ''

    root = os.path.join(constants.BOOK_IMGS_PATH, name)
    chapters = []
    # newest chapter first (reverse lexicographic directory order)
    for chapter_name in sorted(os.listdir(root), reverse=True):
        chapter_dir = os.path.join(root, chapter_name)
        if not os.path.isdir(chapter_dir):
            continue
        page_files = [entry for entry in sorted(os.listdir(chapter_dir))
                      if os.path.isfile(os.path.join(chapter_dir, entry))]
        chapters.append({
            'name': chapter_name,
            'first_page': utils.first_or_default(page_files, '_'),
            'is_new': chapter_name > latest_read_chapter,
        })

    return render_template('books/details.html',
                           book={'name': name, 'chapters': chapters})
async def remove_rank(self, server, rank_name, member):
    """Strip the rank named *rank_name* from *member*, keeping other roles."""
    all_roles = list(server.roles)
    unwanted = first_or_default(all_roles, lambda r: r.name == rank_name)
    kept = [role for role in member.roles if role != unwanted]
    await self.replace_roles(member, *kept)
def run(self, name, updated_params):
    """Apply *updated_params* to the volume called *name*.

    Two kinds of update are visible here:

    * ``name`` — renames the ZFS pool by exporting it and importing it
      under the new name, then reconfigures the mountpoint and updates
      the datastore entry.
    * ``topology`` — validates the requested vdev changes (new vdevs
      have no ``guid``; existing ones may only grow disk->mirror or
      mirror->mirror by one disk), formats the new disks, and extends
      the pool.

    :raises TaskException: ENOENT when the volume does not exist; EINVAL
        for any disallowed topology change.
    """
    volume = self.datastore.get_one('volumes', ('name', '=', name))
    if not volume:
        raise TaskException(errno.ENOENT, 'Volume {0} not found'.format(name))

    if 'name' in updated_params:
        # Renaming pool. Need to export and import again using different name
        new_name = updated_params['name']
        self.join_subtasks(self.run_subtask('zfs.pool.export', name))
        self.join_subtasks(self.run_subtask('zfs.pool.import', volume['id'], new_name))

        # Rename mountpoint
        self.join_subtasks(self.run_subtask('zfs.configure', new_name, new_name, {
            'mountpoint': {'value': '{0}/{1}'.format(VOLUMES_ROOT, new_name)}
        }))

        # keep the datastore record in sync with the renamed pool
        volume['name'] = new_name
        self.datastore.update('volumes', volume['id'], volume)

    if 'topology' in updated_params:
        new_vdevs = {}       # group -> vdevs to be created from scratch
        updated_vdevs = []   # existing vdevs to be extended in place
        params = {}
        subtasks = []
        old_topology = self.dispatcher.call_sync(
            'volumes.query',
            [('name', '=', name)],
            {'single': True, 'select': 'topology'}
        )

        for group, vdevs in list(updated_params['topology'].items()):
            for vdev in vdevs:
                if 'guid' not in vdev:
                    # no guid means a brand-new vdev, not an extension
                    new_vdevs.setdefault(group, []).append(vdev)
                    continue

                # look for vdev in existing configuration using guid
                old_vdev = first_or_default(
                    lambda v: v['guid'] == vdev['guid'],
                    old_topology[group])
                if not old_vdev:
                    raise TaskException(
                        errno.EINVAL,
                        'Cannot extend vdev {0}: not found'.format(vdev['guid']))

                # unchanged vdev: nothing to do
                if compare_vdevs(old_vdev, vdev):
                    continue

                if old_vdev['type'] not in ('disk', 'mirror'):
                    raise TaskException(
                        errno.EINVAL,
                        'Cannot extend vdev {0}, {1} is not mirror or disk'.format(
                            old_vdev['guid'], old_vdev['type']
                        )
                    )

                if vdev['type'] != 'mirror':
                    raise TaskException(
                        errno.EINVAL,
                        'Cannot change vdev {0} type ({1}) to {2}'.format(
                            old_vdev['guid'], old_vdev['type'], vdev['type']
                        )
                    )

                # a mirror may only grow by exactly one disk per update
                if old_vdev['type'] == 'mirror' and vdev['type'] == 'mirror' and \
                        len(old_vdev['children']) + 1 != len(vdev['children']):
                    raise TaskException(
                        errno.EINVAL,
                        'Cannot extend mirror vdev {0} by more than one disk at once'.format(vdev['guid'])
                    )

                # a single disk may only become a two-way mirror
                if old_vdev['type'] == 'disk' and vdev['type'] == 'mirror' and \
                        len(vdev['children']) != 2:
                    raise TaskException(
                        errno.EINVAL,
                        'Cannot extend disk vdev {0} by more than one disk at once'.format(vdev['guid'])
                    )

                # the new disk is the last child of the requested mirror
                updated_vdevs.append({
                    'target_guid': vdev['guid'],
                    'vdev': vdev['children'][-1]
                })

        # format disks backing brand-new vdevs (swap only on the data group)
        for vdev, group in iterate_vdevs(new_vdevs):
            if vdev['type'] == 'disk':
                subtasks.append(self.run_subtask('disks.format.gpt', vdev['path'], 'freebsd-zfs', {
                    'blocksize': params.get('blocksize', 4096),
                    'swapsize': params.get('swapsize', 2048) if group == 'data' else 0
                }))

        # format the disks being attached to existing vdevs
        for vdev in updated_vdevs:
            subtasks.append(self.run_subtask('disks.format.gpt', vdev['vdev']['path'], 'freebsd-zfs', {
                'blocksize': params.get('blocksize', 4096),
                'swapsize': params.get('swapsize', 2048)
            }))

        self.join_subtasks(*subtasks)

        # translate raw disk paths to gptid paths before extending the pool
        new_vdevs = convert_topology_to_gptids(self.dispatcher, new_vdevs)

        for vdev in updated_vdevs:
            vdev['vdev']['path'] = get_disk_gptid(self.dispatcher, vdev['vdev']['path'])

        self.join_subtasks(self.run_subtask(
            'zfs.pool.extend', name, new_vdevs, updated_vdevs)
        )