Example #1
	def __evUpdatedInfo(self):
		if self.newService and config.plugins.threed.autothreed.value != "0" and self.session.nav.getCurrentlyPlayingServiceReference():
			self.newService = False
			ref = self.session.nav.getCurrentService()
			serviceRef = self.session.nav.getCurrentlyPlayingServiceReference()
			spath = serviceRef.getPath()
			if spath:
				if spath[0] == '/':
					serviceHandler = eServiceCenter.getInstance()
					r = eServiceReference(ref.info().getInfoString(iServiceInformation.sServiceref))
					info = serviceHandler.info(r)
					if info:
						name = ServiceReference(info.getInfoString(r, iServiceInformation.sServiceref)).getServiceName().replace('\xc2\x86', '').replace('\xc2\x87', '')
					else:
						name = os_basename(spath) # filename
				else:
					name = serviceRef.getName() # partnerbox servicename
			else:
				name =  ServiceReference(ref.info().getInfoString(iServiceInformation.sServiceref)).getServiceName().replace('\xc2\x86', '').replace('\xc2\x87', '')
			if "3d" in name.lower():
				if config.plugins.threed.autothreed.value == "1":
					mode = THREE_D_SIDE_BY_SIDE
				else:
					mode = THREE_D_TOP_BOTTOM
			else:
				mode = THREE_D_OFF
			if self.lastmode != mode:
				switchmode(mode)
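Note: the snippets in this listing call os_basename and its siblings without showing their imports. A minimal sketch of the aliases they appear to assume (the aliasing pattern itself is shown explicitly in Example #11):

from os import walk as os_walk
from os.path import abspath as os_abspath
from os.path import basename as os_basename
from os.path import dirname as os_dirname
from os.path import isdir as os_isdir
from os.path import isfile as os_isfile
from os.path import join as os_join
from os.path import splitext as os_splitext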
Example #2
	def __evUpdatedInfo(self):
		if self.newService and config.plugins.threed.autothreed.value != "0" and self.session.nav.getCurrentlyPlayingServiceReference():
			self.newService = False
			ref = self.session.nav.getCurrentService() 
			serviceRef = self.session.nav.getCurrentlyPlayingServiceReference()
			spath = serviceRef.getPath()
			if spath:
				if spath[0] == '/':
					serviceHandler = eServiceCenter.getInstance()
					r = eServiceReference(ref.info().getInfoString(iServiceInformation.sServiceref))
					info = serviceHandler.info(r)
					if info:
						name = ServiceReference(info.getInfoString(r, iServiceInformation.sServiceref)).getServiceName().replace('\xc2\x86', '').replace('\xc2\x87', '')
					else:
						name = os_basename(spath) # filename
				else:
					name = serviceRef.getName() # partnerbox servicename
			else:
				name =  ServiceReference(ref.info().getInfoString(iServiceInformation.sServiceref)).getServiceName().replace('\xc2\x86', '').replace('\xc2\x87', '')
			if "3d" in name.lower():
				if config.plugins.threed.autothreed.value == "1":
					mode = THREE_D_SIDE_BY_SIDE
				else:
					mode = THREE_D_TOP_BOTTOM
			else:
				mode = THREE_D_OFF
			if self.lastmode != mode:
				switchmode(mode)
Example #3
    def get_xsd_filepath(self, file_data):
        """提出者別タクソノミのxsdファイルパス取得"""
        # xsdファイル名取得
        element = self.root.find('.//%s' % self.link_schema_ref)

        if file_data is None:
            # Build the absolute path
            return os_join(os_dirname(self.file), element.get(self.xlink_href))
        else:
            return os_basename(element.get(self.xlink_href))
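The first branch above resolves the schemaRef href against the directory of the instance document. A rough standalone illustration with plain os.path names (the paths are hypothetical):

import os.path

instance_file = '/data/edinet/sample-instance.xbrl'  # hypothetical instance document
href = 'sample-instance.xsd'                         # value read from the schemaRef element
print(os.path.join(os.path.dirname(instance_file), href))
# -> /data/edinet/sample-instance.xsd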
Example #4
    def __init__(self, filename, mode='r', depth=16, rate=44100, channels=2):
        """ AudioIO(filename, [mode='r', depth=16, rate=44100, channels=2])
        -> Open an audio file for file-like access.

        """

        if not all([i in self._supported_modes for i in mode]):
            raise ValueError("(%s) Mode has to be one of %s." %
                             (self.__class__.__name__,
                             self._supported_modes))

        if depth not in self._valid_depth:
            raise ValueError("(%s) Invalid depth %s, valid depths are %s" %
                             (self.__class__.__name__,
                              depth,
                              self._valid_depth))

        super(AudioIO, self).__init__()

        if filename:
            if 'r' in mode and not os_isfile(filename):
                if not filename.startswith('http://'):
                    raise(IOError("%s: No such file or directory" % filename))

        self.three_byte = False

        # self._buffer_size = 8192  # 16384 // (depth // (8 // channels))
        self._buffer_size = 8192  # 16384 // (depth // (8 // channels))

        self._filename = filename
        self._mode = mode

        self._depth = depth
        self._rate = rate
        self._channels = channels
        self._floatp = False

        self._width = self._depth // 8

        self._length = 1

        self._bigendian = False
        self._unsigned = False

        self._loops = -1
        self._loop_count = 0

        self._closed = True

        # The default name is the filename minus the extension.
        name = os_basename(self._filename.rsplit('.', 1)[0])

        self._info_dict = {'name': name}
Example #5
def _build_mod_list(mod_path):
    """ _build_mod_list(mod_path, suffix) -> Add all the paths in mod_path to
    sys.path and return a list of all modules in sys.path ending in suffix.

    """

    mod_path = [mod_path] if type(mod_path) is str else mod_path

    # Add the path of this file to the search path.
    mod_path.append(os_abspath(os_dirname(__file__)))

    # Build the list of modules in mod_path(s).
    mod_list = ('{0}.{1}.{2}'.format(os_basename(path), \
                    os_basename(root).replace(os_basename(path), ''), \
                    name.rsplit('.', 1)[0]).replace('..', '.') \
                    for path in mod_path \
                        if os_isdir(path) \
                            for root, dirs, files in os_walk(path) \
                                for name in files \
                                    if name.endswith('.py'))

    return mod_list
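A rough illustration of the dotted names this generator yields, using a hypothetical plugin layout:

from os.path import basename as os_basename

# Hypothetical: mod_path contains '/opt/plugins', which holds codecs/wave_file.py.
path = '/opt/plugins'
root = '/opt/plugins/codecs'
name = 'wave_file.py'
mod_name = '{0}.{1}.{2}'.format(os_basename(path),
                                os_basename(root).replace(os_basename(path), ''),
                                name.rsplit('.', 1)[0]).replace('..', '.')
print(mod_name)  # -> plugins.codecs.wave_file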
Example #6
def _build_mod_list(mod_path: list) -> list:
    """ _build_mod_list(mod_path, suffix) -> Add all the paths in mod_path to
    sys.path and return a list of all modules in sys.path ending in suffix.

    """

    mod_path = [mod_path] if type(mod_path) is str else mod_path

    # Add the path of this file to the search path.
    mod_path.append(os_abspath(os_dirname(__file__)))

    # Build the list of modules in mod_path(s).
    mod_list = ('{0}.{1}.{2}'.format(os_basename(path), \
                    os_basename(root).replace(os_basename(path), ''), \
                    name.rsplit('.', 1)[0]).replace('..', '.') \
                    for path in mod_path \
                        if os_isdir(path) \
                            for root, dirs, files in os_walk(path) \
                                for name in files \
                                    if name.endswith('.py'))

    return mod_list
Example #7
    def __init__(self, filename, mode='r', depth=16, rate=44100, channels=2):
        """ AudioIO(filename, [mode='r', depth=16, rate=44100, channels=2])
        -> Open an audio file for file-like access.

        """

        if not all([i in self._supported_modes for i in mode]):
            raise ValueError("(%s) Mode has to be one of %s." %
                             (self.__class__.__name__, self._supported_modes))

        if depth not in self._valid_depth:
            raise ValueError(
                "(%s) Invalid depth %s, valid depths are %s" %
                (self.__class__.__name__, depth, self._valid_depth))

        super(AudioIO, self).__init__()

        if filename:
            if 'r' in mode and not os_isfile(filename):
                if not filename.startswith('http://'):
                    raise (IOError("%s: No such file or directory" % filename))

        self.three_byte = False

        # self._buffer_size = 8192  # 16384 // (depth // (8 // channels))
        self._buffer_size = 8192  # 16384 // (depth // (8 // channels))

        self._filename = filename
        self._mode = mode

        self._depth = depth
        self._rate = rate
        self._channels = channels
        self._floatp = False

        self._width = self._depth // 8

        self._length = 1

        self._bigendian = False
        self._unsigned = False

        self._loops = -1
        self._loop_count = 0

        self._closed = True

        # The default name is the filename minus the extension.
        name = os_basename(self._filename.rsplit('.', 1)[0])

        self._info_dict = {'name': name}
Example #8
def get_xbrl_files(file, re_xbrl_file_match):
    """XBRLファイルデータ取得"""
    xbrl_files = OrderedDict()

    if not os_isfile(file):
        print('not found %s' % file)
        return xbrl_files

    # Open the zip archive
    with ZipFile(file, 'r') as zip_obj:
        # Get the list of archived files
        infos = zip_obj.infolist()

        # Read the target files from the zip archive
        od_xbrl = OrderedDict()
        for info in infos:
            filename = os_basename(info.filename)

            # Read the data from the zip and add it to the dict
            if re_xbrl_file_match(info.filename):
                od_xbrl.update({filename: zip_obj.read(info.filename)})

    xbrl_files.update({'xbrl': od_xbrl})
    return xbrl_files
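A rough usage sketch; the archive name and the pattern are hypothetical stand-ins for values defined elsewhere in the project:

from re import compile as re_compile

re_xbrl_file_match = re_compile(r'.*\.xbrl$').match  # hypothetical pattern
xbrl_files = get_xbrl_files('Xbrl_Search_20180701.zip', re_xbrl_file_match)
for name, data in xbrl_files.get('xbrl', {}).items():
    print(name, len(data))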
Example #9
def get_codec(filename: str, mod_path: list = [], cached: bool = True, blacklist: list = []) -> AudioIO:
    """ get_codec(filename, mod_path=[], cached=True, blacklist=[]) -> Load the
    codecs in the path and return the first one that can play the file, or the
    one with the default attribute set.

        filename        The file the codec needs to handle
        mod_path        Additional search paths for modules
        cached          Use cached codecs if available
        blacklist       Modules not to load

    """

    # Codec cache dictionary
    global __codec_cache

    from urllib.parse import urlparse

    from .import_util import load_lazy_import, unload_lazy_import

    # Get the file extension.
    file_ext = os_splitext(filename)[1].lower()

    # Get protocol.
    file_prot = urlparse(filename).scheme

    if cached:
        # Load an already cached codec.
        if file_ext in __codec_cache:
            return __codec_cache[file_ext]
        elif file_prot in __codec_cache:
            return __codec_cache[file_prot]

    # Get a list of modules ending in '_file.py'
    mod_list = _build_mod_list(mod_path, "_file.py", blacklist)

    codec = None
    dummy = None

    # Make importing lazy.
    # load_lazy_import(mod_path=mod_path)

    # This package's name.
    this_pkgname = __name__.split(".", 1)[0]

    # Load the codec module that can handle the file.
    for path, name in mod_list:
        # Get the package name from path.
        pkgname = os_basename(path.rstrip("/"))

        # Import the package if it is different from this one.
        if pkgname != this_pkgname and pkgname:
            try:
                __import__(pkgname)
            except ImportError as err:
                continue

        # Load the module.
        try:
            module = import_module(".%s" % os_splitext(name)[0], pkgname)
        except ImportError as err:
            print("Skipping module: (%s) because of error: %s" % (name, err))
            continue

        # Get the filetypes and handler from module.
        supported_dict = getattr(module, "__supported_dict", {})

        # Get the handler.
        handler = getattr(module, supported_dict.get("handler", "dummy"), None)

        # Don't even check this module if it does not have a handler.
        if not handler:
            continue

        # Try not to use the dummy handler.
        if "dummy" in name:
            dummy = handler
            continue

        # Check the module dependencies.
        dependencies = supported_dict.get("dependencies", {})
        if not _check_dependencies(dependencies):
            continue

        issupported = supported_dict.get("issupported", lambda *a: False)
        ext = supported_dict.get("ext", [])
        protocol = supported_dict.get("protocol", [])

        default = supported_dict.get("default", False)

        # Add filetype handlers to the codec cache.
        __codec_cache.update(((key, handler) for key in ext))

        # Add protocol handlers to the codec cache.
        __codec_cache.update(((key, handler) for key in protocol))

        # Check if filename is supported.
        if issupported(filename) or file_ext in ext or file_prot in protocol:
            codec = handler
            if default:
                break
        elif not codec and ".*" in ext:
            codec = handler

    # Turn off lazy imports.
    # unload_lazy_import()

    # No codec was found so default to the dummy codec.
    if not codec:
        codec = dummy

    return codec
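A rough usage sketch of the lookup; the path is hypothetical, and the exact constructor arguments depend on which codec module gets picked:

filename = '/tmp/music/track01.ogg'         # hypothetical
codec = get_codec(filename)
if codec is not None:
    audio_file = codec(filename, mode='r')  # handlers follow the AudioIO interface
    print(audio_file.depth, audio_file.rate, audio_file.channels)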
Example #10
def get_io(fileobj: AudioIO, mod_path: list = [], cached: bool = True, blacklist: list = []) -> DevIO:
    """ get_io(fileobj, mod_path=[], cached=True, blacklist=[]) -> Finds a
    audio device that can take the data read from fileobj and returns it.

    """

    # IO device cache dictionary
    global __io_cache

    from .import_util import load_lazy_import, unload_lazy_import

    # Get the file input data type.
    annotations = getattr(getattr(fileobj, "read"), "__annotations__", {})
    file_input = annotations.get("return", str)

    # Get the file output data type.
    annotations = getattr(getattr(fileobj, "write"), "__annotations__", {})
    file_output = annotations.get("data", str)

    if cached:
        # Load an already cached audio device.
        if file_input in __io_cache:
            return __io_cache[file_input]
        elif file_output in __io_cache:
            return __io_cache[file_output]

    # Get a list of modules ending in '_io.py'
    mod_list = _build_mod_list(mod_path, "_io.py", blacklist)

    device = None
    dummy = None

    # Make importing lazy.
    # load_lazy_import(mod_path=mod_path)

    # This package's name.
    this_pkgname = __name__.split(".", 1)[0]

    # Load the device module that can handle the data from fileobj.
    for path, name in mod_list:
        # Get the package name from path.
        pkgname = os_basename(path.rstrip("/"))

        # Import the package if it is different from this one.
        if pkgname != this_pkgname and pkgname:
            try:
                __import__(pkgname)
            except ImportError as err:
                continue

        # Load the module.
        module = import_module(".%s" % os_splitext(name)[0], pkgname)

        # Get the filetypes and handler from module.
        supported_dict = getattr(module, "__supported_dict", {})

        handler = getattr(module, supported_dict.get("handler", "dummy"), None)

        if not handler:
            continue

        # Try not to use the dummy.
        if "dummy" in name:
            dummy = handler
            continue

        # Check the module dependencies.
        dependencies = supported_dict.get("dependencies", {})
        if not _check_dependencies(dependencies):
            continue

        input_t = supported_dict.get("input", [])
        output_t = supported_dict.get("output", [])

        default = supported_dict.get("default", False)

        # Add device input to io cache
        __io_cache.update(((key, handler) for key in input_t))

        # Add device output to io cache.
        __io_cache.update(((key, handler) for key in output_t))

        # Check if the fileobj's data type is supported.
        if "r" in fileobj.mode and file_input in output_t:
            device = handler
            if default:
                break
        elif "w" in fileobj.mode and file_output in input_t:
            device = handler
            if default:
                break

    # Turn off lazy imports.
    # unload_lazy_import()

    # No device was found so use the dummy_io.
    if not device:
        device = dummy

    return device
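A rough sketch of pairing a codec with a playback device; the device class's constructor arguments are an assumption, so that line is left commented:

filename = '/tmp/music/track01.ogg'                  # hypothetical
audio_file = get_codec(filename)(filename, mode='r')
device_cls = get_io(audio_file)
# device = device_cls(rate=audio_file.rate, channels=audio_file.channels)  # assumed signature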
Example #11
def main(args):
    """ Encode args['filename'] times.

    """

    from os.path import basename as os_basename
    from os.path import isfile as os_isfile
    from os.path import splitext as os_splitext
    from sys import stdin as sys_stdin
    from sys import stdout as sys_stdout
    from select import select
    from time import sleep as time_sleep
    from termios import tcgetattr, tcsetattr, ECHO, ICANON, TCSANOW
    from termios import VMIN, VTIME

    from musio import open_file, open_device

    if args['debug']:
        from musio import io_util
        io_util.DEBUG = True

    filename = args['filename']
    output = os_splitext(os_basename(filename))[0] + '.' + args['filetype']
    output_bytes = output.encode('utf-8', 'surrogateescape')
    output_printable = output_bytes.decode('utf-8', 'ignore')
    if os_isfile(output):
        if raw_input("Overwrite %s (y/n): " % output_printable).lower().startswith('n'):
            return

    # Save the current terminal state.
    normal = tcgetattr(sys_stdin)
    quiet = tcgetattr(sys_stdin)

    # Do not wait for key press and don't echo.
    quiet[3] &= ~(ECHO | ICANON)
    quiet[6][VMIN] = 0
    quiet[6][VTIME] = 0

    # Set the new terminal state.
    tcsetattr(sys_stdin, TCSANOW, quiet)

    # Value returned to tell the calling function whether to quit or
    # not.
    quit_val = True

    if args['filetype'].lower() == 'ogg':
        quality = args['quality'] / 10 if args['quality'] in range(-1, 11) else 0.5
    elif args['filetype'].lower() == 'mp3':
        quality = args['quality'] if args['quality'] in range(0, 10) else 2
    else:
        # Fall back to a neutral default so quality is always defined.
        quality = 0.5

    try:
        with open_file(**args) as in_file:
            in_file_title = in_file._info_dict.get('title',
                                                in_file._info_dict['name'])
            comment_dict = {'title': in_file_title}
            comment_dict.update(in_file._info_dict)
            for i in ['title', 'artist', 'album', 'year', 'comment',
                      'track', 'genre']:
                if args.get(i, ''):
                    comment_dict[i] = args[i]

            with open_file(output, 'w', depth=in_file.depth, rate=in_file.rate,
                        channels=in_file.channels, quality=quality,
                        comment_dict=comment_dict) as out_file:
                in_file.loops = 0

                if args['show_position']:
                    filename_bytes = filename.encode('utf-8', 'surrogateescape')
                    filename_printable = filename_bytes.decode('utf-8', 'ignore')
                    print("Encoding: %s to %s" % (filename, output))
                    print(in_file)

                for data in in_file:
                    if args['show_position']:
                        if in_file.length > 0:
                            # Calculate the percentage played.
                            pos = (in_file.position * 100) / float(in_file.length)

                            # Make the string.
                            pos_str = 'Position: %.2f%%' % pos

                            # Find the length of the string.
                            format_len = len(pos_str) + 2

                            # Print the string and after erasing the old
                            # one using ansi escapes.
                            print('\033[%dD\033[K%s' % (format_len, pos_str),
                                  end='')
                            sys_stdout.flush()
                    out_file.write(data)

                    # Check for input.
                    r, _, _ = select([sys_stdin], [], [], 0)

                    # Get input if there was any otherwise continue.
                    if r:
                        command = r[0].readline().lower()
                        # Handle input commands.
                        if command.startswith('q'):
                            quit_val = False
                            break
                        elif command == '\n':
                            break

    except Exception as err:
        print("Error: %s" % err)
        raise(err)
    finally:
        # Re-set the terminal state.
        tcsetattr(sys_stdin, TCSANOW, normal)

    if args['show_position']:
        print("\nDone.")

    return quit_val
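A minimal args dict this main() appears to expect, with keys inferred from the lookups above (values are only examples, and open_file(**args) will also receive every key passed here):

args = {
    'filename': '/tmp/music/track01.flac',  # hypothetical input file
    'filetype': 'ogg',
    'quality': 5,
    'debug': False,
    'show_position': True,
}
# main(args)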
Example #12
    def __init__(self, file, file_data):
        self.file = file

        # Parse the filename
        self.info = self.parse_filename(os_basename(self.file))

        # Read the XBRL file
        self.root = get_etree_obj_from_file(self.file, file_data)
        self.nsmap = self.root.nsmap
        self.ns_prefixes = {v: k for (k, v) in self.nsmap.items()}

        # Get the namespace definitions
        ns_def = xbrl_namespace.NS_INSTANCE_20180228

        # Namespace: DEI vocabulary schema (document/filing metadata)
        self.ns_dei = None

        # Namespace: Cabinet Office Ordinance on Disclosure of Corporate Affairs (cover page, summary, body text, etc.)
        self.ns_crp = None

        # Namespace: Japanese GAAP financial statements (primary statements)
        self.ns_pfs = None

        # Namespace: filer-specific taxonomy
        self.ns_self = None

        # Get the namespaces that define account items and the like
        ns_list = []
        for (ns_prefix, ns) in self.nsmap.items():
            if ns_def['jpdei_cor'](ns):
                ns_list.append((0, ns))
                self.ns_dei = ns
            elif ns_def['jpcrp_cor'](ns):
                ns_list.append((1, ns))
                self.ns_crp = ns
            elif ns_def['jppfs_cor'](ns):
                ns_list.append((2, ns))
                self.ns_pfs = ns
            elif ns_def['self'](ns_prefix):
                ns_list.append((3, ns))
                self.ns_self = ns

        # Sort so the document metadata (dei) comes first, which made debugging easier
        ns_list.sort(key=lambda x: x[0], reverse=False)

        # Tag/attribute name definitions
        self.link_schema_ref = '{%s}schemaRef' % ns_def['link']
        self.xlink_href = '{%s}href' % ns_def['xlink']
        self.xbrli_context = '{%s}context' % ns_def['xbrli']
        self.xbrli_entity = '{%s}entity' % ns_def['xbrli']
        self.xbrli_identifier = '{%s}identifier' % ns_def['xbrli']
        self.xbrli_period = '{%s}period' % ns_def['xbrli']
        self.xbrli_start_date = '{%s}startDate' % ns_def['xbrli']
        self.xbrli_end_date = '{%s}endDate' % ns_def['xbrli']
        self.xbrli_instant = '{%s}instant' % ns_def['xbrli']
        self.xbrli_scenario = '{%s}scenario' % ns_def['xbrli']
        self.xbrldi_explicit_member = '{%s}explicitMember' % ns_def['xbrldi']
        self.xsi_nil = '{%s}nil' % ns_def['xsi']

        # Get the xsd file path
        self.xsd = self.get_xsd_filepath(file_data)

        # Get the context tags (date information)
        self.context_tags = self.get_context_tags()

        # Get the document metadata and financial statement data
        self.xbrl_datas = []
        for (number, ns) in ns_list:
            self.xbrl_datas.append((ns, self.get_xbrl_datas(ns)))

        # Delete the variable
        del self.root
        return
Example #13
def get_io(fileobj, mod_path=[], cached=True, blacklist=[]):
    """ get_io(fileobj, mod_path=[], cached=True, blacklist=[]) -> Finds a
    audio device that can take the data read from fileobj and returns it.

    """

    # IO device cache dictionary
    global __io_cache

    from .import_util import load_lazy_import, unload_lazy_import

    # Get the file input data type.
    annotations = getattr(getattr(fileobj, 'read'), '__annotations__', {})
    file_input = annotations.get('return', unicode)

    # Get the file output data type.
    annotations = getattr(getattr(fileobj, 'write'), '__annotations__', {})
    file_output = annotations.get('data', unicode)

    if cached:
        # Load an already cached audio device.
        if file_input in __io_cache:
            return __io_cache[file_input]
        elif file_output in __io_cache:
            return __io_cache[file_output]

    # Get a list of modules ending in '_io.py'
    mod_list = _build_mod_list(mod_path, '_io.py', blacklist)

    device = None
    dummy = None

    # Make importing lazy.
    # load_lazy_import(mod_path=mod_path)

    # This package's name.
    this_pkgname = __name__.split('.', 1)[0]

    # Load the device module that can handle the data from fileobj.
    for path, name in mod_list:
        # Get the package name from path.
        pkgname = os_basename(path.rstrip('/'))

        # Import the package if it is different from this one.
        if pkgname != this_pkgname and pkgname:
            try:
                __import__(pkgname)
            except ImportError as err:
                continue

        # Load the module.
        module = import_module('.%s' % os_splitext(name)[0], pkgname)

        # Get the filetypes and handler from module.
        supported_dict = getattr(module, '__supported_dict', {})

        handler = getattr(module, supported_dict.get('handler', 'dummy'), None)

        if not handler:
            continue

        # Try not to use the dummy.
        if 'dummy' in name:
            dummy = handler
            continue

        # Check the module dependencies.
        dependencies = supported_dict.get('dependencies', {})
        if not _check_dependencies(dependencies):
            continue

        input_t = supported_dict.get('input', [])
        output_t = supported_dict.get('output', [])

        default = supported_dict.get('default', False)

        # Add device input to io cache
        __io_cache.update(((key, handler) for key in input_t))

        # Add device output to io cache.
        __io_cache.update(((key, handler) for key in output_t))

        # Check if the fileobj's data type is supported.
        if 'r' in fileobj.mode and file_input in output_t:
            device = handler
            if default: break
        elif 'w' in fileobj.mode and file_output in input_t:
            device = handler
            if default: break

    # Turn off lazy imports.
    # unload_lazy_import()

    # No device was found so use the dummy_io.
    if not device: device = dummy

    return device
Example #14
def basename(path):
    """
    Returns the base name of |path|, not including the extension
    """
    return splitext(os_basename(path))[0]
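A quick illustration of the behaviour, assuming splitext is os.path.splitext and os_basename is os.path.basename:

print(basename('/var/log/app/server.tar.gz'))  # -> 'server.tar' (only the last extension is stripped)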
Example #15
    def __init__(self, file, file_data, org_file_name):
        self.file = file

        # Parse the filename
        self.info = self.parse_filename(os_basename(org_file_name))
        if self.info['報告書'] is None:
            # Re-parse
            if 'E25850-' in org_file_name:
                self.info = self.parse_filename_e25850(os_basename(org_file_name))

        # Read the XBRL file
        self.root = get_etree_obj_from_file(self.file, file_data)
        self.nsmap = self.root.nsmap
        self.ns_prefixes = {v: k for (k, v) in self.nsmap.items()}

        # Namespace: document information taxonomy
        self.ns_di = None

        # Namespace: company-specific taxonomy
        self.ns_self = None

        # Namespace: IFRS
        self.ns_ifrs = None

        # Namespace: other schemas
        self.ns_jpfr_oe = None

        # Namespace: xbrldi
        self.ns_xbrldi = None

        # Get the namespaces that define account items and the like
        ns_list = []
        if self.info['会計基準'] == 'jpfr':
            # Get the namespace definitions
            ns_def = xbrl_namespace.NS_INSTANCE_20130301

            for (ns_prefix, namespace) in self.nsmap.items():
                if ns_def['jpfr-di'](namespace):
                    ns_list.append((0, namespace))
                    self.ns_di = namespace
                elif re_match('^jpfr-t-[a-z]*$', ns_prefix):
                    ns_list.append((1, namespace))
                elif ns_def['self'](ns_prefix):
                    ns_list.append((2, namespace))
                    self.ns_self = namespace
                elif ns_def['jpfr-oe'](namespace):
                    self.ns_jpfr_oe = namespace

            ns_list.sort(key=lambda x: (x[0], x[1]), reverse=False)

        elif self.info['会計基準'] == 'ifrs':
            # Get the namespace definitions
            ns_def = xbrl_namespace.NS_INSTANCE_IFRS_20130301

            for (ns_prefix, namespace) in self.nsmap.items():
                if ns_def['ifrs'](namespace):
                    ns_list.append((0, namespace))
                    self.ns_ifrs = namespace
                elif ns_def['self'](ns_prefix):
                    ns_list.append((1, namespace))
                    self.ns_self = namespace
                elif ns_def['xbrldi'] == namespace:
                    self.ns_xbrldi = namespace

            ns_list.sort(key=lambda x: (x[0], x[1]), reverse=False)

        else:
            # Could not determine the accounting standard.
            print('会計基準の判定失敗')
            raise ValueError('会計基準の判定失敗')

        # Tag/attribute name definitions
        self.link_schema_ref = '{%s}schemaRef' % ns_def['link']
        self.xlink_href = '{%s}href' % ns_def['xlink']
        self.xbrli_context = '{%s}context' % ns_def['xbrli']
        self.xbrli_entity = '{%s}entity' % ns_def['xbrli']
        self.xbrli_identifier = '{%s}identifier' % ns_def['xbrli']
        self.xbrli_period = '{%s}period' % ns_def['xbrli']
        self.xbrli_start_date = '{%s}startDate' % ns_def['xbrli']
        self.xbrli_end_date = '{%s}endDate' % ns_def['xbrli']
        self.xbrli_instant = '{%s}instant' % ns_def['xbrli']
        self.xbrli_scenario = '{%s}scenario' % ns_def['xbrli']
        self.jpfr_oe_non_consolidated = '{%s}NonConsolidated' % self.ns_jpfr_oe if self.ns_jpfr_oe else None
        self.xbrldi_explicit_member = '{%s}explicitMember' % self.ns_xbrldi if self.ns_xbrldi else None
        self.xsi_nil = '{%s}nil' % ns_def['xsi']

        # Get the xsd file path and namespace
        self.xsd = self.get_xsd_filepath(file_data)

        # Get the context tags (date information)
        self.context_tags = self.get_context_tags()

        # Get the document metadata and financial statement data
        self.xbrl_datas = []
        for (number, ns) in ns_list:
            self.xbrl_datas.append((ns, self.get_xbrl_datas(ns)))

        # Delete the variable
        del self.root
        return
Example #16
def get_codec(filename, mod_path=[], cached=True, blacklist=[]):
    """ get_codec(filename, mod_path=[], cached=True, blacklist=[]) -> Load the
    codecs in the path and return the first one that can play the file, or the
    one with the default attribute set.

        filename        The file the codec needs to handle
        mod_path        Additional search paths for modules
        cached          Use cached codecs if available
        blacklist       Modules not to load

    """

    # Codec cache dictionary
    global __codec_cache

    from urlparse import urlparse

    from .import_util import load_lazy_import, unload_lazy_import

    # Get the file extension.
    file_ext = os_splitext(filename)[1].lower()

    # Get protocol.
    file_prot = urlparse(filename).scheme

    if cached:
        # Load an already cached codec.
        if file_ext in __codec_cache:
            return __codec_cache[file_ext]
        elif file_prot in __codec_cache:
            return __codec_cache[file_prot]

    # Get a list of modules ending in '_file.py'
    mod_list = _build_mod_list(mod_path, '_file.py', blacklist)

    codec = None
    dummy = None

    # Make importing lazy.
    # load_lazy_import(mod_path=mod_path)

    # This package's name.
    this_pkgname = __name__.split('.', 1)[0]

    # Load the codec module that can handle the file.
    for path, name in mod_list:
        # Get the package name from path.
        pkgname = os_basename(path.rstrip('/'))

        # Import the package if it is different from this one.
        if pkgname != this_pkgname and pkgname:
            try:
                __import__(pkgname)
            except ImportError as err:
                continue

        # Load the module.
        try:
            module = import_module('.%s' % os_splitext(name)[0], pkgname)
        except ImportError as err:
            print("Skipping module: (%s) because of error: %s" % (name, err))
            continue

        # Get the filetypes and handler from module.
        supported_dict = getattr(module, '__supported_dict', {})

        # Get the handler.
        handler = getattr(module, supported_dict.get('handler', 'dummy'), None)

        # Don't even check this module if it does not have a handler.
        if not handler:
            continue

        # Try not to use the dummy handler.
        if 'dummy' in name:
            dummy = handler
            continue

        # Check the module dependencies.
        dependencies = supported_dict.get('dependencies', {})
        if not _check_dependencies(dependencies):
            continue

        issupported = supported_dict.get('issupported', lambda *a: False)
        ext = supported_dict.get('ext', [])
        protocol = supported_dict.get('protocol', [])

        default = supported_dict.get('default', False)

        # Add filetype handlers to the codec cache.
        __codec_cache.update(((key, handler) for key in ext))

        # Add protocol handlers to the codec cache.
        __codec_cache.update(((key, handler) for key in protocol))

        # Check if filename is supported.
        if issupported(filename) or file_ext in ext or file_prot in protocol:
            codec = handler
            if default: break
        elif not codec and '.*' in ext:
            codec = handler

    # Turn off lazy imports.
    # unload_lazy_import()

    # No codec was found so default to the dummy codec.
    if not codec: codec = dummy

    return codec
Example #17
def _get_xbrl_datas(xbrl_file, xbrl_file_data):
    """データ取得"""

    # Read the xbrl file
    if RE_XBRL_P_V1_MATCH(os_basename(xbrl_file)):
        # Legacy EDINET XBRL
        # print(xbrl_file)
        xbrl = xbrl_jpfr_Parser(xbrl_file, xbrl_file_data)
        xbrl_ver = 1
    elif RE_XBRL_P_V2_MATCH(os_basename(xbrl_file)):
        # print(xbrl_file)
        xbrl = xbrl_jpcor_Parser(xbrl_file, xbrl_file_data)
        xbrl_ver = 2
    else:
        # Audit report XBRL files (jpaud-***.xbrl) fall through to here
        # print('未対応のファイル名 %s' % xbrl_file)
        return None

    # Convert the data into a list of rows
    data_labels = [
        'version',
        '提出日',
        '提出回数',
        '報告対象期間期末日',
        '追番',
        '第N期',
        '名前空間接頭辞',
        'tag',
        'id',
        'context',
        '開始日',
        '終了日',
        '期末日',
        '連結',
        '値',
    ]

    context_tags = xbrl.context_tags

    xbrl_infos = [
        xbrl_ver,
        xbrl.info['提出日'],
        xbrl.info['提出回数'],
        xbrl.info['報告対象期間期末日'],
        xbrl.info['追番'],
        xbrl.info['第N期'],
    ]

    datas = []
    datas_append = datas.append

    xbrl_standard = xbrl.info['会計基準'] if '会計基準' in xbrl.info else None

    # Kinds of xbrl.xbrl_datas (prefixes corresponding to each namespace):
    # - document metadata (jpfr-di, ifrs, jpdei_cor)
    # - cover page, summary, body text, etc. (jpcrp_cor)
    # - financial statements (jpfr-t-***, ifrs, jppfs_cor)
    # - filer-specific taxonomy (*E00000*)
    for (namespace, xbrl_data) in xbrl.xbrl_datas:

        # Keys: tuple of (tag name, context, id)
        # Values: dict of attributes and text
        for ((t_tag, t_context_ref, t_id), v) in xbrl_data.items():

            # Split the namespace off the tag name & convert it to a prefix
            (t_ns, t_tag_name) = t_tag.rsplit('}', maxsplit=1)
            try:
                datas_append(
                    # XBRL version and document info
                    xbrl_infos +

                    # namespace prefix, tag name, id attribute, context
                    [
                        xbrl.ns_prefixes[t_ns.lstrip('{')],
                        t_tag_name,
                        t_id,
                        t_context_ref,
                    ] +

                    # start date, end date, period-end date
                    _get_dates(context_tags[t_context_ref]['period']) +

                    # consolidation flag, type-converted value
                    [
                        _get_consolidated_or_nonconsolidated(
                            context_tags[t_context_ref], xbrl_ver,
                            xbrl_standard),
                        conv_str_to_num(v['text']),
                    ])
            except:
                print(format_exc())
    del (xbrl, xbrl_infos, context_tags)

    # Convert to a DataFrame
    df = pd_DataFrame(datas, columns=data_labels)
    del (datas, data_labels)

    def df_conv_str_to_datetime(t_colulmn_name):
        """文字列 -> 日付変換"""
        try:
            df[t_colulmn_name] = pd_to_datetime(df[t_colulmn_name])
        except (TypeError, ValueError):
            print('変換エラー %s conv_str_to_num で再試行' % t_colulmn_name)
            df[t_colulmn_name] = df[t_colulmn_name].apply(conv_str_to_num)
        return

    for colulmn_name in ('提出日', '開始日', '終了日', '期末日'):
        df_conv_str_to_datetime(colulmn_name)

    return df
Example #18
def basename(path):
  """
  Returns the base name of |path|, not including the extension
  """
  return splitext(os_basename(path))[0]
Example #19
    def __init__(self,
                 filename: str,
                 mode: str = 'r',
                 depth: int = 16,
                 rate: int = 44100,
                 channels: int = 2,
                 bigendian: bool = False,
                 unsigned: bool = False,
                 **kwargs):
        """Loads and play all filetypes supported.

        Load the correct codec for the file and acts as a wrapper providing
        additional funcionality.
        """
        codec = get_codec(filename, blacklist=[os_basename(__file__)])

        self._supported_modes = getattr(codec, '_supported_modes', 'r')

        source = codec(filename, mode=mode, **kwargs)

        super(AllFile, self).__init__(filename, mode, source.depth,
                                      source.rate, source.channels)

        self._source = source

        self._bigendian = bigendian
        self._unsigned = unsigned

        self._state = None

        annotations = getattr(codec.read, '__annotations__')
        self.read.__annotations__.update(annotations)

        self._buffer = annotations.get('return', bytes)()
        self._buffer_size = self._source.buffer_size

        self._length = self._source.length
        self._info_dict = self._source._info_dict
        self.write = self._source.write

        self._closed = False

        if self._depth != self._source.depth:
            self._convert_depth = lambda data: \
                audioop.lin2lin(data, self._source._width, self._width)
        else:
            self._convert_depth = lambda data: data

        if self._unsigned != self._source.unsigned:
            self._convert_unsigned = lambda data: \
                audioop.bias(data, self._source._width, 128)
        else:
            self._convert_unsigned = lambda data: data

        # Make it stereo
        if self._source.channels < self._channels:
            self._convert_channels = lambda data: audioop.tostereo(
                data, self._width, 1, 1)
        # Make it mono
        elif self._source.channels > self._channels:
            self._convert_channels = lambda data: audioop.tomono(
                data, self._width, 1, 1)
        else:
            self._convert_channels = lambda data: data

        # Convert the sample rate of the data to the requested rate.
        if self._rate != self._source.rate:
            self._convert_rate = lambda data: audioop.ratecv(
                data, self._width, self._channels, self._source.rate, self.
                _rate, self._state)
        else:
            self._convert_rate = lambda data: (data, self._state)

        if self._bigendian != self._source.bigendian:
            self._convert_endian = swap_endian
        else:
            self._convert_endian = lambda data: data
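The conversion lambdas above wrap the standard audioop routines; a standalone illustration of three of them (the sample bytes are made up):

import audioop

mono_8bit = bytes(range(64))                         # fake 8-bit mono samples
stereo_8bit = audioop.tostereo(mono_8bit, 1, 1, 1)   # duplicate the channel
stereo_16bit = audioop.lin2lin(stereo_8bit, 1, 2)    # widen 8-bit -> 16-bit
biased_16bit = audioop.bias(stereo_16bit, 2, 128)    # shift each sample by 128
print(len(mono_8bit), len(stereo_8bit), len(stereo_16bit))  # 64 128 256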
Example #20
    def getSlice(self, index, sliceZ, sliceZ2=0):
        """Retrieve a slice of a dataset from the DM file. The data set will have a shape according to
        3D = [sliceZ,Y,X] or 4D: [sliceZ2,sliceZ,Y,X]

        Note: Most DM3 and DM4 files contain a small "thumbnail" as the first dataset written as RGB data. This function ignores that dataset if it exists. To retrieve the thumbnail use the getThumbnail() function.

        Warning: DM4 files with 4D data sets are written as [X,Y,Z1,Z2]. This code currently gets the [X,Y] slice.
        Getting the [Z1,Z2] slice is not yet implemented. Use the getMemmap() function to retrieve arbitrary slices of
        large data sets.

        Parameters
        ----------
            index : int
                The number of the dataset in the DM file.
            sliceZ : int
                The slice to get along the first dimension (C-ordering)
                for 3D datasets or 4D datasets.
            sliceZ2 : int
                For 4D dataset

        Returns
        -------
            : dict
                A dictionary containing meta data and the data.
        """
        # The first dataset is usually a thumbnail. Test for this and skip the thumbnail automatically
        if self.numObjects == 1:
            ii = index
        else:
            ii = index + 1

        # Check that the dataset exists.
        try:
            self._checkIndex(ii)
        except:
            raise

        # Check sliceZ and sliceZ2 are within the data array size bounds
        if sliceZ > (self.zSize[ii] - 1):
            raise IndexError(
                'Index out of range, trying to access element {} of {} valid elements'
                .format(sliceZ, self.zSize))
        if sliceZ2 > (self.zSize2[ii] - 1):
            raise IndexError(
                'Index out of range, trying to access element {} of {} valid elements'
                .format(sliceZ2, self.zSize2))

        self.seek(self.fid, self.dataOffset[ii],
                  0)  # Seek to start of dataset from beginning of the file

        outputDict = {}
        outputDict['filename'] = os_basename(self.filename)

        # Parse the dataset to see what type it is (image, 3D image series, spectra, 4D, etc.)
        if self.xSize[ii] > 0:
            # determine the number of bytes to skip
            pixelCount = int(self.xSize[ii]) * int(self.ySize[ii])
            byteCount = pixelCount * np.dtype(
                self._DM2NPDataType(self.dataType[ii])).itemsize
            jj = 0  # counter to determine where the first scale value starts
            for nn in self.dataShape[0:ii]:
                jj += nn  # sum up all number of dimensions for previous datasets
            if self.zSize[ii] == 1:  # 2D data
                outputDict['data'] = self.fromfile(
                    self.fid,
                    count=pixelCount,
                    dtype=self._DM2NPDataType(self.dataType[ii])).reshape(
                        (self.ySize[ii], self.xSize[ii]))
            elif self.zSize2[ii] > 1:  # 4D data
                self.seek(self.fid, sliceZ * sliceZ2 * byteCount,
                          1)  # skip ahead from current position
                outputDict['data'] = self.fromfile(
                    self.fid,
                    count=pixelCount,
                    dtype=self._DM2NPDataType(self.dataType[ii])).reshape(
                        (self.ySize[ii], self.xSize[ii]))
            else:  # 3D array
                self.seek(self.fid, sliceZ * byteCount,
                          1)  # skip ahead from current position
                outputDict['data'] = self.fromfile(
                    self.fid,
                    count=pixelCount,
                    dtype=self._DM2NPDataType(self.dataType[ii])).reshape(
                        (self.ySize[ii], self.xSize[ii]))

            # Return the proper meta data for this one image
            # need to reverse the order to match the C-ordering of the data
            outputDict['pixelUnit'] = self.scaleUnit[jj:jj + 2][::-1]
            outputDict['pixelSize'] = self.scale[jj:jj + 2][::-1]
            outputDict['pixelOrigin'] = self.origin[jj:jj + 2][::-1]

        # Ensure the data is loaded into memory from the buffer
        if self._on_memory:
            outputDict['data'] = np.array(outputDict['data'])

        return outputDict
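A rough usage sketch; DMReader stands in for the enclosing reader class, whose real name and constructor are not shown in this listing:

dm_file = DMReader('/data/stem/image_stack.dm4')  # hypothetical class and path
out = dm_file.getSlice(0, sliceZ=5)               # 6th z-slice of the first non-thumbnail dataset
print(out['filename'], out['data'].shape, out['pixelSize'], out['pixelUnit'])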
Example #21
    def getDataset(self, index):
        """Retrieve a dataset from the DM file.

        Notes
        -----
            Most DM3 and DM4 files contain a small "thumbnail" as the first dataset written as RGB data. This
            function ignores that dataset if it exists. To retrieve the thumbnail use the getThumbnail() function.

            The pixelOrigin returned is not actually the start of the coordinates. The start of the energy axis
            for EELS (for example) will be pixelSize * pixelOrigin. dmReader() returns the correct coordinates.
            The correct origin is: pixelSize * pixelOrigin and be careful about the sign as it seems some datasets
            might use -pixelOrigin in the previous equation.

        Parameters
        ----------
            index : int
                The number of the data set to retrieve ignoring the thumbnail. If a thumbnail exists then index = 0
                actually corresponds to the second data set in a DM file.

        Returns
        -------
            : dict
                A dictionary of the data and meta data. The data is associated
                with the 'data' key in the dictionary.

        """
        # The first dataset is usually a thumbnail. Test for this and skip the thumbnail automatically
        if self.numObjects == 1:
            ii = index
        else:
            ii = index + 1

        # Check that the dataset exists.
        try:
            self._checkIndex(ii)
        except:
            raise

        self.seek(self.fid, self.dataOffset[ii],
                  0)  # Seek to start of dataset from beginning of the file

        outputDict = {}

        outputDict['filename'] = os_basename(self.filename)

        # Parse the dataset to see what type it is (image, image series, spectra, etc.)
        if self.xSize[ii] > 0:
            pixelCount = int(self.xSize[ii]) * int(self.ySize[ii]) * int(
                self.zSize[ii]) * int(self.zSize2[ii])
            jj = 0  # counter to determine where the first scale value starts
            for nn in self.dataShape[0:ii]:
                jj += nn  # sum up all number of dimensions for previous datasets
            # if self.dataType == 23: #RGB image(s)
            #    temp = self.fromfile(self.fid,count=pixelCount,dtype=np.uint8).reshape(self.ysize[ii],self.xsize[ii])
            if self.zSize[ii] == 1:
                # 2D data and 1D spectra
                outputDict['data'] = self.fromfile(
                    self.fid,
                    count=pixelCount,
                    dtype=self._DM2NPDataType(self.dataType[ii])).reshape(
                        (self.ySize[ii], self.xSize[ii]))

                # Reverse the order to match the C-ordering of the data
                outputDict['pixelUnit'] = self.scaleUnit[jj:jj + self.
                                                         dataShape[ii]][::-1]
                outputDict['pixelSize'] = self.scale[jj:jj +
                                                     self.dataShape[ii]][::-1]
                outputDict['pixelOrigin'] = self.origin[jj:jj + self.
                                                        dataShape[ii]][::-1]

                # Match size of meta data if necessary
                if outputDict['data'].ndim > len(outputDict['pixelOrigin']):
                    outputDict['data'] = np.squeeze(outputDict['data'])
            elif self.zSize2[ii] > 1:  # 4D data
                outputDict['data'] = self.fromfile(
                    self.fid,
                    count=pixelCount,
                    dtype=self._DM2NPDataType(self.dataType[ii])).reshape(
                        (self.zSize2[ii], self.zSize[ii], self.ySize[ii],
                         self.xSize[ii]))
                # Reverse the order to match the C-ordering of the data
                outputDict['pixelUnit'] = self.scaleUnit[jj:jj + self.
                                                         dataShape[ii]][::-1]
                outputDict['pixelSize'] = self.scale[jj:jj +
                                                     self.dataShape[ii]][::-1]
                outputDict['pixelOrigin'] = self.origin[jj:jj + self.
                                                        dataShape[ii]][::-1]
            else:  # 3D array
                outputDict['data'] = self.fromfile(
                    self.fid,
                    count=pixelCount,
                    dtype=self._DM2NPDataType(self.dataType[ii])).reshape(
                        (self.zSize[ii], self.ySize[ii], self.xSize[ii]))
                # Reverse the order to match the C-ordering of the data
                outputDict['pixelUnit'] = self.scaleUnit[jj:jj + self.
                                                         dataShape[ii]][::-1]
                outputDict['pixelSize'] = self.scale[jj:jj +
                                                     self.dataShape[ii]][::-1]
                outputDict['pixelOrigin'] = self.origin[jj:jj + self.
                                                        dataShape[ii]][::-1]

        # Ensure the data is loaded into memory from the buffer
        if self._on_memory:
            outputDict['data'] = np.array(outputDict['data'])

        # Remove singular dimensions if needed
        #outputDict['data'] = np.squeeze(outputDict['data'])
        return outputDict
Example #22
def main(args: dict) -> None:
    """ Encode args['filename'] times.

    """

    from os.path import basename as os_basename
    from os.path import isfile as os_isfile
    from os.path import splitext as os_splitext
    from sys import stdin as sys_stdin
    from select import select
    from time import sleep as time_sleep
    from termios import tcgetattr, tcsetattr, ECHO, ICANON, TCSANOW
    from termios import VMIN, VTIME

    from musio import open_file, open_device

    if args['debug']:
        from musio import io_util
        io_util.DEBUG = True

    filename = args['filename']
    output = os_splitext(os_basename(filename))[0] + '.' + args['filetype']
    output_bytes = output.encode('utf-8', 'surrogateescape')
    output_printable = output_bytes.decode('utf-8', 'ignore')
    if os_isfile(output):
        if input("Overwrite %s (y/n): " % output_printable).lower().startswith('n'):
            return

    # Save the current terminal state.
    normal = tcgetattr(sys_stdin)
    quiet = tcgetattr(sys_stdin)

    # Do not wait for key press and don't echo.
    quiet[3] &= ~(ECHO | ICANON)
    quiet[6][VMIN] = 0
    quiet[6][VTIME] = 0

    # Set the new terminal state.
    tcsetattr(sys_stdin, TCSANOW, quiet)

    # Value returned to tell the calling function whether to quit or
    # not.
    quit_val = True

    if args['filetype'].lower() == 'ogg':
        quality = args['quality'] / 10 if args['quality'] in range(-1, 11) else 0.5
    elif args['filetype'].lower() == 'mp3':
        quality = args['quality'] if args['quality'] in range(0, 10) else 2
    else:
        # Fall back to a neutral default so quality is always defined.
        quality = 0.5

    try:
        with open_file(**args) as in_file:
            in_file_title = in_file._info_dict.get('title',
                                                in_file._info_dict['name'])
            comment_dict = {'title': in_file_title}
            comment_dict.update(in_file._info_dict)
            for i in ['title', 'artist', 'album', 'year', 'comment',
                      'track', 'genre']:
                if args.get(i, ''):
                    comment_dict[i] = args[i]

            with open_file(output, 'w', depth=in_file.depth, rate=in_file.rate,
                        channels=in_file.channels, quality=quality,
                        comment_dict=comment_dict) as out_file:
                in_file.loops = 0

                if args['show_position']:
                    filename_bytes = filename.encode('utf-8', 'surrogateescape')
                    filename_printable = filename_bytes.decode('utf-8', 'ignore')
                    print("Encoding: %s to %s" % (filename_printable, output_printable))
                    print(in_file)

                for data in in_file:
                    if args['show_position']:
                        if in_file.length > 0:
                            # Calculate the percentage played.
                            pos = (in_file.position * 100) / in_file.length

                            # Make the string.
                            pos_str = 'Position: %.2f%%' % pos

                            # Find the length of the string.
                            format_len = len(pos_str) + 2

                            # Print the string and after erasing the old
                            # one using ansi escapes.
                            print('\033[%dD\033[K%s' % (format_len, pos_str),
                                  end='', flush=True)
                    out_file.write(data)

                    # Check for input.
                    r, _, _ = select([sys_stdin], [], [], 0)

                    # Get input if there was any otherwise continue.
                    if r:
                        command = r[0].readline().lower()
                        # Handle input commands.
                        if command.startswith('q'):
                            quit_val = False
                            break
                        elif command == '\n':
                            break

    except Exception as err:
        print("Error: %s" % err, flush=True)
        raise(err)
    finally:
        # Re-set the terminal state.
        tcsetattr(sys_stdin, TCSANOW, normal)

    if args['show_position']:
        print("\nDone.")

    return quit_val
Example #23
    def __init__(self, filename, mode='r', depth=16, rate=44100, channels=2,
                 bigendian=False, unsigned=False, **kwargs):
        """ AllFile(self, filename, mode='r', depth=16, rate=44100, channels=2,
                    bigendian=False, unsigned=False, **kwargs) -> Loads the
        correct codec for the file and acts as a wrapper providing additional
        functionality.

        """

        codec = get_codec(filename, blacklist=[os_basename(__file__)])

        self._supported_modes = getattr(codec, '_supported_modes', 'r')

        source = codec(filename, mode=mode, **kwargs)

        super(AllFile, self).__init__(filename, mode, source.depth,
                                      source.rate, source.channels)

        self._source = source

        self._bigendian = bigendian
        self._unsigned = unsigned

        self._state = None

        annotations = getattr(codec.read, '__annotations__')
        self.read.__annotations__.update(annotations)

        self._buffer = annotations.get('return', bytes)()
        self._buffer_size = self._source.buffer_size

        self._length = self._source.length
        self._info_dict = self._source._info_dict
        self.write = self._source.write

        self._closed = False

        if self._depth != self._source.depth:
            self._convert_depth = lambda data: \
                audioop.lin2lin(data, self._source._width, self._width)
        else:
            self._convert_depth = lambda data: data

        if self._unsigned != self._source.unsigned:
            self._convert_unsigned = lambda data: \
                audioop.bias(data, self._source._width, 128)
        else:
            self._convert_unsigned = lambda data: data

        # Make it stereo
        if self._source.channels < self._channels:
            self._convert_channels = lambda data: audioop.tostereo(data,
                                                                   self._width,
                                                                   1, 1)
        # Make it mono
        elif self._source.channels > self._channels:
            self._convert_channels = lambda data: audioop.tomono(data,
                                                                 self._width,
                                                                 1, 1)
        else:
            self._convert_channels = lambda data: data

        # Convert the sample rate of the data to the requested rate.
        if self._rate != self._source.rate:
            self._convert_rate = lambda data: audioop.ratecv(data, self._width,
                                                             self._channels,
                                                             self._source.rate,
                                                             self._rate,
                                                             self._state)
        else:
            self._convert_rate = lambda data: (data, self._state)

        if self._bigendian != self._source.bigendian:
            self._convert_endian = swap_endian
        else:
            self._convert_endian = lambda data: data