Example #1
def compare_strats(*strat_fns):
    import more_itertools

    for iteration_results in zip(*(fn() for fn in strat_fns)):
        print(iteration_results)
        if not more_itertools.all_equal(ab for ab, _, _ in iteration_results):
            break
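A hedged usage sketch (these strategy generators are hypothetical): each strategy yields (answer, state, meta) tuples, and compare_strats prints the zipped results until the first elements diverge.

def strat_a():
    yield ('x', 1, None)
    yield ('y', 2, None)

def strat_b():
    yield ('x', 1, None)
    yield ('z', 2, None)

compare_strats(strat_a, strat_b)  # prints both rows, then breaks on 'y' != 'z'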
Example #2
	def playlist_songs_delete(self, playlist_songs):
		"""Delete songs from playlist.

		Parameters:
			playlist_songs (dict or list): A playlist song dict
				or a list of playlist song dicts.

		Returns:
			dict: Playlist dict including songs.
		"""

		if not more_itertools.all_equal(
			playlist_song['playlistId']
			for playlist_song in playlist_songs
		):
			raise ValueError("All 'playlist_songs' must be from the same playlist.")

		mutations = [
			mc_calls.PlaylistEntriesBatch.delete(playlist_song['id'])
			for playlist_song in playlist_songs
		]
		self._call(
			mc_calls.PlaylistEntriesBatch,
			mutations)

		return self.playlist(
			playlist_songs[0]['playlistId'],
			include_songs=True
		)
Example #3
 def terminal(self) -> bool:
     winners = ((0, 1, 2), (3, 4, 5), (6, 7, 8), (0, 3, 6), (1, 4, 7),
                (2, 5, 8), (0, 4, 8), (2, 4, 6))
     for w in winners:
         if self.board[w[0]] and all_equal(self.board[list(w)]):
             return True
     return False
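The fancy indexing `self.board[list(w)]` suggests `board` is a numpy array; a minimal sketch under that assumption, with 0 marking an empty cell:

import numpy as np
from more_itertools import all_equal

board = np.array([1, 1, 1, 0, 2, 0, 2, 0, 2])
w = (0, 1, 2)
print(bool(board[w[0]] and all_equal(board[list(w)])))  # True: top row is a win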
Example #4
def audit_tables(opened_data: AuditTables,
                 commitment_data: TallyTables) -> bool:
    # check if there are any tables to audit
    if len(opened_data.keys()) == 0:
        print("Opened data has no tables")
        return False

    # check if opened data has the same keys as commitment data
    if set(opened_data.keys()) != set(commitment_data.keys()):
        print("Opened data and commitments have different number of tables")
        return False

    # check if all corresponding tables are valid
    for key in opened_data:
        if not audit_table(opened_data[key], commitment_data[key]):
            print(f"Table {key} is not valid")
            return False

    # check if all tables have the same length
    if not all_equal(
            len(opened_table) for opened_table in opened_data.values()):
        print("Tables have different lengths")
        return False

    return True
Example #5
	def find_mpeg_frames(data):
		frames = []
		cached_frames = None
		buffer_size = 128
		buffer = data.peek(buffer_size)

		while len(buffer) >= buffer_size:
			sync_start = buffer.find(b'\xFF')

			if sync_start >= 0:
				data.seek(sync_start, os.SEEK_CUR)

				if bitstruct.unpack('u11', data.peek(2))[0] == 2047:
					for _ in range(4):
						try:
							frame = MPEGFrameHeader.parse(data)
							frames.append(frame)
							if frame._xing and frame._xing.num_frames:
								break
							data.seek(frame._start + frame._size, os.SEEK_SET)
						except (FormatError, bitstruct.Error):
							data.seek(1, os.SEEK_CUR)
							break
				else:
					data.seek(sync_start + 1, os.SEEK_CUR)

				if frames and (len(frames) >= 4 or frames[0]._xing):
					break

				if len(frames) >= 2 and cached_frames is None:
					cached_frames = frames.copy()

				del frames[:]
			else:
				data.seek(buffer_size, os.SEEK_CUR)

			buffer = data.peek(buffer_size)

		# I actually found a PNG file that had multiple consecutive MPEG frames parsed.
		# The all_equal check combats this false positive by
		# making sure certain attributes don't change between frames.
		if not frames:
			if (
				cached_frames
				and more_itertools.all_equal(
					[
						frame.channel_mode,
						frame.channels,
						frame.layer,
						frame.sample_rate,
						frame.version,
					]
					for frame in cached_frames
				)
			):
				frames = cached_frames
			else:
				raise FormatError("No XING header and insufficient MPEG frames.")

		return frames
Example #6
def longestCommonPrefix(self, s):
    ans = ''
    for letters in zip(*s):
        if all_equal(letters):
            ans += letters[0]
        else:
            break
    return ans
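What zip(*s) does here: it transposes the strings into per-position tuples, so all_equal stops the scan at the first column that differs. On a hypothetical input:

s = ["flower", "flow", "flight"]
print(list(zip(*s))[:3])  # [('f', 'f', 'f'), ('l', 'l', 'l'), ('o', 'o', 'i')]
# the loop appends 'f' and 'l', then breaks on ('o', 'o', 'i') -> "fl"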
Example #7
    def main_metric(
        self,
        selected_metric: Union[str, List[str]] = None
    ) -> Tuple[Union[str, List[str]], str]:
        """Configure and retrieve the metric to be used for monitoring.

        The first time it is called, the main metric is configured based on the
        specified metrics in ``selected_metric`` or, if not provided, on the first
        metric in the outputs. Subsequent calls return the configured main metric.
        If a subsequent call specifies ``selected_metric``, configuration is done again.

        Returns:
            a tuple containing the main metric name and the ordering.
                Note that the first element might be a concatenation of several
                metrics in case ``selected_metric`` is a list. This is useful for
                considering more than one metric as the best
                (``metric_end()`` will sum over them).
        """
        if self._main_metric_list is None or selected_metric is not None:
            if not selected_metric:
                names = [self.outputs.metrics[0].name]
                ordering = self.outputs.metrics[0].best_ordering
            else:
                metrics = {m.name: m for m in self.outputs.metrics}
                if isinstance(selected_metric, (list, tuple)):
                    selected = []
                    for selection in selected_metric:
                        if selection not in metrics:
                            raise KeyError(
                                f'Main metric {selection} is not a configured metric; '
                                f'available options are: {list(metrics.keys())}'
                            )
                        selected.append(metrics[selection])
                    names = [m.name for m in selected]
                    orderings = [m.best_ordering for m in selected]
                    if not all_equal(orderings):
                        raise ValueError(
                            f'Main metrics {names} have different '
                            f'ordering: {orderings}')
                    ordering = orderings[0]
                else:
                    try:
                        selected = metrics[selected_metric]
                    except KeyError:
                        raise KeyError(
                            f'Main metric {selected_metric} is not a configured metric;'
                            f' available options are: {list(metrics.keys())}')
                    names = [selected.name]
                    ordering = selected.best_ordering
            self._main_metric_list = names
            self._main_metric_name = '+'.join(names)
            self._main_metric_ordering = ordering
        return self._main_metric_name, self._main_metric_ordering
Example #8
 def _is_guild_count_landmark(guild_count):
     """Return True if the bot is in a special number of guilds"""
     # TODO: Put this in config.py
     guild_count_string = str(guild_count)
     return (
         # 1111, 22222, 55555, etc.
         all_equal(guild_count_string)
         # 2000, 30000, 40000, etc
         or (guild_count_string[0] != '1' and set(guild_count_string[1:]) == {'0'})
         or Stats._is_guild_count_milestone(guild_count - 1)
         or Stats._is_guild_count_milestone(guild_count + 1)
     )
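A quick check of the first two landmark rules (the milestone lookarounds need the rest of the Stats class, so this sketch stubs them out):

from more_itertools import all_equal

for n in (7777, 30000, 12345):
    s = str(n)
    repdigit = all_equal(s)                             # 1111, 22222, ...
    round_number = s[0] != '1' and set(s[1:]) == {'0'}  # 2000, 30000, ...
    print(n, repdigit or round_number)  # True, True, False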
Example #9
    def insufficient_material(self):
        piece_counter = collections.Counter(
            map(str.lower, self.board._position))

        if piece_counter['p'] or piece_counter['r'] or piece_counter['q']:
            return False

        if sum(piece_counter.values()) <= 3:
            return True

        return all_equal(
            _tile_type(i) for i, c in enumerate(self.board._position)
            if c.lower() == 'b')
Example #10
 def __init__(self, columns: Dict[Any, Union[Iterable, List]]):
     self._length = None
     num_samples = [len(col) for col in columns.values()]
     if not all_equal(num_samples):
         col_num_samples = {
             name: len(samples)
             for name, samples in columns.items()
         }
         raise ValueError(
            'Number of samples from different encoders does not match: '
             '{}'.format(col_num_samples))
     self._length = num_samples[0] if num_samples else 0
     self.columns = columns
Example #11
 def remove_symmetries(self):
     # (Probably still broken when only some dims are equal.)
     origin = [0 for _ in self.dims]
     for i in range(1, 2**len(self.dims)):
         corner = [(self.dims[n] - 1) * ((i >> n) % 2)
                   for n in range(len(self.dims))]
         self.model.Add(self[origin] <= self[corner])
     if all_equal(self.dims):
         for i, dim in enumerate(self.dims):
             corner = copy(origin)
             corner[i] = dim - 1
             if i == 0:
                 first_corner = corner
                 continue
             self.model.Add(self[first_corner] <= self[corner])
Example #12
def group_word_forms(
        forms_with_dists: List[Tuple[str, float]]) -> List[RhymeResult]:
    common_prefix_len = min(len(form) for form, _ in forms_with_dists)
    while not mit.all_equal(form[:common_prefix_len]
                            for form, _ in forms_with_dists):
        common_prefix_len -= 1

    forms_with_dists.sort(key=lambda fd: (fd[1], len(fd[0]), fd[0]))
    base_form = forms_with_dists[0]
    flex_forms = ((f'-{form[common_prefix_len:]}', dist)
                  for form, dist in forms_with_dists[1:])

    return [
        RhymeResult(form, dist)
        for form, dist in it.chain([base_form], flex_forms)
    ]
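A sketch of the prefix loop on hypothetical word forms: the slice length shrinks until every form shares the same prefix.

import more_itertools as mit

forms = [('singen', 0.1), ('singst', 0.2), ('sang', 0.5)]
n = min(len(f) for f, _ in forms)  # 4
while not mit.all_equal(f[:n] for f, _ in forms):
    n -= 1
print(n)  # 1: only 's' survives as the common prefix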
Example #13
def audit_pre_election_tables(pre_election_tables: PreElectionTables,
                              tally_tables: TallyTables) -> bool:
    if set(pre_election_tables.keys()) != set(tally_tables.keys()):
        print("Not matching keys for pre election and tally tables")
        return False

    for key in pre_election_tables:
        if not audit_pre_election_table(pre_election_tables[key],
                                        tally_tables[key]):
            print(f"Not valid table {key} for pre election and tally")
            return False

    if not all_equal(
            len(pre_election_table)
            for pre_election_table in pre_election_tables.values()):
        print(
            "Tables have different lengths; this should not happen")
        return False

    return True
Example #14
def audit_table(
    opened_votes_table: OpenedVoteTable, commitment_table: CommitmentTable
) -> bool:
    if len(opened_votes_table) == 0:
        print("Opened data empty")
        return False

    if len(opened_votes_table) != len(commitment_table):
        print("Not equal number of rows")
        return False

    # check that all votes have the same side opened
    if not all_equal(bool(opened_vote.left) for opened_vote in opened_votes_table):
        print("Two columns opened")
        return False

    for i, (opened_vote, commitment) in enumerate(
        zip(opened_votes_table, commitment_table)
    ):
        if audit_vote(opened_vote, commitment) is False:
            print(f"Row {i} commitment is not validating!")
            return False

    return True
Example #15
    def get_reaction(self):
        pcr = deepcopy(self.base_reaction)
    
        require_reagent(pcr, 'water')
        require_reagent(pcr, 'template DNA')
        require_reagent(pcr, 'forward primer')
        require_reagent(pcr, 'reverse primer')

        pcr.extra_volume = self.extra_volume_uL, 'µL'
        pcr.extra_percent = self.extra_percent
        
        if self.num_reactions:
            pcr.num_reactions = self.num_reactions
        if self.reaction_volume_uL:
            pcr.hold_ratios.volume = self.reaction_volume_uL, 'µL'

        pcr['water'].order = 1

        pcr['template DNA'].order = 2
        pcr['template DNA'].name = merge_names(self.template_tags)
        pcr['template DNA'].master_mix = all_equal(self.template_tags)

        if x := self.template_volume_uL:
            pcr['template DNA'].volume = x, 'µL'
Example #16
 def test_one(self):
     """Return True if the iterable is singular"""
     self.assertTrue(mi.all_equal('0'))
     self.assertTrue(mi.all_equal([0]))
Example #17
 def test_empty(self):
     """Return True if the iterable is empty"""
     self.assertTrue(mi.all_equal(''))
     self.assertTrue(mi.all_equal([]))
Example #18
 def test_tricky(self):
     """Not everything is identical, but everything is equal"""
     items = [1, complex(1, 0), 1.0]
     self.assertTrue(mi.all_equal(items))
Example #19
 def test_false(self):
     """Not everything is equal"""
     self.assertFalse(mi.all_equal('aaaaab'))
     self.assertFalse(mi.all_equal([0, 0, 0, 1]))
Example #20
 def test_true(self):
     """Everything is equal"""
     self.assertTrue(mi.all_equal('aaaaaa'))
     self.assertTrue(mi.all_equal([0, 0, 0, 0]))
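For reference, these tests exercise the classic itertools-recipes implementation of all_equal, which more_itertools ships in essentially this form (recent releases differ in detail):

from itertools import groupby

def all_equal(iterable):
    g = groupby(iterable)
    return next(g, True) and not next(g, False)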
Example #23
def bitstream_ins(iterable=None, n=None, bstream=None, fixed_length=False, lencode=False, v=False):
    """
    Return a BitStream representation of `iterable` by repeated insertions.
    `iterable` should be an iterable of integers, such as from `range(n)`.
    
      `bitstream_ins(range(0)).bin` --> `""`
      `bitstream_ins(range(1)).bin` --> `"0"`
      `bitstream_ins(range(2)).bin` --> `"01"`

    If `iterable` is not provided (or provided as `None`), `n` must be instead,
    and `iterable` will be created as `range(n)`. Providing both `iterable` and
    `n` will raise a `ValueError`.

    All bitstrings are of the same length when `fixed_length` is True, or
    else will be only as long as needed (i.e. no left zero padding) if False
    (default: False).

    If `fixed_length` is False and `lencode` is True, the bitstream will be
    further compressed by encoding the same value at different lengths.

    `range(6)` bitstrings if not `fixed_length`: `{0,1,10,11,100,101}`
     --> bitstream: '011011100101', length = 12
    `range(6)` bitstrings if not `fixed_length` & `lencode`: `{0,1,00,01,10,11}`
     --> bitstream: '0100011011', length = 10

    Must provide a uniquely decodable bitstream: cannot reuse codewords for different
    symbols ∴ `lencode` is forbidden unless `fixed_length` is False.

    Print progress information on the range being created and individual insertions
    (integer, length) if the verbosity parameter `v` is True.
    """
    assert not (lencode and fixed_length), ValueError("Can't lencode fixed length")
    if n is None:
        assert iterable is not None, ValueError("Must provide iterable or n")
        n = len(iterable)
        assert all_equal(map(type, iterable)), TypeError("iterable of mixed types")
        if n > 0:
            msg = "Not an iterable of `int`s: omit & provide `n=len(iterable)` instead"
            assert isinstance(iterable[0], int), TypeError(msg)
    else:
        assert iterable is None, ValueError("Provide either iterable or n, not both")
        assert isinstance(n, int), TypeError("n must be an integer")
        assert not n < 0, ValueError("n cannot be negative")
    if n == 0:
        # void iterator from 0-length `iterable` or `n=0` always gives empty bitstring
        return BitStream() # BitStream("").bin is BitStream().bin
    lenfunc = bitstr_len_lencoded if lencode else bitstr_len
    if fixed_length:
        if iterable is None:
            if n > 2:
                max_l = lenfunc(n-1)
            else:
                max_l = 1
        else:
            # Can't see how to check maximum value needed for max_l other than iterating
            max_val = max(iterable)
            max_l = lenfunc(max_val) if max_val > 1 else 1 # was unset here: fixes a NameError below
    bit_seq = range(n) if iterable is None else iterable
    if v:
        print(f"Initialising stream on {bit_seq}")
    if bstream is None:
        bstream = BitStream()
    assert isinstance(bstream, BitStream), TypeError("bstream ≠ bitstring⠶BitStream")
    if fixed_length:
        for b in bit_seq: # `range(n)` or `iterable`
            if v:
                print(f"Inserting {b} at length {max_l}")
            insert_into_bitstream(bstream, b, max_l)
    else:
        if lencode:
            bit_length_counter = bin_readout_offset = 0 # initialise counts
            for b in bit_seq: # `range(n)` or `iterable`
                if b > 1:
                    # no mantissa, int(log2(b+2)) via stackoverflow.com/a/28033134/
                    # log2(b+2)-1 gives the count of the run of `l`-length codewords
                    # or equivalently the power of the Mersenne prime max. codeword
                    # length `l`, e.g. 2^(3)-1 = 7, 7 is the max. lencoded codeword
                    # length `l=3` (i.e. in binary 7 is `111`).
                    l2bp2 = bitstr_len_lencoded(b) # l2bp2 means "log2 b plus 2"
                    if l2bp2 > bit_length_counter: # b = 2,6,14,30,...
                        bit_length_counter = l2bp2
                        bin_readout_offset = 2**bit_length_counter - 2
                    # (the rest) b = 3,4,5, 7,...,13, 15,...,29, 31,...
                    l = lenfunc(b) # assign l based on 'true' (decoded) value
                    # decoded value minus offset = encoded value
                    b -= bin_readout_offset # reassign rather than making a new name
                else: # (b < 2) are not modified by 'lencoding'
                    # fall through without counter++ or `b -= bin_readout_offset`
                    l = lenfunc(b)
                if v:
                    print(f"Inserting {b} at length {l}")
                insert_into_bitstream(bstream, b, l)
        else:
            for b in bit_seq: # `range(n)` or `iterable`
                l = lenfunc(b)
                if v:
                    print(f"Inserting {b} at length {l}")
                insert_into_bitstream(bstream, b, l)
    return bstream
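A minimal sketch checking the docstring's range(6) arithmetic without the bitstring dependency, assuming minimal-length codes are plain binary and lencoding maps 2..5 onto the four 2-bit codes by subtracting the offset 2:

minimal = ''.join(format(b, 'b') for b in range(6))
print(minimal, len(minimal))  # 011011100101 12

lencoded = ''.join(['0', '1'] + [format(b - 2, '02b') for b in range(2, 6)])
print(lencoded, len(lencoded))  # 0100011011 10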
Example #24
def merge_names(names):
    names = list(names)
    if all_equal(names):
        return names[0]
    else:
        return ','.join(names)
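Usage sketch with hypothetical tag names; all_equal collapses a repeated name instead of joining duplicates:

merge_names(['T7', 'T7'])   # -> 'T7'
merge_names(['T7', 'SP6'])  # -> 'T7,SP6'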
Example #25
from itertools import zip_longest
from typing import Iterable

from more_itertools import all_equal


def iequal(*iterables: Iterable) -> bool:
    """ Test if all iterable objects have identical contents """
    _sentinel = object()
    zipped = zip_longest(*iterables, fillvalue=_sentinel)
    return all(map(all_equal, zipped))
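The sentinel fill value is what makes unequal lengths fail: once a shorter iterable runs out, its column is padded with an object equal only to itself.

iequal('abc', iter('abc'))  # True, works on one-shot iterators too
iequal([1, 2], [1, 2, 3])   # False: the third column is (_sentinel, 3)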
Example #26
def get_habitat_download_info(allow_create: bool = False):
    """Get a dictionary giving a specification of where habitat data lives
    online.

    # Parameters

    allow_create: Whether or not we should try to regenerate the json file that represents
        the above dictionary. This is potentially unsafe so please only set this to `True`
        if you're sure it will download what you want.
    """
    json_save_path = os.path.join(DATASET_DIR,
                                  ".habitat_datasets_download_info.json")
    if allow_create and not os.path.exists(json_save_path):
        url = "https://raw.githubusercontent.com/facebookresearch/habitat-lab/master/README.md"
        output = urlopen(url).read().decode("utf-8")

        lines = [l.strip() for l in output.split("\n")]

        task_table_started = False
        table_lines = []
        for l in lines:
            if l.count("|") > 3 and l[0] == l[-1] == "|":
                if task_table_started:
                    table_lines.append(l)
                elif "Task" in l and "Link" in l:
                    task_table_started = True
                    table_lines.append(l)
            elif task_table_started:
                break

        url_pat = re.compile(r"\[.*\]\((.*)\)")

        def get_url(in_str: str):
            match = re.match(pattern=url_pat, string=in_str)
            if match:
                return match.group(1)
            else:
                return in_str

        header = None
        rows = []
        for i, l in enumerate(table_lines):
            l = l.strip("|")
            entries = [
                get_url(e.strip().replace("`", "")) for e in l.split("|")
            ]

            if i == 0:
                header = [e.lower().replace(" ", "_") for e in entries]
            elif not all_equal(entries):
                rows.append(entries)

        link_ind = header.index("link")
        extract_ind = header.index("extract_path")
        config_ind = header.index("config_to_use")
        assert link_ind >= 0

        data_info = {}
        for row in rows:
            id = row[link_ind].split("/")[-1].replace(".zip",
                                                      "").replace("_", "-")
            data_info[id] = {
                "link": row[link_ind],
                "rel_path": row[extract_ind],
                "config_url": row[config_ind],
            }

        with open(json_save_path, "w") as f:
            json.dump(data_info, f)

    with open(json_save_path, "r") as f:
        return json.load(f)
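The `not all_equal(entries)` filter in the row loop drops the markdown separator line, assuming its columns use equal runs of dashes (as in the habitat-lab README):

from more_itertools import all_equal

row = "| ------ | ------ | ------ |".strip("|")
entries = [e.strip() for e in row.split("|")]
print(all_equal(entries))  # True, so the row is skipped rather than kept as data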
Example #27
	def playlist_songs_move(
		self,
		playlist_songs,
		*,
		after=None,
		before=None,
		index=None,
		position=None
	):
		"""Move songs in a playlist.

		Note:
			* Provide no optional arguments to move to end.
			* Provide playlist song dicts for ``after`` and/or ``before``.
			* Provide a zero-based ``index``.
			* Provide a one-based ``position``.

			Songs are inserted *at* the given index or position.
			It's also possible to move to the end by using
			``len(songs)`` for index or ``len(songs) + 1`` for position.

		Parameters:
			playlist_songs (list): A list of playlist song dicts.
			after (dict, Optional): A playlist song dict ``songs`` will follow.
			before (dict, Optional): A playlist song dict ``songs`` will precede.
			index (int, Optional): The zero-based index position to insert ``songs``.
			position (int, Optional): The one-based position to insert ``songs``.

		Returns:
			dict: Playlist dict including songs.
		"""

		if not more_itertools.all_equal(
			playlist_song['playlistId']
			for playlist_song in playlist_songs
		):
			raise ValueError("All 'playlist_songs' must be from the same playlist.")

		playlist = self.playlist(playlist_songs[0]['playlistId'], include_songs=True)

		prev, next_ = get_ple_prev_next(
			playlist['tracks'],
			after=after,
			before=before,
			index=index,
			position=position,
		)

		prev_id = prev.get('id')
		next_id = next_.get('id')

		mutations = []
		for playlist_song in playlist_songs:
			mutation = mc_calls.PlaylistEntriesBatch.update(
				playlist_song,
				preceding_entry_id=prev_id,
				following_entry_id=next_id
			)
			mutations.append(mutation)
			prev_id = playlist_song['id']

		self._call(
			mc_calls.PlaylistEntriesBatch,
			mutations
		)

		return self.playlist(
			playlist['id'],
			include_songs=True
		)
Example #29
def merge_zip_files_func():
    from more_itertools import all_equal
    from fs import open_fs
    from fs.compress import write_zip
    from fs.copy import copy_fs_if_newer
    from filetype import guess

    def ask_for_dst_path():
        return tui.prompt_input(
            '? Input the path of the ZIP file to merge into: ')

    def is_zip_file(path: str):
        mime = guess(path).mime
        if mime == 'application/zip':
            return True
        else:
            print(f'! Not a ZIP file: {path}')
            return False

    args = rtd.args
    dry_run = args.dry_run
    auto_yes = args.yes
    src_l = args.src or mylib.ex.ostk.clipboard.list_path()
    src_l = [s for s in src_l if is_zip_file(s)]

    if len(src_l) < 2:
        print('! Need at least 2 ZIP files')
        return
    print('# Merge all below ZIP files:')
    print('\n'.join(src_l))
    dbx_l = [mylib.easy.split_path_dir_base_ext(p) for p in src_l]
    if all_equal([d for d, b, x in dbx_l]):
        common_dir = dbx_l[0][0]
    else:
        common_dir = ''
    if all_equal([x for d, b, x in dbx_l]):
        common_ext = dbx_l[0][-1]
    else:
        common_ext = ''
    if common_dir and common_ext:
        common_base = os.path.commonprefix([b for d, b, x in dbx_l]).strip()
        if common_base:
            tmp_dst = mylib.easy.join_path_dir_base_ext(
                common_dir, common_base, common_ext)
            if auto_yes or tui.prompt_confirm(
                    f'? Merge into ZIP file "{tmp_dst}"', default=True):
                dst = tmp_dst
            else:
                dst = ask_for_dst_path()
        else:
            dst = ask_for_dst_path()
    elif common_dir:
        if auto_yes or tui.prompt_confirm(
                f'? Put merged ZIP file into this dir "{common_dir}"',
                default=True):
            filename = tui.prompt_input(
                f'? Input the basename of the ZIP file to merge into: ')
            dst = fstk.make_path(common_dir, filename)
        else:
            dst = ask_for_dst_path()
    else:
        dst = ask_for_dst_path()
    if dry_run:
        print(f'@ Merge into ZIP file "{dst}"')
        return
    print(f'* Merge into ZIP file "{dst}"')
    with open_fs('mem://tmp') as tmp:
        for s in src_l:
            with open_fs(f'zip://{s}') as z:
                copy_fs_if_newer(
                    z, tmp
                )  # todo: seems to compare the mtime of the ZIP FS itself, not the files inside
        write_zip(tmp, dst)
    for s in src_l:
        if s == dst:
            continue
        send2trash(s)
        print(f'# Trash <- {s}')
Example #31
    def load(cls, data):
        if not isinstance(data, DataReader):  # pragma: nocover
            data = DataReader(data)

        frames = cls.find_mp3_frames(data)

        samples_per_frame, _ = MP3SamplesPerFrame[(frames[0].version,
                                                   frames[0].layer)]

        data.seek(0, os.SEEK_END)
        end_pos = data.tell()

        # This is an arbitrary amount that should hopefully encompass all end tags.
        # Starting low so as not to add unnecessary processing time.
        chunk_size = 64 * 1024
        if end_pos > chunk_size:
            data.seek(-(chunk_size), os.SEEK_END)
        else:
            data.seek(0, os.SEEK_SET)

        end_buffer = data.read()

        end_tag_offset = 0
        for tag_type in [b'APETAGEX', b'LYRICSBEGIN', b'TAG']:
            tag_offset = end_buffer.rfind(tag_type)

            if tag_offset > 0:
                tag_offset = len(end_buffer) - tag_offset

                if tag_offset > end_tag_offset:
                    end_tag_offset = tag_offset

        audio_start = frames[0]._start
        audio_size = end_pos - audio_start - end_tag_offset

        bitrate_mode = MP3BitrateMode.UNKNOWN

        xing_header = frames[0]._xing
        if xing_header:
            num_samples = samples_per_frame * xing_header.num_frames

            # I prefer to include the Xing/LAME header as part of the audio.
            # Google Music seems to do so for calculating client ID.
            # Haven't tested in too many other scenarios.
            # But, there should be enough low-level info for people to calculate this if desired.
            if xing_header._lame:
                # Old versions of LAME wrote invalid delay/padding for
                # short MP3s with low bitrate.
                # Subtract them only if there would be samples left.
                lame_padding = xing_header._lame.delay + xing_header._lame.padding
                if lame_padding < num_samples:
                    num_samples -= lame_padding

                if xing_header._lame.bitrate_mode in [1, 8]:
                    bitrate_mode = MP3BitrateMode.CBR
                elif xing_header._lame.bitrate_mode in [2, 9]:
                    bitrate_mode = MP3BitrateMode.ABR
                elif xing_header._lame.bitrate_mode in [3, 4, 5, 6]:
                    bitrate_mode = MP3BitrateMode.VBR
        else:
            if more_itertools.all_equal([frame['bitrate']
                                         for frame in frames]):
                bitrate_mode = MP3BitrateMode.CBR

            num_samples = samples_per_frame * (audio_size / frames[0]._size)

        if bitrate_mode == MP3BitrateMode.CBR:
            bitrate = frames[0].bitrate
        else:
            # Subtract Xing/LAME frame size from audio_size for bitrate calculation accuracy.
            if xing_header:
                bitrate = ((audio_size - frames[0]._size) * 8 *
                           frames[0].sample_rate) / num_samples
            else:
                bitrate = (audio_size * 8 *
                           frames[0].sample_rate) / num_samples

        duration = (audio_size * 8) / bitrate

        version = frames[0].version
        layer = frames[0].layer
        protected = frames[0].protected
        sample_rate = frames[0].sample_rate
        channel_mode = frames[0].channel_mode
        channels = frames[0].channels

        return cls(audio_start, audio_size, xing_header, version, layer,
                   protected, bitrate, bitrate_mode, channel_mode, channels,
                   duration, sample_rate)
Example #33
        # Setup the primers.  This is complicated because the primers might 
        # get split into their own mix, if the volumes that would be added 
        # to the PCR reaction are too small.

        primer_mix = None
        primer_keys = [
                'forward primer',
                'reverse primer',
        ]
        use_primer_mix = []

        for p, primers in zip(primer_keys, zip(*self.primer_pairs)):
            pcr[p].order = 3
            pcr[p].name = merge_names(x.tag for x in primers)
            pcr[p].hold_conc.stock_conc = min(x.stock_uM for x in primers), 'µM'
            pcr[p].master_mix = all_equal(x.tag for x in primers)

            primer_scale = pcr.scale if pcr[p].master_mix else 1
            primer_vol = primer_scale * pcr[p].volume

            if primer_vol < '0.5 µL':
                use_primer_mix.append(p)

        if use_primer_mix:
            pcr['primer mix'].order = 4
            pcr['primer mix'].stock_conc = '10x'
            pcr['primer mix'].volume = pcr.volume / 10
            pcr['primer mix'].master_mix = all(
                    pcr[p].master_mix
                    for p in use_primer_mix
            )