def _make_genomic_array(name, gsize, seqs, order, storage, cache=None,
                        datatags=None, overwrite=False,
                        store_whole_genome=True, random_state=None):
    """Create a genomic array or reload an existing one."""
    if overwrite:
        warnings.warn('overwrite=True is without effect '
                      'due to revised caching functionality. '
                      'The argument will be removed in the future.',
                      FutureWarning)
    if datatags is not None:
        warnings.warn('datatags is without effect '
                      'due to revised caching functionality. '
                      'The argument will be removed in the future.',
                      FutureWarning)

    # always use int16 to store bioseq indices
    # do not use int8 at the moment, because 'N' is encoded
    # as -1024, which causes an underflow with int8.
    # int32 is used for order > 3 to be more permissive about the order,
    # since the k-mer indices outgrow int16.
    dtype = 'int16' if order <= 3 else 'int32'

    seqloader = SeqLoader(gsize, seqs, order)

    datatags = [name]

    if cache:
        files = seqs
        parameters = [gsize.tostr(), storage, dtype, order,
                      store_whole_genome, version, random_state]
        cache_hash = create_sha256_cache(files, parameters)
    else:
        cache_hash = None

    garray = create_genomic_array(gsize, stranded=False,
                                  storage=storage, datatags=datatags,
                                  cache=cache_hash,
                                  store_whole_genome=store_whole_genome,
                                  order=order, conditions=['idx'],
                                  overwrite=overwrite,
                                  padding_value=NOLETTER,
                                  typecode=dtype,
                                  loader=seqloader)
    return garray
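
# Why the order <= 3 cutoff above is safe for int16: a standalone sketch
# (illustration only, not part of the original module). An order-k k-mer
# over {A, C, G, T} maps to one of 4**k indices, and, assuming the 'N'
# sentinel (-1024) propagates through the order-k convolution used by the
# sequence loader, the most negative value is -1024 * sum(4**i for i in
# range(k)).
def _dtype_threshold_demo():
    import numpy as np
    for order in range(1, 5):
        n_kmers = 4 ** order
        worst_n = -1024 * sum(4 ** i for i in range(order))
        fits_int16 = (n_kmers - 1 <= np.iinfo(np.int16).max
                      and worst_n >= np.iinfo(np.int16).min)
        print(order, n_kmers, worst_n,
              'int16 ok' if fits_int16 else 'needs int32')
    # prints 'needs int32' first at order=4, where worst_n = -87040
    # underflows int16, matching the order <= 3 cutoff.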
def _make_genomic_array(name, fastafile, order, storage, cache=True,
                        datatags=None, overwrite=False,
                        store_whole_genome=True):
    """Create a genomic array or reload an existing one."""
    # always use int16 to store bioseq indices
    # do not use int8 at the moment, because 'N' is encoded
    # as -1024, which causes an underflow with int8.
    dtype = 'int16'

    # Load sequences from refgenome
    seqs = fastafile

    # Extract chromosome lengths
    chromlens = {}
    for seq in seqs:
        chromlens[seq.id] = len(seq) - order + 1

    seqloader = SeqLoader(seqs, order)

    datatags = [name] + datatags if datatags else [name]
    datatags += ['order{}'.format(order)]

    garray = create_genomic_array(chromlens, stranded=False,
                                  storage=storage, datatags=datatags,
                                  cache=cache,
                                  store_whole_genome=store_whole_genome,
                                  order=order, conditions=['idx'],
                                  overwrite=overwrite,
                                  typecode=dtype,
                                  loader=seqloader)
    return garray
def _make_genomic_array(name, fastafile, order, storage, seqtype,
                        cache=True, datatags=None, overwrite=False,
                        store_whole_genome=True):
    """Create a genomic array or reload an existing one."""
    # always use int16 to store bioseq indices
    # do not use int8 at the moment, because 'N' is encoded
    # as -1024, which causes an underflow with int8.
    dtype = 'int16'

    # Load sequences from refgenome
    seqs = []
    if isinstance(fastafile, str):
        fastafile = [fastafile]

    if not isinstance(fastafile[0], Bio.SeqRecord.SeqRecord):
        for fasta in fastafile:
            # += is necessary since sequences_from_fasta
            # returns a list
            seqs += sequences_from_fasta(fasta, seqtype)
    else:
        # This is already a list of SeqRecords
        seqs = fastafile

    # Extract chromosome lengths
    chromlens = {}
    for seq in seqs:
        chromlens[seq.id] = len(seq) - order + 1

    def _seq_loader(cover, seqs, order):
        print('Convert sequences to index array')
        for seq in seqs:
            if cover._full_genome_stored:
                interval = GenomicInterval(seq.id, 0,
                                           len(seq) - order + 1, '.')
            else:
                interval = GenomicInterval(
                    *_str_to_iv(seq.id, template_extension=0))

            indarray = np.asarray(seq2ind(seq), dtype=dtype)

            if order > 1:
                # for higher order motifs, this part is used
                filter_ = np.asarray([pow(len(seq.seq.alphabet.letters), i)
                                      for i in range(order)])
                indarray = np.convolve(indarray, filter_, mode='valid')

            cover[interval, 0] = indarray

    datatags = [name] + datatags if datatags else [name]
    datatags += ['order{}'.format(order)]

    cover = create_genomic_array(chromlens, stranded=False,
                                 storage=storage, datatags=datatags,
                                 cache=cache,
                                 store_whole_genome=store_whole_genome,
                                 order=order, conditions=['idx'],
                                 overwrite=overwrite,
                                 typecode=dtype,
                                 loader=_seq_loader,
                                 loader_args=(seqs, order))
    return cover
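
# A self-contained sketch of the higher-order encoding performed by
# _seq_loader above. LETTER_TO_IDX and _to_kmer_indices are hypothetical
# helpers for illustration; in the library, seq2ind performs the
# letter-to-index mapping.
LETTER_TO_IDX = {'A': 0, 'C': 1, 'G': 2, 'T': 3}

def _to_kmer_indices(sequence, order):
    """Encode a DNA string as overlapping k-mer indices."""
    import numpy as np
    indarray = np.asarray([LETTER_TO_IDX[letter] for letter in sequence],
                          dtype='int16')
    if order == 1:
        return indarray
    # The filter [4**0, 4**1, ...] interprets each length-`order` window
    # as a base-4 number; np.convolve applies the filter reversed, so the
    # leftmost letter receives the highest weight.
    filter_ = np.asarray([pow(4, i) for i in range(order)])
    return np.convolve(indarray, filter_, mode='valid')

# _to_kmer_indices('ACGT', 1) -> [0, 1, 2, 3]
# _to_kmer_indices('ACGT', 2) -> [1, 6, 11]   # AC, CG, GT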
def create_from_bam(cls, name,  # pylint: disable=too-many-locals
                    bamfiles,
                    regions=None,
                    genomesize=None,
                    conditions=None,
                    min_mapq=None,
                    binsize=None,
                    stepsize=None,
                    flank=0,
                    resolution=1,
                    storage='ndarray',
                    dtype='int',
                    stranded=True,
                    overwrite=False,
                    pairedend='5prime',
                    template_extension=0,
                    aggregate=None,
                    datatags=None,
                    cache=False,
                    channel_last=True,
                    store_whole_genome=False):
    """Create a Cover class from a bam-file (or files).

    This constructor can be used to obtain coverage from BAM files.
    For single-end reads, the read is counted at the 5 prime end.
    Paired-end reads can be counted relative to the 5 prime ends of the
    reads (default) or with respect to the midpoint.

    Parameters
    ----------
    name : str
        Name of the dataset
    bamfiles : str or list
        bam-file or list of bam files.
    regions : str or None
        Bed-file defining the region of interest.
        If set to None, the coverage will be fetched from the entire
        genome and a genomic indexer must be attached later.
    genomesize : dict or None
        Dictionary containing the genome size.
        If `genomesize=None`, the genome size is determined from the
        bam header. If `store_whole_genome=False`, this option does
        not have an effect.
    conditions : list(str) or None
        List of conditions.
        If `conditions=None`, the conditions are obtained from
        the filenames (without the directories and file-ending).
    min_mapq : int
        Minimal mapping quality. Reads with lower mapping quality are
        filtered out. If None, all reads are used.
    binsize : int or None
        Binsize in basepairs. For binsize=None, the binsize will be
        determined from the bed-file directly, which requires that all
        intervals in the bed-file are of equal length. Otherwise, the
        intervals in the bed-file will be split into subintervals of
        length binsize in conjunction with stepsize. Default: None.
    stepsize : int or None
        stepsize in basepairs for traversing the genome.
        If stepsize is None, it will be set equal to binsize.
        Default: None.
    flank : int
        Flanking size increases the interval size at both ends by
        flank base pairs. Default: 0
    resolution : int
        Resolution in base pairs divides the region of interest
        into windows of length resolution.
        This effectively reduces the storage for coverage data.
        The resolution must be selected such that
        min(stepsize, binsize) is a multiple of resolution. Default: 1.
    storage : str
        Storage mode for storing the coverage data can be
        'ndarray', 'hdf5' or 'sparse'. Default: 'ndarray'.
    dtype : str
        Typecode to be used for storing the data. Default: 'int'.
    stranded : boolean
        Indicates whether to extract stranded or unstranded coverage.
        For unstranded coverage, reads aligning to both strands will
        be aggregated.
    overwrite : boolean
        Overwrite cachefiles. Default: False.
    datatags : list(str) or None
        List of datatags. Together with the dataset name,
        the datatags are used to construct a cache file.
        If :code:`cache=False`, this option does not have an effect.
        Default: None.
    pairedend : str
        Indicates whether to count reads at the '5prime' end or at
        the 'midpoint' for paired-end reads. Default: '5prime'.
    template_extension : int
        Elongates intervals by template_extension, which allows to
        properly count template mid-points whose reads lie outside of
        the interval. This option is only relevant for paired-end reads
        counted at the 'midpoint' and if the coverage is not obtained
        from the whole genome, e.g. regions is not None.
    aggregate : callable or None
        Aggregation operation for loading genomic array. If None,
        the coverage amounts to the raw counts. Default: None
    cache : boolean
        Indicates whether to cache the dataset. Default: False.
    channel_last : boolean
        Indicates whether the condition axis should be the last
        dimension or the first. For example, tensorflow expects the
        channel at the last position. Default: True.
    store_whole_genome : boolean
        Indicates whether the whole genome or only selected regions
        should be loaded. If False, a bed-file with regions of interest
        must be specified. Default: False.
    """
    if pysam is None:  # pragma: no cover
        raise Exception('pysam not available. '
                        '`create_from_bam` requires pysam to be installed.')

    if regions is not None:
        gindexer = GenomicIndexer.create_from_file(regions, binsize,
                                                   stepsize, flank)
    else:
        gindexer = None

    if isinstance(bamfiles, str):
        bamfiles = [bamfiles]

    if conditions is None:
        conditions = [os.path.splitext(os.path.basename(f))[0]
                      for f in bamfiles]

    if min_mapq is None:
        min_mapq = 0

    full_genome_index = store_whole_genome

    if not full_genome_index and not gindexer:
        raise ValueError('Either regions must be supplied or '
                         'store_whole_genome must be True')

    if not full_genome_index:
        # if whole genome should not be loaded
        gsize = {_iv_to_str(iv.chrom, iv.start, iv.end): iv.end - iv.start
                 for iv in gindexer}
    else:
        # otherwise the whole genome will be fetched, or at least
        # a set of full length chromosomes
        if genomesize is not None:
            # if a genome size has specifically been given, use it.
            gsize = genomesize.copy()
        else:
            header = pysam.AlignmentFile(bamfiles[0], 'r')  # pylint: disable=no-member
            gsize = {}
            for chrom, length in zip(header.references, header.lengths):
                gsize[chrom] = length

    def _bam_loader(garray, files):
        print("load from bam")
        for i, sample_file in enumerate(files):
            print('Counting from {}'.format(sample_file))
            aln_file = pysam.AlignmentFile(sample_file, 'rb')  # pylint: disable=no-member
            for chrom in gsize:
                array = np.zeros((get_chrom_length(gsize[chrom],
                                                   resolution), 2),
                                 dtype=dtype)

                locus = _str_to_iv(chrom,
                                   template_extension=template_extension)
                if len(locus) == 1:
                    locus = (locus[0], 0, gsize[chrom])
                # locus = (chr, start, end)
                # or locus = (chr, )

                for aln in aln_file.fetch(*locus):

                    if aln.is_unmapped:
                        continue

                    if aln.mapq < min_mapq:
                        continue

                    if aln.is_read2:
                        # only consider read1 so as not to double count
                        # fragments for paired end reads
                        # read2 will also be false for single end
                        # reads.
                        continue

                    if aln.is_paired:
                        # if paired end read, consider the midpoint
                        if not (aln.is_proper_pair and
                                aln.reference_name == aln.next_reference_name):
                            # only consider paired end reads if both mates
                            # are properly mapped and they map to the
                            # same reference_name
                            continue
                        # if the next reference start >= 0,
                        # the read is considered as a paired end read
                        # in this case we consider the mid point
                        if pairedend == 'midpoint':
                            pos = min(aln.reference_start,
                                      aln.next_reference_start) + \
                                abs(aln.template_length) // 2
                        else:
                            if aln.is_reverse:
                                # last position of the downstream read
                                pos = max(aln.reference_end,
                                          aln.next_reference_start +
                                          aln.query_length)
                            else:
                                # first position of the upstream read
                                pos = min(aln.reference_start,
                                          aln.next_reference_start)
                    else:
                        # here we consider single end reads
                        # whose 5 prime end is determined strand specifically
                        if aln.is_reverse:
                            pos = aln.reference_end
                        else:
                            pos = aln.reference_start

                    if not garray._full_genome_stored:
                        # if we get here, a region was given,
                        # otherwise, the entire chromosome is read.
                        pos -= locus[1] + template_extension

                        if pos < 0 or pos >= locus[2] - locus[1]:
                            # if the read 5 p end or mid point is outside
                            # of the region of interest, the read is discarded
                            continue

                    # divide the position by the resolution
                    pos //= resolution

                    # fill up the read strand specifically
                    if aln.is_reverse:
                        array[pos, 1] += 1
                    else:
                        array[pos, 0] += 1

                # apply the aggregation
                if aggregate is not None:
                    array = aggregate(array)

                if stranded:
                    lp = locus + ('+',)
                    garray[GenomicInterval(*lp), i] = array[:, 0]
                    lm = locus + ('-',)
                    garray[GenomicInterval(*lm), i] = array[:, 1]
                else:
                    # if unstranded, aggregate the reads from
                    # both strands
                    garray[GenomicInterval(*locus), i] = array.sum(axis=1)

        return garray

    datatags = [name] + datatags if datatags else [name]

    # At the moment, we treat the information contained
    # in each bam-file as unstranded
    cover = create_genomic_array(gsize, stranded=stranded,
                                 storage=storage, datatags=datatags,
                                 cache=cache,
                                 conditions=conditions,
                                 overwrite=overwrite,
                                 typecode=dtype,
                                 store_whole_genome=store_whole_genome,
                                 resolution=resolution,
                                 loader=_bam_loader,
                                 loader_args=(bamfiles,))

    return cls(name, cover, gindexer,
               padding_value=0, dimmode='all',
               channel_last=channel_last)
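
# Usage sketch for create_from_bam (illustration only; the file names are
# hypothetical and the call assumes janggu's public Cover API):
def _example_bam_coverage():
    from janggu.data import Cover
    cover = Cover.create_from_bam(
        'read_coverage',
        bamfiles='sample.bam',              # or a list of BAM files,
        regions='regions_of_interest.bed',  # one condition per file
        binsize=200, stepsize=200,
        resolution=50,                      # four 50 bp windows per bin
        pairedend='midpoint',               # count fragments at midpoints
        stranded=True,
        store_whole_genome=False)
    # with channel_last=True the dataset is indexed roughly as
    # (region, window, strand, condition)
    return cover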
def create_from_array(cls, name,  # pylint: disable=too-many-locals
                      array,
                      gindexer,
                      genomesize=None,
                      conditions=None,
                      resolution=1,
                      storage='ndarray',
                      overwrite=False,
                      datatags=None,
                      cache=False,
                      channel_last=True,
                      store_whole_genome=False):
    """Create a Cover class from a numpy.array.

    The purpose of this function is to convert output predictions from
    keras, which are in numpy.array format, into a Cover object.

    Parameters
    ----------
    name : str
        Name of the dataset
    array : numpy.array
        A 4D numpy array that will be re-interpreted as genomic array.
    gindexer : GenomicIndexer
        Genomic indices associated with the values contained in array.
    genomesize : dict or None
        Dictionary containing the genome size to fetch the coverage from.
        If `genomesize=None`, the genome size is automatically determined
        from the GenomicIndexer. If `store_whole_genome=False`, this
        option does not have an effect.
    conditions : list(str) or None
        List of conditions.
        If `conditions=None`, the conditions are obtained from
        the filenames (without the directories and file-ending).
    resolution : int
        Resolution in base pairs divides the region of interest
        into windows of length resolution.
        This effectively reduces the storage for coverage data.
        The resolution must be selected such that
        min(stepsize, binsize) is a multiple of resolution. Default: 1.
    storage : str
        Storage mode for storing the coverage data can be
        'ndarray', 'hdf5' or 'sparse'. Default: 'ndarray'.
    overwrite : boolean
        Overwrite cachefiles. Default: False.
    datatags : list(str) or None
        List of datatags. Together with the dataset name,
        the datatags are used to construct a cache file.
        If :code:`cache=False`, this option does not have an effect.
        Default: None.
    cache : boolean
        Indicates whether to cache the dataset. Default: False.
    store_whole_genome : boolean
        Indicates whether the whole genome or only selected regions
        should be loaded. Default: False.
    channel_last : boolean
        This tells the constructor how to interpret the array dimensions.
        It indicates whether the condition axis is the last dimension
        or the first. For example, tensorflow expects the channel at the
        last position. Default: True.
    """
    if not store_whole_genome:
        # if whole genome should not be loaded
        gsize = {_iv_to_str(iv.chrom, iv.start, iv.end): iv.end - iv.start
                 for iv in gindexer}
    elif genomesize:
        gsize = genomesize.copy()
    else:
        # if not supplied, determine the genome size automatically
        # based on the gindexer intervals.
        gsize = get_genome_size_from_regions(gindexer)

    if not channel_last:
        # convert a channel-first array (N, C, L, S) to the
        # channel-last layout (N, L, S, C) expected below
        array = np.transpose(array, (0, 2, 3, 1))

    if conditions is None:
        conditions = ["Cond_{}".format(i) for i in range(array.shape[-1])]

    # check if dimensions of gindexer and array match
    if len(gindexer) != array.shape[0]:
        raise ValueError("Data incompatible: "
                         "the number of intervals in gindexer "
                         "must match the number of datapoints in the array "
                         "(len(gindexer) != array.shape[0])")

    if store_whole_genome:
        # in this case the intervals must be non-overlapping
        # in order to obtain unambiguous data.
        if gindexer.binsize > gindexer.stepsize:
            raise ValueError("Overlapping intervals: "
                             "With overlapping intervals the mapping "
                             "between the array and genomic-array values "
                             "is ambiguous. "
                             "Please ensure that binsize <= stepsize.")

    # determine the resolution
    resolution = gindexer[0].length // array.shape[1]

    # determine strandedness
    stranded = array.shape[2] == 2

    def _array_loader(garray, array, gindexer):
        print("load from array")
        for i, region in enumerate(gindexer):
            iv = region
            for cond in range(array.shape[-1]):
                if stranded:
                    iv.strand = '+'
                    garray[iv, cond] = array[i, :, 0, cond]
                    iv.strand = '-'
                    garray[iv, cond] = array[i, :, 1, cond]
                else:
                    garray[iv, cond] = array[i, :, 0, cond]

        return garray

    datatags = [name] + datatags if datatags else [name]
    datatags += ['resolution{}'.format(resolution)]

    cover = create_genomic_array(gsize, stranded=stranded,
                                 storage=storage, datatags=datatags,
                                 cache=cache,
                                 conditions=conditions,
                                 resolution=resolution,
                                 overwrite=overwrite,
                                 typecode=array.dtype,
                                 store_whole_genome=store_whole_genome,
                                 loader=_array_loader,
                                 loader_args=(array, gindexer))

    return cls(name, cover, gindexer,
               padding_value=0, dimmode='all',
               channel_last=channel_last)
def create_from_bed(cls, name,  # pylint: disable=too-many-locals
                    bedfiles,
                    regions=None,
                    genomesize=None,
                    conditions=None,
                    binsize=None,
                    stepsize=None,
                    resolution=1,
                    flank=0,
                    storage='ndarray',
                    dtype='int',
                    dimmode='all',
                    mode='binary',
                    store_whole_genome=False,
                    overwrite=False,
                    channel_last=True,
                    datatags=None,
                    cache=False):
    """Create a Cover class from a bed-file (or files).

    Parameters
    ----------
    name : str
        Name of the dataset
    bedfiles : str or list
        bed-file or list of bed files.
    regions : str or None
        Bed-file defining the region of interest.
        If set to None, a genomesize must be supplied and
        a genomic indexer must be attached later.
    genomesize : dict or None
        Dictionary containing the genome size to fetch the coverage from.
        If `genomesize=None`, the genome size is fetched from the
        region of interest.
    conditions : list(str) or None
        List of conditions.
        If `conditions=None`, the conditions are obtained from
        the filenames (without the directories and file-ending).
    binsize : int or None
        Binsize in basepairs. For binsize=None, the binsize will be
        determined from the bed-file directly, which requires that all
        intervals in the bed-file are of equal length. Otherwise, the
        intervals in the bed-file will be split into subintervals of
        length binsize in conjunction with stepsize. Default: None.
    stepsize : int or None
        stepsize in basepairs for traversing the genome.
        If stepsize is None, it will be set equal to binsize.
        Default: None.
    resolution : int
        Resolution in base pairs divides the region of interest
        into windows of length resolution.
        This effectively reduces the storage for coverage data.
        The resolution must be selected such that
        min(stepsize, binsize) is a multiple of resolution. Default: 1.
    flank : int
        Flanking size increases the interval size at both ends by
        flank bins. Note that the binsize is defined by the resolution
        parameter. Default: 0.
    storage : str
        Storage mode for storing the coverage data can be
        'ndarray', 'hdf5' or 'sparse'. Default: 'ndarray'.
    dtype : str
        Typecode to define the datatype to be used for storage.
        Default: 'int'.
    dimmode : str
        Dimension mode can be 'first' or 'all'. If 'first', only
        the first element of size resolution is returned. Otherwise,
        all elements of size resolution spanning the interval are
        returned. Default: 'all'.
    mode : str
        Mode of the dataset may be 'binary', 'score' or 'categorical'.
        Default: 'binary'.
    overwrite : boolean
        Overwrite cachefiles. Default: False.
    datatags : list(str) or None
        List of datatags. Together with the dataset name,
        the datatags are used to construct a cache file.
        If :code:`cache=False`, this option does not have an effect.
        Default: None.
    store_whole_genome : boolean
        Indicates whether the whole genome or only selected regions
        should be loaded. If False, a bed-file with regions of interest
        must be specified. Default: False.
    channel_last : boolean
        Indicates whether the condition axis should be the last
        dimension or the first. For example, tensorflow expects the
        channel at the last position. Default: True.
    cache : boolean
        Indicates whether to cache the dataset. Default: False.
    """
    if regions is None and genomesize is None:
        raise ValueError('Either regions or genomesize must be specified.')

    if regions is not None:
        gindexer = GenomicIndexer.create_from_file(regions, binsize,
                                                   stepsize, flank)
    else:
        gindexer = None

    if not store_whole_genome:
        # if whole genome should not be loaded
        gsize = {_iv_to_str(iv.chrom, iv.start, iv.end): iv.end - iv.start
                 for iv in gindexer}
    else:
        # otherwise the whole genome will be fetched, or at least
        # a set of full length chromosomes
        if genomesize is not None:
            # if a genome size has specifically been given, use it.
            gsize = genomesize.copy()
        else:
            gsize = get_genome_size_from_regions(regions)

    if isinstance(bedfiles, str):
        bedfiles = [bedfiles]

    if mode == 'categorical':
        if len(bedfiles) > 1:
            raise ValueError('Only one bed-file is '
                             'allowed with mode=categorical')
        sample_file = bedfiles[0]
        regions_ = _get_genomic_reader(sample_file)

        max_class = 0
        for reg in regions_:
            if reg.score > max_class:
                max_class = reg.score
        if conditions is None:
            conditions = [str(i) for i in range(int(max_class + 1))]

    if conditions is None:
        conditions = [os.path.splitext(os.path.basename(f))[0]
                      for f in bedfiles]

    def _bed_loader(garray, bedfiles, genomesize, mode):
        print("load from bed")
        for i, sample_file in enumerate(bedfiles):
            regions_ = _get_genomic_reader(sample_file)

            for region in regions_:
                gidx = GenomicIndexer.create_from_region(
                    region.iv.chrom, region.iv.start,
                    region.iv.end, region.iv.strand,
                    binsize, stepsize, flank)
                for greg in gidx:
                    if region.score is None and mode in ['score',
                                                         'categorical']:
                        raise ValueError(
                            'No score available. The score field must be '
                            'present in {} '.format(sample_file) +
                            'for mode="{}"'.format(mode))
                    # if the region score is not defined, take the mere
                    # presence of a range as positive label.
                    if mode == 'score':
                        garray[greg, i] = np.dtype(dtype).type(region.score)
                    elif mode == 'categorical':
                        garray[greg, int(region.score)] = \
                            np.dtype(dtype).type(1)
                    elif mode == 'binary':
                        garray[greg, i] = np.dtype(dtype).type(1)

        return garray

    # At the moment, we treat the information contained
    # in each bed-file as unstranded
    datatags = [name] + datatags if datatags else [name]
    datatags += ['resolution{}'.format(resolution)]

    cover = create_genomic_array(gsize, stranded=False,
                                 storage=storage, datatags=datatags,
                                 cache=cache,
                                 conditions=conditions,
                                 resolution=resolution,
                                 overwrite=overwrite,
                                 typecode=dtype,
                                 store_whole_genome=store_whole_genome,
                                 loader=_bed_loader,
                                 loader_args=(bedfiles, gsize, mode))

    return cls(name, cover, gindexer,
               padding_value=0, dimmode=dimmode,
               channel_last=channel_last)
def create_from_bigwig(cls, name,  # pylint: disable=too-many-locals
                       bigwigfiles,
                       regions=None,
                       genomesize=None,
                       conditions=None,
                       binsize=None,
                       stepsize=None,
                       resolution=1,
                       flank=0,
                       storage='ndarray',
                       dtype='float32',
                       overwrite=False,
                       dimmode='all',
                       aggregate=np.mean,
                       datatags=None,
                       cache=False,
                       store_whole_genome=False,
                       channel_last=True,
                       nan_to_num=True):
    """Create a Cover class from a bigwig-file (or files).

    Parameters
    ----------
    name : str
        Name of the dataset
    bigwigfiles : str or list
        bigwig-file or list of bigwig files.
    regions : str or None
        Bed-file defining the region of interest.
        If set to None, the coverage will be fetched from the entire
        genome and a genomic indexer must be attached later. Otherwise,
        the coverage is only determined for the region of interest.
    genomesize : dict or None
        Dictionary containing the genome size.
        If `genomesize=None`, the genome size is determined from the
        bigwig file. If `store_whole_genome=False`, this option does
        not have an effect.
    conditions : list(str) or None
        List of conditions.
        If `conditions=None`, the conditions are obtained from
        the filenames (without the directories and file-ending).
    binsize : int or None
        Binsize in basepairs. For binsize=None, the binsize will be
        determined from the bed-file directly, which requires that all
        intervals in the bed-file are of equal length. Otherwise, the
        intervals in the bed-file will be split into subintervals of
        length binsize in conjunction with stepsize. Default: None.
    stepsize : int or None
        stepsize in basepairs for traversing the genome.
        If stepsize is None, it will be set equal to binsize.
        Default: None.
    resolution : int
        Resolution in base pairs divides the region of interest
        into windows of length resolution.
        This effectively reduces the storage for coverage data.
        The resolution must be selected such that
        min(stepsize, binsize) is a multiple of resolution. Default: 1.
    flank : int
        Flanking size increases the interval size at both ends by
        flank bins. Note that the binsize is defined by the resolution
        parameter. Default: 0.
    storage : str
        Storage mode for storing the coverage data can be
        'ndarray', 'hdf5' or 'sparse'. Default: 'ndarray'.
    dtype : str
        Typecode to define the datatype to be used for storage.
        Default: 'float32'.
    dimmode : str
        Dimension mode can be 'first' or 'all'. If 'first', only
        the first element of size resolution is returned. Otherwise,
        all elements of size resolution spanning the interval are
        returned. Default: 'all'.
    overwrite : boolean
        Overwrite cachefiles. Default: False.
    datatags : list(str) or None
        List of datatags. Together with the dataset name,
        the datatags are used to construct a cache file.
        If :code:`cache=False`, this option does not have an effect.
        Default: None.
    aggregate : callable
        Aggregation operation for loading genomic array.
        Default: numpy.mean
    cache : boolean
        Indicates whether to cache the dataset. Default: False.
    store_whole_genome : boolean
        Indicates whether the whole genome or only selected regions
        should be loaded. If False, a bed-file with regions of interest
        must be specified. Default: False.
    channel_last : boolean
        Indicates whether the condition axis should be the last
        dimension or the first. For example, tensorflow expects the
        channel at the last position. Default: True.
    nan_to_num : boolean
        Indicates whether NaN values contained in the bigwig files
        should be interpreted as zeros. Default: True
    """
    if pyBigWig is None:  # pragma: no cover
        raise Exception('pyBigWig not available. '
                        '`create_from_bigwig` requires pyBigWig '
                        'to be installed.')

    if regions is not None:
        gindexer = GenomicIndexer.create_from_file(regions, binsize,
                                                   stepsize, flank)
    else:
        gindexer = None

    if isinstance(bigwigfiles, str):
        bigwigfiles = [bigwigfiles]

    if not store_whole_genome and not gindexer:
        raise ValueError('Either regions must be supplied or '
                         'store_whole_genome must be True')

    if not store_whole_genome:
        # if whole genome should not be loaded
        gsize = {_iv_to_str(iv.chrom, iv.start, iv.end): iv.end - iv.start
                 for iv in gindexer}
    else:
        # otherwise the whole genome will be fetched, or at least
        # a set of full length chromosomes
        if genomesize is not None:
            # if a genome size has specifically been given, use it.
            gsize = genomesize.copy()
        else:
            bwfile = pyBigWig.open(bigwigfiles[0], 'r')
            gsize = bwfile.chroms()

    if conditions is None:
        conditions = [os.path.splitext(os.path.basename(f))[0]
                      for f in bigwigfiles]

    def _bigwig_loader(garray, aggregate):
        print("load from bigwig")
        for i, sample_file in enumerate(bigwigfiles):
            bwfile = pyBigWig.open(sample_file)

            for chrom in gsize:
                vals = np.zeros((get_chrom_length(gsize[chrom],
                                                  resolution),),
                                dtype=dtype)

                locus = _str_to_iv(chrom, template_extension=0)
                if len(locus) == 1:
                    locus = locus + (0, gsize[chrom])

                # when only parts of the genome are to be loaded,
                # locus already carries start and end coordinates
                for start in range(locus[1], locus[2], resolution):

                    if garray._full_genome_stored:
                        # be careful not to overshoot at the chromosome end
                        end = min(start + resolution, gsize[chrom])
                    else:
                        end = start + resolution

                    x = np.asarray(bwfile.values(locus[0],
                                                 int(start), int(end)))
                    if nan_to_num:
                        x = np.nan_to_num(x, copy=False)
                    vals[(start - locus[1]) // resolution] = aggregate(x)

                garray[GenomicInterval(*locus), i] = vals
        return garray

    datatags = [name] + datatags if datatags else [name]
    datatags += ['resolution{}'.format(resolution)]

    cover = create_genomic_array(gsize, stranded=False,
                                 storage=storage, datatags=datatags,
                                 cache=cache,
                                 conditions=conditions,
                                 overwrite=overwrite,
                                 resolution=resolution,
                                 store_whole_genome=store_whole_genome,
                                 typecode=dtype,
                                 loader=_bigwig_loader,
                                 loader_args=(aggregate,))

    return cls(name, cover, gindexer,
               padding_value=0, dimmode=dimmode,
               channel_last=channel_last)
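
# Usage sketch for create_from_bigwig (illustration only; the file names
# are hypothetical). Each 50 bp window is summarized with `aggregate`:
def _example_bigwig_signal():
    import numpy as np
    from janggu.data import Cover
    return Cover.create_from_bigwig(
        'dnase_signal',
        bigwigfiles='dnase.bw',
        regions='regions_of_interest.bed',
        binsize=200, stepsize=200,
        resolution=50,
        aggregate=np.mean,    # default aggregation
        nan_to_num=True)      # NaNs in the bigwig count as zeros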