Example #1
from simdna import synthetic, util

def do(options):
    if (options.seed is not None):
        import numpy as np
        np.random.seed(options.seed) 
        from simdna import random
        random.seed(options.seed)
        
    outputFileName_core = util.addArguments("DensityEmbedding",
                        [util.ArgumentToAdd(options.prefix, "prefix"),
                         util.BooleanArgument(options.bestHit, "bestHit"),
                         util.ArrArgument(options.motifNames, "motifs"),
                         util.ArgumentToAdd(options.min_motifs, "min"),
                         util.ArgumentToAdd(options.max_motifs, "max"),
                         util.ArgumentToAdd(options.mean_motifs, "mean"),
                         util.FloatArgument(options.zero_prob, "zeroProb"),
                         util.ArgumentToAdd(options.seqLength, "seqLength"),
                         util.ArgumentToAdd(options.numSeqs, "numSeqs")])
    
    loadedMotifs = synthetic.LoadedEncodeMotifs(options.pathToMotifs, pseudocountProb=0.001)
    Constructor = (synthetic.BestHitPwmFromLoadedMotifs if options.bestHit
                   else synthetic.PwmSamplerFromLoadedMotifs)
    embedInBackground = synthetic.EmbedInABackground(
        backgroundGenerator=synthetic.ZeroOrderBackgroundGenerator(
            seqLength=options.seqLength),
        embedders=[
            synthetic.RepeatedEmbedder(
                synthetic.SubstringEmbedder(
                    synthetic.ReverseComplementWrapper(
                        substringGenerator=Constructor(
                            loadedMotifs=loadedMotifs, motifName=motifName),
                        reverseComplementProb=options.rc_prob),
                    positionGenerator=synthetic.UniformPositionGenerator()),
                quantityGenerator=synthetic.ZeroInflater(
                    synthetic.MinMaxWrapper(
                        synthetic.PoissonQuantityGenerator(options.mean_motifs),
                        theMax=options.max_motifs,
                        theMin=options.min_motifs),
                    zeroProb=options.zero_prob))
            for motifName in options.motifNames
        ])
    sequenceSet = synthetic.GenerateSequenceNTimes(embedInBackground, options.numSeqs)
    synthetic.printSequences(outputFileName_core+".simdata", sequenceSet,
                             includeFasta=True, includeEmbeddings=True,
                             prefix=options.prefix)
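
A minimal way to exercise do() is to hand it any object exposing the attributes the function reads. This is a hedged sketch (the original script builds options with argparse; the attribute values and the motif file path below are placeholders):

from argparse import Namespace

options = Namespace(
    seed=1234, prefix="dens", bestHit=False,
    motifNames=["CTCF_known1"], min_motifs=1, max_motifs=3, mean_motifs=2,
    zero_prob=0.0, seqLength=500, numSeqs=100, rc_prob=0.5,
    pathToMotifs="motifs.txt")  # placeholder path to an ENCODE-format motif file
do(options)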
Example #2
def motif_density(motif_name,
                  seq_length,
                  num_seqs,
                  min_counts,
                  max_counts,
                  GC_fraction,
                  central_bp=None):
    """
  Returns sequences with motif density, along with embeddings array.
  """
    import simdna
    import numpy as np
    from simdna import synthetic
    loaded_motifs = synthetic.LoadedEncodeMotifs(simdna.ENCODE_MOTIFS_PATH,
                                                 pseudocountProb=0.001)
    substring_generator = synthetic.PwmSamplerFromLoadedMotifs(
        loaded_motifs, motif_name)
    if central_bp is not None:
        position_generator = synthetic.InsideCentralBp(central_bp)
    else:
        position_generator = synthetic.UniformPositionGenerator()
    quantity_generator = synthetic.UniformIntegerGenerator(
        min_counts, max_counts)
    embedders = [
        synthetic.RepeatedEmbedder(
            synthetic.SubstringEmbedder(
                synthetic.ReverseComplementWrapper(substring_generator),
                position_generator), quantity_generator)
    ]
    embed_in_background = synthetic.EmbedInABackground(
        synthetic.ZeroOrderBackgroundGenerator(
            seq_length, discreteDistribution=get_distribution(GC_fraction)),
        embedders)
    generated_sequences = tuple(
        synthetic.GenerateSequenceNTimes(embed_in_background,
                                         num_seqs).generateSequences())
    sequence_arr = np.array(
        [generated_seq.seq for generated_seq in generated_sequences])
    embedding_arr = [
        generated_seq.embeddings for generated_seq in generated_sequences
    ]
    return sequence_arr, embedding_arr
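
A hedged usage sketch for motif_density (the motif name matches the one used in the tests below; get_distribution is assumed to be defined elsewhere in the same module, mapping GC_fraction to per-base frequencies):

seqs, embeddings = motif_density("CTCF_known1", seq_length=500, num_seqs=100,
                                 min_counts=1, max_counts=3, GC_fraction=0.4)
print(seqs.shape)          # (100,): one sequence string per row
print(len(embeddings[0]))  # 1 to 3 embeddings in the first sequence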
Example #3
    def test_uniform_positions(self):
        pseudocount_prob = 0.001
        pwm_name = "CTCF_known1"
        num_sequences = 10000
        sequence_length = 50
        loaded_motifs = sn.LoadedEncodeMotifs(simdna.ENCODE_MOTIFS_PATH,
                                              pseudocountProb=pseudocount_prob)
        substring_generator = sn.PwmSamplerFromLoadedMotifs(
            loaded_motifs, pwm_name)
        position_generator = sn.UniformPositionGenerator()
        embedders = [
            sn.SubstringEmbedder(substring_generator, position_generator)
        ]
        embed_in_background = sn.EmbedInABackground(
            sn.ZeroOrderBackgroundGenerator(sequence_length,
                                            discreteDistribution={
                                                'A': 0.3,
                                                'C': 0.2,
                                                'G': 0.2,
                                                'T': 0.3
                                            }), embedders)
        generated_sequences = list(
            sn.GenerateSequenceNTimes(embed_in_background,
                                      num_sequences).generateSequences())

        motif_length = len(loaded_motifs.getPwm(pwm_name).getRows())
        start_pos_count = np.zeros(sequence_length - motif_length + 1)

        for seq in generated_sequences:
            assert len(seq.seq) == sequence_length
            embeddings = seq.embeddings
            for embedding in embeddings:
                assert (embedding.what.string ==
                        seq.seq[embedding.startPos:embedding.startPos +
                                len(embedding.what.string)])
                start_pos_count[embedding.startPos] += 1

        start_pos_count = start_pos_count / float(len(generated_sequences))
        np.testing.assert_almost_equal(start_pos_count,
                                       1.0 / len(start_pos_count), 2)
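
The test methods in Examples #3-#5 are excerpted from a test class; judging from the names they use, they assume module-level imports along these lines (an inference, not shown in the snippets themselves):

import random  # or `from simdna import random`, as in Example #1
from collections import defaultdict
import numpy as np
import simdna
from simdna import synthetic as sn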
Example #4
    def test_background_generator(self):
        random.seed(1234)
        np.random.seed(1234)
        seq_length = 100
        # for testing; not biologically realistic
        freqs = {'A': 0.1, 'C': 0.2, 'G': 0.3, 'T': 0.4}
        embed_in_background = sn.EmbedInABackground(
            sn.ZeroOrderBackgroundGenerator(
                seq_length, discreteDistribution=freqs), [])
        generated_sequences = sn.GenerateSequenceNTimes(
            embed_in_background, 500).generateSequences()
        generated_seqs = [seq.seq for seq in generated_sequences]
        char_count = defaultdict(lambda: 0)
        for seq in generated_seqs:
            assert len(seq) == seq_length
            for char in seq:
                char_count[char] += 1
        total_chars = sum(char_count.values())
        actual_freqs = {val: char_count[val] / float(total_chars)
                        for val in char_count}
        for key in freqs:
            np.testing.assert_almost_equal(actual_freqs[key], freqs[key], 2)
Example #5
    def test_density_motif_embedding(self):
        random.seed(1234)
        np.random.seed(1234)
        min_counts = 2
        max_counts = 5
        pseudocount_prob = 0.001
        pwm_name = "CTCF_known1"
        num_sequences = 5000
        loaded_motifs = sn.LoadedEncodeMotifs(simdna.ENCODE_MOTIFS_PATH,
                                              pseudocountProb=pseudocount_prob)
        substring_generator = sn.PwmSamplerFromLoadedMotifs(
            loaded_motifs, pwm_name)
        position_generator = sn.UniformPositionGenerator()
        quantity_generator = sn.UniformIntegerGenerator(min_counts, max_counts)
        embedders = [
            sn.RepeatedEmbedder(
                sn.SubstringEmbedder(
                    sn.ReverseComplementWrapper(
                        substring_generator), position_generator),
                quantity_generator)]
        embed_in_background = sn.EmbedInABackground(
            sn.ZeroOrderBackgroundGenerator(
                500, discreteDistribution={'A': 0.3, 'C': 0.2,
                                           'G': 0.2, 'T': 0.3}),
            embedders)
        generated_sequences = list(sn.GenerateSequenceNTimes(
            embed_in_background, num_sequences).generateSequences())
        assert len(generated_sequences) == num_sequences

        actual_pwm = np.array([[0.095290, 0.318729, 0.083242, 0.502738],
                         [0.182913, 0.158817, 0.453450, 0.204819],
                         [0.307777, 0.053669, 0.491785, 0.146769],
                         [0.061336, 0.876232, 0.023001, 0.039430],
                         [0.008762, 0.989047, 0.000000, 0.002191],
                         [0.814896, 0.014239, 0.071194, 0.099671],
                         [0.043812, 0.578313, 0.365827, 0.012048],
                         [0.117325, 0.474781, 0.052632, 0.355263],
                         [0.933114, 0.012061, 0.035088, 0.019737],
                         [0.005488, 0.000000, 0.991218, 0.003293],
                         [0.365532, 0.003293, 0.621295, 0.009879],
                         [0.059276, 0.013172, 0.553238, 0.374314],
                         [0.013187, 0.000000, 0.978022, 0.008791],
                         [0.061538, 0.008791, 0.851648, 0.078022],
                         [0.114411, 0.806381, 0.005501, 0.073707],
                         [0.409241, 0.014301, 0.557756, 0.018702],
                         [0.090308, 0.530837, 0.338106, 0.040749],
                         [0.128855, 0.354626, 0.080396, 0.436123],
                         [0.442731, 0.199339, 0.292952, 0.064978]])

        actual_pwm = actual_pwm * (1 - pseudocount_prob) + pseudocount_prob / 4
        np.testing.assert_almost_equal(np.sum(actual_pwm, axis=-1), 1.0, 6)
        np.testing.assert_almost_equal(
            actual_pwm,
            np.array(loaded_motifs.getPwm(pwm_name).getRows()))
        letter_to_index = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
        reconstructed_pwm_fwd = np.zeros_like(actual_pwm)
        reconstructed_pwm_rev = np.zeros_like(actual_pwm)
        quantity_distribution = defaultdict(lambda: 0) 
        total_fwd_embeddings = 0.0
        total_rev_embeddings = 0.0
        
        for seq in generated_sequences:
            embeddings = seq.embeddings
            quantity_distribution[len(embeddings)] += 1
            for embedding in embeddings:
                assert (embedding.what.string ==
                        seq.seq[embedding.startPos:
                                embedding.startPos + len(embedding.what.string)])
                if 'revComp' in embedding.what.getDescription():
                    total_rev_embeddings += 1
                    arr = reconstructed_pwm_rev
                else:
                    total_fwd_embeddings += 1
                    arr = reconstructed_pwm_fwd
                for char_idx, char in enumerate(embedding.what.string):
                    arr[char_idx][letter_to_index[char]] += 1

        total_embeddings = total_fwd_embeddings + total_rev_embeddings 
        np.testing.assert_almost_equal(
            total_fwd_embeddings/total_embeddings, 0.5, 2) 

        # normalize each position of the reconstructed PWMs to frequencies
        reconstructed_pwm_fwd = reconstructed_pwm_fwd / total_fwd_embeddings
        reconstructed_pwm_rev = reconstructed_pwm_rev / total_rev_embeddings
        np.testing.assert_almost_equal(actual_pwm, reconstructed_pwm_fwd, 2)
        np.testing.assert_almost_equal(actual_pwm,
                                       reconstructed_pwm_rev[::-1,::-1], 2)
       
        # test that the quantities of motifs were sampled uniformly
        for quantity in range(min_counts, max_counts + 1):
            np.testing.assert_almost_equal(
                quantity_distribution[quantity] / float(num_sequences),
                1.0 / (max_counts - min_counts + 1), 2)
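
The last two assertions rely on the fact that, with columns ordered A, C, G, T, reversing both axes of a position weight matrix yields the reverse-complement PWM. A small illustration of that identity (not part of the test):

import numpy as np

def reverse_complement_pwm(pwm):
    # Reversing the column axis swaps A<->T and C<->G (for A, C, G, T order),
    # and reversing the row axis reverses the positions; together these give
    # the PWM seen on the reverse-complement strand.
    return pwm[::-1, ::-1]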
Example #6
def motifGrammarSimulation(options):
    pc = 0.001
    bestHit = options.bestHit
    pathToMotifs = options.pathToMotifs
    loadedMotifs = synthetic.LoadedEncodeMotifs(pathToMotifs,
                                                pseudocountProb=pc)
    motifName1 = options.motifName1
    motifName2 = options.motifName2
    seqLength = options.seqLength
    numSeq = options.numSeq
    generationSetting = options.generationSetting
    outputFileName = "motifGrammarSimulation_" + generationSetting + (
        "_bestHit" if bestHit else "")
    if (generationSetting != generationSettings.singleMotif2):
        outputFileName += "_motif1-" + motifName1
    if (generationSetting != generationSettings.singleMotif1):
        outputFileName += "_motif2-" + motifName2
    outputFileName += "_seqLength" + str(seqLength) + "_numSeq" + str(
        numSeq) + ".simdata"

    kwargs = {'loadedMotifs': loadedMotifs}
    if (bestHit):
        theClass = synthetic.BestHitPwmFromLoadedMotifs
    else:
        theClass = synthetic.PwmSamplerFromLoadedMotifs

    motif1Generator = theClass(motifName=motifName1, **kwargs)
    motif2Generator = theClass(motifName=motifName2, **kwargs)
    motif1Embedder = synthetic.SubstringEmbedder(
        substringGenerator=motif1Generator)
    motif2Embedder = synthetic.SubstringEmbedder(
        substringGenerator=motif2Generator)

    embedders = []
    if (generationSetting == generationSettings.allBackground
            or generationSetting == generationSettings.twoMotifs):
        namePrefix = "synthNeg"
    else:
        namePrefix = "synthPos"
    if (generationSetting == generationSettings.allBackground):
        pass
    elif (generationSetting in [
            generationSettings.singleMotif1, generationSettings.twoMotifs,
            generationSettings.singleMotif2
    ]):
        if (generationSetting == generationSettings.singleMotif1):
            embedders.append(motif1Embedder)
        elif (generationSetting == generationSettings.singleMotif2):
            embedders.append(motif2Embedder)
        elif (generationSetting == generationSettings.twoMotifs):
            embedders.append(motif1Embedder)
            embedders.append(motif2Embedder)
        else:
            raise RuntimeError("Unsupported generation setting: " +
                               generationSetting)
    elif (generationSetting in [
            generationSettings.twoMotifsFixedSpacing,
            generationSettings.twoMotifsVariableSpacing
    ]):
        if (generationSetting == generationSettings.twoMotifsFixedSpacing):
            separationGenerator = synthetic.FixedQuantityGenerator(
                options.fixedSpacingOrMinSpacing)
        elif (generationSetting ==
              generationSettings.twoMotifsVariableSpacing):
            separationGenerator = synthetic.UniformIntegerGenerator(
                minVal=options.fixedSpacingOrMinSpacing,
                maxVal=options.maxSpacing)
        else:
            raise RuntimeError("unsupported generationSetting:" +
                               generationSetting)
        embedders.append(
            synthetic.EmbeddableEmbedder(
                embeddableGenerator=synthetic.PairEmbeddableGenerator(
                    embeddableGenerator1=motif1Generator,
                    embeddableGenerator2=motif2Generator,
                    separationGenerator=separationGenerator)))
    else:
        raise RuntimeError("unsupported generationSetting:" +
                           generationSetting)

    embedInBackground = synthetic.EmbedInABackground(
        backgroundGenerator=synthetic.ZeroOrderBackgroundGenerator(seqLength),
        embedders=embedders,
        namePrefix=namePrefix)

    sequenceSet = synthetic.GenerateSequenceNTimes(embedInBackground, numSeq)
    synthetic.printSequences(outputFileName,
                             sequenceSet,
                             includeFasta=True,
                             includeEmbeddings=True)
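
The function branches on a generationSettings object that is defined outside this snippet. A hypothetical stand-in listing only the attribute names used above (the real definition ships alongside the simdna scripts):

class generationSettings:
    # attribute names taken from the branches above; string values are an assumption
    allBackground = "allBackground"
    singleMotif1 = "singleMotif1"
    singleMotif2 = "singleMotif2"
    twoMotifs = "twoMotifs"
    twoMotifsFixedSpacing = "twoMotifsFixedSpacing"
    twoMotifsVariableSpacing = "twoMotifsVariableSpacing"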
Example #7
def simulate_heterodimer_grammar(motif1, motif2, seq_length, min_spacing,
                                 max_spacing, num_pos, num_neg, GC_fraction):
    """
    Simulates two classes of sequences with motif1 and motif2:
        - Positive class sequences with motif1 and motif2 embedded as a pair,
          separated by min_spacing to max_spacing background bases
        - Negative class sequences with motif1 and motif2 embedded
          independently anywhere in the sequence, not as a heterodimer grammar

    Parameters
    ----------
    seq_length : int, length of sequence
    GC_fraction : float, GC fraction in background sequence
    num_pos : int, number of positive class sequences
    num_neg : int, number of negative class sequences
    motif1 : str, ENCODE motif name
    motif2 : str, ENCODE motif name
    min_spacing : int, minimum inter-motif spacing
    max_spacing : int, maximum inter-motif spacing

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    y : 1darray
        Array with positive/negative class labels.
    embedding_arr: list
        List of embedding objects.
    """
    import simdna
    import numpy as np
    from simdna import synthetic
    loaded_motifs = synthetic.LoadedEncodeMotifs(simdna.ENCODE_MOTIFS_PATH,
                                                 pseudocountProb=0.001)
    motif1_generator = synthetic.ReverseComplementWrapper(
        synthetic.PwmSamplerFromLoadedMotifs(loaded_motifs, motif1))
    motif2_generator = synthetic.ReverseComplementWrapper(
        synthetic.PwmSamplerFromLoadedMotifs(loaded_motifs, motif2))
    separation_generator = synthetic.UniformIntegerGenerator(
        min_spacing, max_spacing)
    embedder = synthetic.EmbeddableEmbedder(
        synthetic.PairEmbeddableGenerator(motif1_generator, motif2_generator,
                                          separation_generator))
    embed_in_background = synthetic.EmbedInABackground(
        synthetic.ZeroOrderBackgroundGenerator(
            seq_length, discreteDistribution=get_distribution(GC_fraction)),
        [embedder])
    generated_sequences = tuple(
        synthetic.GenerateSequenceNTimes(embed_in_background,
                                         num_pos).generateSequences())
    grammar_sequence_arr = np.array(
        [generated_seq.seq for generated_seq in generated_sequences])
    positive_embedding_arr = [
        generated_seq.embeddings for generated_seq in generated_sequences
    ]
    nongrammar_sequence_arr, _, negative_embedding_arr = simulate_multi_motif_embedding(
        [motif1, motif2], seq_length, 2, 2, num_neg, GC_fraction)
    sequence_arr = np.concatenate(
        (grammar_sequence_arr, nongrammar_sequence_arr))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
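
A hedged call sketch for simulate_heterodimer_grammar (motif names are illustrative; it also relies on get_distribution and on simulate_multi_motif_embedding from Example #8, both assumed to live in the same module):

seqs, y, embeddings = simulate_heterodimer_grammar(
    motif1="CTCF_known1",  # appears elsewhere on this page
    motif2="TAL1_known4",  # placeholder for any name in the ENCODE motif file
    seq_length=500, min_spacing=2, max_spacing=10,
    num_pos=100, num_neg=100, GC_fraction=0.4)
print(seqs.shape)  # (200,): num_pos + num_neg sequences
print(y.shape)     # (200, 1): True for the grammar (positive) sequences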
Example #8
def simulate_multi_motif_embedding(motif_names, seq_length, min_num_motifs,
                                   max_num_motifs, num_seqs, GC_fraction):
    """
  Generates data for multi motif recognition task.

  Parameters
  ----------
  motif_names : list
      List of strings.
  seq_length : int
  min_num_motifs : int
  max_num_motifs : int
  num_seqs : int
  GC_fraction : float

  Returns
  -------
  sequence_arr : 1darray
      Contains sequence strings.
  y : ndarray
      Contains labels for each motif.
  embedding_arr: 1darray
      Array of embedding objects.
  """

    import simdna
    import numpy as np
    from simdna import synthetic
    loaded_motifs = synthetic.LoadedEncodeMotifs(simdna.ENCODE_MOTIFS_PATH,
                                                 pseudocountProb=0.001)

    def get_embedder(motif_name):
        substring_generator = synthetic.PwmSamplerFromLoadedMotifs(
            loaded_motifs, motif_name)
        return synthetic.SubstringEmbedder(
            synthetic.ReverseComplementWrapper(substring_generator),
            name=motif_name)

    embedders = [get_embedder(motif_name) for motif_name in motif_names]
    quantity_generator = synthetic.UniformIntegerGenerator(
        min_num_motifs, max_num_motifs)
    combined_embedder = [
        synthetic.RandomSubsetOfEmbedders(quantity_generator, embedders)
    ]
    embed_in_background = synthetic.EmbedInABackground(
        synthetic.ZeroOrderBackgroundGenerator(
            seq_length, discreteDistribution=get_distribution(GC_fraction)),
        combined_embedder)
    generated_sequences = tuple(
        synthetic.GenerateSequenceNTimes(embed_in_background,
                                         num_seqs).generateSequences())
    sequence_arr = np.array(
        [generated_seq.seq for generated_seq in generated_sequences])
    label_generator = synthetic.IsInTraceLabelGenerator(
        np.asarray(motif_names))
    y = np.array([label_generator.generateLabels(generated_seq)
                  for generated_seq in generated_sequences], dtype=bool)
    embedding_arr = [
        generated_seq.embeddings for generated_seq in generated_sequences
    ]
    return sequence_arr, y, embedding_arr
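
A hedged call sketch for simulate_multi_motif_embedding (motif names are illustrative; any names present in simdna's ENCODE motif file work):

seqs, y, embeddings = simulate_multi_motif_embedding(
    motif_names=["CTCF_known1", "TAL1_known4"],
    seq_length=500, min_num_motifs=1, max_num_motifs=2,
    num_seqs=100, GC_fraction=0.4)
print(seqs.shape)  # (100,)
print(y.shape)     # (100, 2): one boolean column per motif name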