Example #1
    def __init__(self,
                 name,
                 snvs_valid,
                 snvs_invalid,
                 indels_valid,
                 indels_invalid,
                 sensitivity=None,
                 precision=None):
        self.name = name
        self.sensitivity = (random.uniform(0.5, 0.95)
                            if sensitivity is None else sensitivity)
        self.precision = (random.uniform(0.5, 0.95)
                          if precision is None else precision)

        goodfrac = self.sensitivity
        badfrac = self.sensitivity * (1. - self.precision) / self.precision

        self.snvs = random.sample(snvs_valid, int(goodfrac * len(snvs_valid)))
        self.indels = random.sample(indels_valid,
                                    int(goodfrac * len(indels_valid)))

        self.snvs += random.sample(
            snvs_invalid, min(int(badfrac * len(snvs_valid)),
                              len(snvs_invalid)))
        self.indels += random.sample(
            indels_invalid,
            min(int(badfrac * len(indels_valid)), len(indels_invalid)))

        self.snvs = sorted(self.snvs)
        self.indels = sorted(self.indels)
Example #2
def build_sim(Ps, ms, es, incs, Mstar, Rstar):
    Nplanets = len(Ps)
    assert Nplanets == len(ms) == len(es) == len(Ps) == len(incs)

    Ws = 2 * np.pi * rd.sample(Nplanets)
    ws = 2 * np.pi * rd.sample(Nplanets)
    Ms = 2 * np.pi * rd.sample(Nplanets)

    radii = np.zeros(Nplanets)
    for i in range(Nplanets):
        radii[i] = np.cbrt(Ps[i] * Ps[i] * ms[i] / earth_mass_p_solar_mass / Mstar / 3) / 2

    #set up simulation
    sim = rebound.Simulation()

    #add star
    sim.add(m=Mstar, r=Rstar)

    for i in range(Nplanets):
        sim.add(m=ms[i] / earth_mass_p_solar_mass, P=Ps[i] / year_p_reboundtime, e=es[i], inc=incs[i], Omega=Ws[i], omega=ws[i], M=Ms[i], r=radii[i]) #G=1 units!
    sim.move_to_com()
    
    sim.collision = 'line'
    sim.collision_resolve = collision
    sim.ri_whfast.keep_unsynchronized = 1
    sim.ri_whfast.safe_mode = 0
    
    sim.integrator = "whfast"
    sim.dt = np.sqrt(2) / 40 * sim.particles[1].P  # ~0.035355

    return sim
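A minimal usage sketch, assuming the module-level imports the snippet relies on (rebound, numpy as np, numpy.random as rd) plus stand-ins for its globals; the unit constants and the no-op collision resolver below are assumptions, not the original project's values:

import numpy as np
import numpy.random as rd
import rebound

earth_mass_p_solar_mass = 332946.0      # assumed: Earth masses per solar mass
year_p_reboundtime = 1.0 / (2 * np.pi)  # assumed: years per G=1 time unit (AU, Msun)

def collision(sim_pointer, col):
    return 0  # assumed no-op resolver: keep both particles

sim = build_sim(Ps=[10.0, 20.0], ms=[1.0, 2.0], es=[0.01, 0.02],
                incs=[0.0, 0.01], Mstar=1.0, Rstar=0.005)
sim.integrate(10.0)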
Example #3
    def update(self, u_t0, u_t1, x_t0):
        """
        param[in] u_t0 : particle state odometry reading [x, y, theta] at time (t-1) [odometry_frame]
        param[in] u_t1 : particle state odometry reading [x, y, theta] at time t [odometry_frame]
        param[in] x_t0 : particle state belief [x, y, theta] at time (t-1) [world_frame]
        param[out] x   : particle state belief [x, y, theta] at time t [world_frame]
        """
        ux_t0  = u_t0[0]
        uy_t0  = u_t0[1]
        uth_t0 = u_t0[2]
        ux_t1  = u_t1[0]
        uy_t1  = u_t1[1]
        uth_t1 = u_t1[2]

        del_rot1  = atan2(uy_t1 - uy_t0, ux_t1 - ux_t0) - uth_t0
        del_trans = math.sqrt((ux_t1 - ux_t0)**2 + (uy_t1 - uy_t0)**2)
        del_rot2  = uth_t1 - uth_t0 - del_rot1

        del_rot1  = del_rot1 - sample(0, self.a1*del_rot1**2 + self.a2*del_trans**2)
        del_trans = del_trans - sample(0, self.a3*del_trans**2 + self.a4*(del_rot1**2 + del_rot2**2))
        del_rot2  = del_rot2 - sample(0, self.a1*del_rot2**2 + self.a2*del_trans**2)

        x_t1 = list(x_t0)  # copy, so the input belief is not mutated in place

        x_t1[0] = x_t0[0] + del_trans*math.cos(x_t0[2] + del_rot1)
        x_t1[1] = x_t0[1] + del_trans*math.sin(x_t0[2] + del_rot1)
        x_t1[2] = x_t0[2] + del_rot1 + del_rot2
        
        return x_t1
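The `sample` helper is defined elsewhere in the source project (which also assumes `import math` and `from math import atan2` at module level). A minimal sketch, assuming it draws Gaussian noise with the given mean and variance; Probabilistic Robotics often approximates the same draw with a sum of 12 uniform samples:

import math
import random

def sample(mu, var):
    # assumed interface: draw from a normal distribution with
    # mean mu and variance var
    return random.gauss(mu, math.sqrt(var))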
Example #4
    def create_single_odorant_activation_matrix(self):
        """
        create_single_odorant_activation_matrix 
        """
        activation_matrix = np.zeros(
            (self.params['n_patterns'], self.params['n_or']))
        p1, p2, p3 = self.set_odorant_distribution_params()
        np.random.seed(self.params['seed_activation_matrix'])
        random.seed(self.params['seed_activation_matrix'])
        distances = np.zeros((self.params['n_patterns'], self.params['n_or']))
        n_min_active_OR = int(
            round(self.params['n_or'] * self.params['frac_min_active_OR']))
        n_max_active_OR = int(
            round(self.params['n_or'] * self.params['frac_max_active_OR']))
        n_above_thresh = np.zeros(
            self.params['n_patterns']
        )  # number of glomeruli expected to get an activation larger than thresh
        thresh = 0.01
        for pn in xrange(self.params['n_patterns']):
            n_active_OR = np.random.randint(n_min_active_OR, n_max_active_OR)
            activated_ORs = random.sample(range(self.params['n_or']),
                                          n_active_OR)
            while np.unique(activated_ORs).size != n_active_OR:
                activated_ORs = random.sample(range(self.params['n_or']),
                                              n_active_OR)
            for OR in activated_ORs:
                dist = self.odorant_odor_distance_distribution((p1, p2, p3))
                distances[pn, OR] = dist
                affinity = np.exp(
                    -(dist)**2 / self.
                    params['distance_affinity_transformation_parameter_exp'])
                if affinity > 1.:
                    affinity = 1.
                if affinity < 0.:
                    affinity = 0.
                activation_matrix[pn, OR] = affinity
                n_above_thresh[pn] = (activation_matrix[pn, :] >
                                      thresh).nonzero()[0].size

        print 'ActivationMatrix: per pattern number of activated glom: mean %.2f +- %.2f, \t%.2f +- %.2f percent' % (n_above_thresh.mean(), n_above_thresh.std(), \
                (n_above_thresh.mean() / float(self.params['n_or'])) * 100., (n_above_thresh.std() / float(self.params['n_or'])) * 100.)

        if self.params['OR_activation_normalization']:
            for pn in xrange(self.params['n_patterns']):
                # this normalization increases the likelihood of having ORs with a high affinity to
                # each given pattern --> receptors have specialized to odorants (?)
                #                if activation_matrix[pn, :].sum() == 0:
                #                    print '\n\tWARNING: activation_matrix.sum for pattern %d == 0' % (pn)
                activation_matrix[pn, :] /= activation_matrix[pn, :].max()
                if not activation_matrix[pn, :].sum() == 0:
                    activation_matrix[pn, :] /= activation_matrix[pn, :].sum()
#                pass

#            for OR in xrange(self.params['n_or']):
#                activation_matrix[:, OR] /= activation_matrix[:, OR].sum()

        print 'Activation matrix sum:', activation_matrix.sum()
        print "Activation matrix fn:", self.params['activation_matrix_fn']
        np.savetxt(self.params['activation_matrix_fn'], activation_matrix)
        return activation_matrix
Example #5
def part1():
    #Load the MNIST digit data
    M = loadmat("mnist_all.mat")
    f, axarr = plt.subplots(10, 10)

    for i in range(10):
        train_num = "train" + str(i)
        test_num = "test" + str(i)
        train_size = M[train_num].shape[0]
        test_size = M[test_num].shape[0]
        print(train_size)
        random.seed(i)
        rand_train_img = random.sample(range(0, train_size), 8)
        rand_test_img = random.sample(range(0, test_size), 2)
        
        # print(i)
        # print(rand_train_img)
        # print(rand_test_img)
        
        # Display 8 training images and 2 test images.
        for k in range(len(rand_train_img)):
            img = M[train_num][rand_train_img[k]].reshape((28, 28))
            axarr[i, k].imshow(img, cmap=cm.gray)
            axarr[i, k].axis('off')

        for k in range(len(rand_test_img)):
            img = M[test_num][rand_test_img[k]].reshape((28, 28))
            axarr[i, k + 8].imshow(img, cmap=cm.gray)
            axarr[i, k + 8].axis('off')
        
    plt.show()
Example #6
 def k_plus(self, ace_prob):
     if self.prenode is not None and self.prenode.status == "m" and sample() < self.K_PLUS:
         # preNode is methylated and sample() gets smaller than K_PLUS
         return MHistoneWithDNAModel(inherited=True, inherited_hst=self)
     if self.nextnode is not None and self.nextnode.status == "m" and sample() < self.K_PLUS:
         # nextNode is methylated and sample() gets smaller than K_PLUS
         return MHistoneWithDNAModel(inherited=True, inherited_hst=self)
     return self
Example #7
    def __uniformMutation(self, population, npop):
        """
        Mutation operator
        """
        probaMutation = rd.sample((npop, self.__ndof))
        deltaX = self.__stepMut * (rd.sample((npop, self.__ndof)) - 0.5)
        population = population + deltaX * (probaMutation <= self.__rateMut)

        return population
Example #8
def creating_session(subsession):

    for player in subsession.get_players():
        # randomize to treatments
        if player.round_number == 1:
            player.participant.treatment = random.choice(
                ['Control', 'Emotional'])
        player.treatment = player.participant.treatment
        # print('set player.treatment to', player.treatment)

        if player.round_number > 1:
            prev_player = player.in_round(player.round_number - 1)

        if player.round_number > Constants.num_rounds / 2:
            player.sReward = 'LR'
            # print(os.getcwd())
            memelist = os.listdir('_static/LR')[1:-1]
            # ! the [1:-1] slice skips hidden files (e.g. the macOS '.DS_Store')
            # ! that os.listdir would otherwise include and that break the regex below
            # print(memelist)
            pattern = r"meme(?P<number>\d{3})\.jpg"
            # you are telling the pattern of the files inside of the folder, in my case a string...
            # ...that says meme, then a THREE DIGIT number + .jpg, and then you group it by the number
            # m = re.match(pattern, "meme200.jpg")
            # print(m)
            # int(m.group("number"))
            numbers = [
                int(re.match(pattern, x).group("number")) for x in memelist
            ]
            # take all of the numbers from the image files and put them on a list
            vImages = random.sample(numbers, 6)
            # select 6 random numbers from the list, but they do not repeat each other
            player.iImgPost1 = vImages[0]
            player.iImgPost2 = vImages[1]
            player.iImgPost3 = vImages[2]
            player.iImgPost4 = vImages[3]
            player.iImgPost5 = vImages[4]
            player.iImgPost6 = vImages[5]
            # player.iImgPost6      = random.randint(low=101,high=len(os.listdir('_static/LR')))

        # depending on reward we check different images...
        else:
            player.sReward = 'HR'
            # dRange                  = range(1,len(os.listdir('_static/HR')))
            # vImages                 = random.sample(range(dRange), 6)
            vImages = random.sample(range(1, len(os.listdir('_static/HR'))), 6)
            #! make a subsample of 6 random images that are not the same and store the images in a vector
            player.iImgPost1 = vImages[0]
            player.iImgPost2 = vImages[1]
            player.iImgPost3 = vImages[2]
            player.iImgPost4 = vImages[3]
            player.iImgPost5 = vImages[4]
            player.iImgPost6 = vImages[5]
            #save image post in the variable as one of the positions of the vector

        player.iImgFeed = random.randint(1, 89)
Example #9
def sample_dehnen(N, M, a, core=False):
    # The factor M * 200^2 / 201^2 restricts the radius to 200 * a.
    radii = dehnen_inverse_cumulative(
        nprand.sample(N) * ((M * 40000) / 40401), M, a, core)
    thetas = np.arccos(nprand.sample(N) * 2 - 1)
    phis = 2 * pi * nprand.sample(N)
    xs = radii * sin(thetas) * cos(phis)
    ys = radii * sin(thetas) * sin(phis)
    zs = radii * cos(thetas)
    coords = np.column_stack((xs, ys, zs))
    return coords
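`dehnen_inverse_cumulative` is defined elsewhere in these projects. A sketch consistent with the comment above, assuming `core` selects between a cored profile (gamma = 0) and a cusped one (gamma = 1), inverting the Dehnen enclosed-mass profile M(<r) = M * (r / (r + a))**(3 - gamma):

import numpy as np

def dehnen_inverse_cumulative(m, M, a, core=False):
    # assumed: core=True means gamma = 0, core=False means gamma = 1
    gamma = 0.0 if core else 1.0
    x = (m / M) ** (1.0 / (3.0 - gamma))
    return a * x / (1.0 - x)

With gamma = 1, feeding in M * 200**2 / 201**2 returns exactly 200 * a, matching the comment in the example.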
Example #10
def set_bulge_positions():
  factor = 0.9*M_bulge
  radii = dehnen_inverse_cumulative(nprand.sample(N_bulge) * factor,
    M_bulge, a_bulge, bulge_core)
  thetas = np.arccos(nprand.sample(N_bulge)*2 - 1)
  phis = 2 * pi * nprand.sample(N_bulge)
  xs = radii * sin(thetas) * cos(phis)
  ys = radii * sin(thetas) * sin(phis)
  zs = radii * cos(thetas)
  coords = np.column_stack((xs, ys, zs))
  return coords
Example #13
def next_genome_with_dna_model(hst_list, window, nuc_prob, ace_prob, p_off):
    """
    this method takes histone list and returns the next generation of them.
    """
    ahst_n = 0
    mhst_n = 0

    for i, hst in enumerate(hst_list):
        if -window // 2 <= hst.position <= window // 2:
            if hst.status == "a":
                ahst_n += 1
            elif hst.status == "m":
                mhst_n += 1

            for index in range(len(hst.CpGislandlist)):
                # p_on probability
                if hst.status == 'm' and sample() < 0.001133:
                    hst.CpGislandlist[index] = 1

                # p_off probability   
                if sample() < p_off:
                    hst.CpGislandlist[index] = 0

        hst = HistoneWithDNAModel.k(hst, ace_prob)

        hst_list[i] = hst

    p_bool = False
    if mhst_n > 2:
        p_bool = True

    t_bool = (ahst_n > 5) and (not p_bool)
    """
    WINDOW is size 10(11 histones note that there is E0 between E(-1) and E(1)), 
    so acetylated histones will be dominant if non-acetylated histones are less than 5.
    """
    # if transcription does not happend, then with k_nuc
    # probability, we recover E0 to be methylated.
    eext_bool = False
    if t_bool is False and sample() < nuc_prob:
        eext_bool = True

    # if in the locus we have more than two methylated histones,
    # then with 100% prob, we recover E0 histone to be
    # methylated. this is a histone memory part.
    if mhst_n > 2:
        eext_bool = True

    if eext_bool:
        center = len(hst_list) // 2
        hst_list[center] = MHistoneWithDNAModel(inherited=True, inherited_hst=hst_list[center])

    return hst_list, t_bool, p_bool
Example #14
def set_disk_positions(N, z0):
  radii = np.zeros(N)
  # The maximum radius is restricted to 20 kpc.
  sample = nprand.sample(N) * disk_radial_cumulative(20)
  for i, s in enumerate(sample):
    radii[i] = disk_radial_inverse_cumulative(s)
  zs = disk_height_inverse_cumulative(nprand.sample(N), z0)
  phis = 2 * pi * nprand.sample(N)
  xs = radii * cos(phis)
  ys = radii * sin(phis)
  coords = np.column_stack((xs, ys, zs))
  return coords
Example #15
def set_disk_positions(N, z0):
  radii = np.zeros(N)
  # The maximum radius is restricted to 60 kpc.
  sample = nprand.sample(N) * disk_radial_cumulative(60)
  for i, s in enumerate(sample):
    radii[i] = disk_radial_inverse_cumulative(s)
  zs = disk_height_inverse_cumulative(nprand.sample(N), z0)
  phis = 2 * pi * nprand.sample(N)
  xs = radii * cos(phis)
  ys = radii * sin(phis)
  coords = np.column_stack((xs, ys, zs))
  return coords
Example #16
 def __init__(self, genesis, network, nodeid, target=None):
     self.genesis = genesis
     self.network = network
     self.peers = []
     self.latencies = []
     self.nodeid = nodeid
     # A salt for this node, so all nodes don't produce the same hashes
     self.nodesalt = uint256(sha256(randint(2**63 - 1)))
     self.nonce = 0  # Will be increased in the mining process
     self.reset(target)
     # Geospatial location information
     self.latitude = pi * (1 / 2 - sample(1))
     self.longitude = 2 * pi * sample(1)
Example #17
 def generate_samples(self, model):
     '''
     Generate (s,a,r,s') pairs according to uniform distribution
     '''
     states = model.S()
     result = []
     for i in range(self._n_samples):
         s = random.sample(states, 1)[0]
         a = random.sample(model.A(s), 1)[0]
         r = model.R(s, a)
         s_p = util.functions.sample(model.T(s, a))
         result.append((s, a, r, s_p))
     return result
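`util.functions.sample` is not shown here. A stand-in sketch under the assumption that `model.T(s, a)` returns a {next_state: probability} mapping, drawing via the cumulative distribution (the same cumsum/searchsorted pattern several other examples on this page use):

import numpy as np

def sample(distribution):
    # assumed interface for util.functions.sample
    outcomes = list(distribution)
    probs = np.array([distribution[o] for o in outcomes], dtype=float)
    index = probs.cumsum().searchsorted(np.random.sample())
    return outcomes[min(index, len(outcomes) - 1)]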
Example #19
def train_recommender(y, lamb, n):
    m_users = y.shape[0]
    m_champs = y.shape[1]

    init_flat_theta = random.sample((m_users, n)).flatten() / 1000
    init_flat_x = random.sample((m_champs, n)).flatten() / 1000
    init_theta_and_x = np.concatenate((init_flat_x, init_flat_theta))

    costfn = gen_costfn(y, lamb, n, m_users, m_champs)

    res = minimize(costfn, init_theta_and_x, method='BFGS',
                   options={'gtol': 1e-8, 'disp': True})  # BFGS takes gtol, not xtol
    theta, x = unpack(res.x)  # the solution vector lives on the OptimizeResult
    return (theta, x)
Example #20
    def create_activation_matrix_for_pattern_completion_training(
            self, n_active_OR):
        """
        Create random patterns with a fixed number of activated OR per pattern
        --> for pattern completion :
        n_active_OR = int(round(params['n_or'] * params['frac_max_active_OR']))
        --> for rivalry:
        n_active_OR = int(round(params['n_or'] * params['frac_min_active_OR']))
        """
        activation_matrix = np.zeros(
            (self.params['n_patterns'], self.params['n_or']))
        p1, p2, p3 = self.set_odorant_distribution_params()

        np.random.seed(self.params['seed_activation_matrix'])
        random.seed(self.params['seed_activation_matrix'])
        distances = np.zeros((self.params['n_patterns'], self.params['n_or']))
        for pn in xrange(self.params['n_patterns']):
            activated_ORs = random.sample(range(self.params['n_or']),
                                          n_active_OR)
            while np.unique(activated_ORs).size != n_active_OR:
                activated_ORs = random.sample(range(self.params['n_or']),
                                              n_active_OR)
            for OR in activated_ORs:
                # draw a random distance from the fitted distance distribution
                dist = self.odorant_odor_distance_distribution((p1, p2, p3))
                distances[pn, OR] = dist
                affinity = np.exp(
                    -(dist)**2 / self.
                    params['distance_affinity_transformation_parameter_exp'])
                if affinity > 1.:
                    affinity = 1.
                if affinity < 0.:
                    affinity = 0.
                activation_matrix[pn, OR] = affinity

        if self.params['OR_activation_normalization']:
            for pn in xrange(self.params['n_patterns']):
                # this normalization increases the likelihood of having ORs with a high affinity to
                # each given pattern --> receptors have specialized to odorants (?)
                activation_matrix[pn, :] /= activation_matrix[pn, :].max()
                if not activation_matrix[pn, :].sum() == 0:
                    activation_matrix[pn, :] /= activation_matrix[pn, :].sum()
                else:
                    print '\n\tWARNING: activation_matrix.sum for pattern %d == 0\nWill now quit, because that makes no sense' % (
                        pn)
                    exit(1)
        print 'Activation matrix sum:', activation_matrix.sum()
        print "Activation matrix fn:", self.params['activation_matrix_fn']
        np.savetxt(self.params['activation_matrix_fn'], activation_matrix)
        return activation_matrix
Example #21
def generate_lon_lat(size):
    """Generate arrays of longitude, latitude.

    Parameters
    ----------
    size : int
        length of arrays to generate

    Returns
    -------
    list of 2 arrays
    """

    return [random.sample(size) * 360 - 180, random.sample(size) * 180 - 90]
Example #22
    def __polynomialMutation(self, population, npop):
        """
        Mutation operator
        """
        probaMutation = rd.sample((npop, self.__ndof))

        uMut = rd.sample((npop, self.__ndof))
        uinf_filter = uMut <= 0.5
        deltaMut = np.zeros_like(uMut)
        deltaMut[uinf_filter] = (2 * uMut[uinf_filter])**self.__alpham - 1
        deltaMut[~uinf_filter] = 1 - (2 *
                                      (1 - uMut[~uinf_filter]))**self.__alpham
        population = population + deltaMut * (probaMutation <= self.__rateMut)

        return population
Example #23
    def graphs(self):
        n = randint(100, 500)
        # n = 4
        print(n)
        a = sample((n, n))
        b = sample(n)
        a *= 100
        b *= 100
        a = list(a)
        b = list(b)

        self.carousel.add_widget(plot_time_threads(n, a, b))

        self.carousel.add_widget(plot_time_eps(n, a, b))

        self.carousel.add_widget(plot_time_msize())
Example #24
def set_bulge_positions():
  global bulge_cut_M
  bulge_cut_M = dehnen_cumulative(bulge_cut_r, M_bulge, a_bulge, gamma_bulge)
  print "%.0f%% of bulge mass cut by the truncation..." % \
        (100*(1-bulge_cut_M/M_bulge))
  if bulge_cut_M < 0.9*M_bulge:
    print "  \_ Warning: this is more than 10%% of the total bulge mass!"
  radii = dehnen_inverse_cumulative(nprand.sample(N_bulge) * bulge_cut_M,
    M_bulge, a_bulge, gamma_bulge)
  thetas = np.arccos(nprand.sample(N_bulge)*2 - 1)
  phis = 2 * pi * nprand.sample(N_bulge)
  xs = radii * sin(thetas) * cos(phis)
  ys = radii * sin(thetas) * sin(phis)
  zs = radii * cos(thetas)
  coords = np.column_stack((xs, ys, zs))
  return coords
Example #25
def createRandomHistoneList(percentage=50,A=1,
                            NUM_OF_HISTONE=81,BEFORE_PROMOTER=40,
                            K_PLUS=0.12,
                            K_MINUS=0.117,
                            K_PLUS2=0.12,
                            K_ACE=0.12):
    """
    percentage ... the probability of having a methylated histone.
    """
    dstList = []
    ratio = percentage / 100.0  # ratio should be a float between 0 and 1

    for i in range(NUM_OF_HISTONE):

        if(sample() < ratio):dstList.append(MHistone(position=i-BEFORE_PROMOTER,
                                                     K_PLUS=K_PLUS,
                                                     K_PLUS2=K_PLUS2,
                                                     K_MINUS=K_MINUS,
                                                     K_ACE=K_ACE,
                                                     A_bool=A,percentage=percentage))
        else:dstList.append(AHistone(position=i-BEFORE_PROMOTER,
                                     K_PLUS=K_PLUS,
                                     K_PLUS2=K_PLUS2,
                                     K_MINUS=K_MINUS,
                                     K_ACE=K_ACE,
                                     A_bool=A,percentage=percentage))
        # dstList[i-1].display()


        dstList[i-1].set_adjHistone(dstList[i])
    dstList[NUM_OF_HISTONE-1].set_adjHistone(dstList[0]) ## Connect the head to tail
    return dstList
Example #26
    def successor(self, method='reverse'):
        if method == 'reverse':
            ind = sorted(random.sample([i for i, _ in enumerate(self.path)],
                                       2))
            new_path = self.path[:]
            new_path = new_path[:ind[0]] + new_path[
                ind[0]:ind[1]][::-1] + new_path[ind[1]:]
            return TravelingSalesmanProblem(new_path)
        elif method == 'permutation':
            new_path = self.path[:]
            random.shuffle(new_path)
            return TravelingSalesmanProblem(new_path)
        elif method == 'adjacent':
            successors = []
            for i in range(len(self.path) - 1):
                new_problem = self.copy()
                new_problem.path[i], new_problem.path[
                    i + 1] = new_problem.path[i + 1], new_problem.path[i]
                successors.append(new_problem)

            last_path = self.copy()
            last_path.path[0], last_path.path[-1] = last_path.path[
                -1], last_path.path[0]
            successors.append(last_path)
            return random.choice(successors)
        else:
            print('No valid method supplied')
            return False
Example #27
def ramd():
    sample_size = 500
    rn1 = npr.rand(sample_size, 3)
    rn2 = npr.randint(0, 10, sample_size)
    rn3 = npr.sample(size=sample_size)

    a = [0, 25, 50, 75, 100]

    rn4 = npr.choice(a, size=sample_size)

    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2,
                                                 ncols=2,
                                                 figsize=(10, 8))

    ax1.hist(rn1, bins=25, stacked=True)
    ax1.set_title('rand')
    ax1.set_ylabel('frequency')

    ax2.hist(rn2, bins=25)
    ax2.set_title('randint')

    ax3.hist(rn3, bins=25)
    ax3.set_title('sample')
    ax3.set_ylabel("frequency")

    ax4.hist(rn4, bins=25)
    ax4.set_title('choice')
    plt.show()
Example #28
    def __init__(self, filename=None, winname=None, debug=False):
        self.filename = filename
        self.debug = debug

        if not winname:
            self.winname = "{:%Y%m%d_%H%M%S}_{}".format(
                datetime.now(), sample())
        else:
            self.winname = winname

        if filename:
            print("[Load param]", filename)
            with open(filename) as fp:
                config = json.load(fp)

            self.config = config

        if debug:
            cv.namedWindow(self.winname)
            if "default" in self.config:
                for trackbarname, [min_val, val, max_val
                                   ] in sorted(config["default"].items()):
                    self._create_default(trackbarname, min_val, val, max_val)
            if "kernel" in self.config:
                for trackbarname, [min_val, val, max_val
                                   ] in sorted(config["kernel"].items()):
                    self._cerate_kernel(trackbarname, min_val, val, max_val)
            self._create_save("save", 0, 0, 1)
Example #29
    def sample_relation(self, poi, bounding_box, perspective, landmark, step=0.02):
        """
        Sample a relation given a point of interest and landmark.
        Evaluate each relation and probabilistically choose the one that is likely to
        generate the poi given a landmark.
        """
        rel_scores = []
        rel_classes = []
        rels = []

        for s in [DistanceRelationSet, OrientationRelationSet, ContainmentRelationSet]:
            for rel in s.relations:
                rel_scores.append(self.evaluate_poi(poi, bounding_box, rel, perspective, landmark, step))
                rel_classes.append(rel)

        rel_scores = array(rel_scores)
        set_printoptions(threshold='nan')
        # print 'X',rel_scores
        rel_probabilities = rel_scores/sum(rel_scores)
        # print 'X',rel_probabilities
        index = rel_probabilities.cumsum().searchsorted( random.sample(1) )[0]
        # print 'X',index
        # print 'X',rel_probabilities.cumsum()

        return rel_classes[index], rel_probabilities[index], self.get_entropy(rel_probabilities)
Example #30
    def fit(
        self,
        trial: np.ndarray,
        show: bool = False,
        guess: np.ndarray = None,
    ) -> 'CoinMixture':
        """fits a coin mixture model via EM

        :param trial: m_flips each trial
        :type trial: np.ndarray
        :param show: whether to show the results of each iteration,
         defaults to False
        :type show: bool, optional
        :return: fitted object
        :rtype: CoinMixture
        """
        # initial guess for probabilities associated with each coin
        if guess is None:
            guess = sample(self.n_coins)
        theta = [0] * self.max_iter
        # iterate
        for i in range(self.max_iter):
            theta[i] = chain(guess)
            if show:
                print(f"#{i} ", ", ".join(f"{c:.4f}" for c in chain(guess)))

            # compute the e-step
            num_heads, num_tails = self.estep(trial, guess)

            # compute the m-step
            guess = self.mstep(num_heads, num_tails)

        self.theta = pd.DataFrame(theta)
        return self
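`estep` and `mstep` are methods defined elsewhere on this class. A self-contained sketch of the standard two-step EM update for a binomial mixture, assuming `trial` is an (n_trials, m_flips) array of 0/1 outcomes and the mixing weights are uniform (shown as free functions rather than methods):

import numpy as np

def estep(trial, guess):
    # soft-assign each trial to each coin via its binomial likelihood,
    # then accumulate the expected head and tail counts per coin
    heads = trial.sum(axis=1)
    tails = trial.shape[1] - heads
    p = np.asarray(guess)
    lik = p[None, :] ** heads[:, None] * (1.0 - p[None, :]) ** tails[:, None]
    resp = lik / lik.sum(axis=1, keepdims=True)  # responsibilities
    num_heads = (resp * heads[:, None]).sum(axis=0)
    num_tails = (resp * tails[:, None]).sum(axis=0)
    return num_heads, num_tails

def mstep(num_heads, num_tails):
    # re-estimate each coin's head probability from the expected counts
    return num_heads / (num_heads + num_tails)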
Example #31
    def transform(self, ratio=None):
        shape = list(self.shape)

        if ratio is None:
            ratio = self.ratio

        # if n_rows is None:
        #     ratio = 0.5
        # elif n_rows < 1:
        #     ratio = n_rows
        # else:
        #     ratio = 1.0*n_rows/shape[0]
        
        shape[0] = int(shape[0] * ratio)

        ud_X = random.sample(shape) * self.gap_row + self.min_row
        X = self.X

        Y = np.zeros(len(X), dtype=int)
        ud_Y = np.ones(len(ud_X), dtype=int)
        
        data_X = np.concatenate((X, ud_X), axis=0)
        target_Y = np.concatenate((Y, ud_Y), axis=0)

        return data_X, target_Y
Example #32
def initialize(n,power):
  # Returns a Python dictionary representing a web
  # with n pages, and where each page k is linked to by
  # L_k random other pages.  The L_k are independent and
  # identically distributed random variables with a
  # shifted and truncated Pareto probability mass function
  # p(l) proportional to 1/(l+1)^power.

  # The representation used is a Python dictionary with
  # keys 0 through n-1 representing the different pages.
  # i[j][0] is the estimated PageRank, initially set at 1/n,
  # i[j][1] the number of outlinks, and i[j][2] a list of
  # the outlinks.

  # This dictionary is used to supply (key,value) pairs to
  # both mapper tasks defined below.

  # initialize the dictionary
  i = {} 
  for j in xrange(n): i[j] = [1.0/n,0,[]]
  
  # For each page, generate inlinks according to the Pareto
  # distribution. Note that this is somewhat tedious, because
  # the Pareto distribution governs inlinks, NOT outlinks,
  # which is what our representation is adapted to represent.
  # A smarter representation would give easy
  # access to both, while remaining memory efficient.
  for k in xrange(n):
    lk = paretosample(n+1,power)-1
    values = random.sample(xrange(n),lk)
    for j in values:
      i[j][1] += 1 # increment the outlink count for page j
      i[j][2].append(k) # insert the link from j to k
  return i
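`paretosample` is not shown. A sketch consistent with the comment, drawing l with probability proportional to 1/(l+1)**power over a truncated support (the exact support used in the original is an assumption):

import random

def paretosample(n, power):
    # assumed: draw l from {1, ..., n-1} with p(l) ~ 1/(l+1)**power
    weights = [1.0 / (l + 1) ** power for l in range(1, n)]
    u = random.random() * sum(weights)
    acc = 0.0
    for l, w in zip(range(1, n), weights):
        acc += w
        if u <= acc:
            return l
    return n - 1

Called as paretosample(n+1, power) - 1, this yields inlink counts between 0 and n - 1, so the random.sample(xrange(n), lk) call above never over-draws.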
Example #33
def init_genome(percentage=50,
                hst_n=81,
                kp=0.176,
                km=0.117):
    """
    percentage ... the probability of having a methylated histone.
    this method returns a list of histones randomly generated with respect to
    the inputs.
    """
    before_promoter = hst_n // 2
    hst_list = []  # hst_list stores histones
    ratio = percentage / 100  # ratio should be float number between 0 and 1

    for i in range(hst_n):

        if sample() < ratio:
            hst_list.append(MHistone(position=i - before_promoter,
                                     kp=kp,
                                     km=km,
                                     )
                            )
        else:
            hst_list.append(UHistone(position=i - before_promoter,
                                     kp=kp,
                                     km=km,
                                     )
                            )

        hst_list[i - 1].set_adjhistone(hst_list[i])

    hst_list[0].prenode = None  # disjoint the edge histone to itself.
    return hst_list
Example #34
    def predict_node(node, inner_bins, root_bins, leaf_bins, is_root=False):
        """
        Predict label for current node and continue recursively
        """
        correct = total = 0

        if node.is_leaf:
            node.fprop = True
            left = right = None
        else:
            if not node.left.fprop:
                corr, tot, left = Baseline.predict_node(node.left, inner_bins, root_bins, leaf_bins)
                correct += corr
                total += tot
            if not node.right.fprop:
                corr, tot, right = Baseline.predict_node(node.right, inner_bins, root_bins, leaf_bins)
                correct += corr
                total += tot

        node.fprop = True

        bins = root_bins if is_root else leaf_bins if node.is_leaf else inner_bins
        label = np.digitize(sample(1), bins)[0]
        pred = Node(label)
        pred.word = node.word
        if node.is_leaf:
            pred.is_leaf = True
        else:
            pred.left = left
            pred.right = right
            left.parent = pred
            right.parent = pred

        return correct + (label == node.label), total + 1, pred
Example #35
def pg_polygons_wgs84():
    size = 1000
    x1, y1 = generate_lon_lat(size)
    x2, y2 = generate_lon_lat(size)

    # generate some fields in the data frame
    f = random.sample(size) * 360 - 180
    i = random.randint(-32767, 32767, size=size)
    ui = random.randint(0, 65535, size=size).astype("uint64")

    df = DataFrame(
        data={
            "x1": x1,
            "y1": y1,
            "x2": x2,
            "y2": y2,
            "f": f,
            "i": i,
            "ui": ui,
            "labels": i.astype("str"),
        })

    # Generate random triangles
    df["geometry"] = df[["x1", "y1", "x2", "y2"]].apply(
        lambda row: pg.polygons([[row.x1, row.y1], [row.x2, row.y1],
                                 [row.x2, row.y2], [row.x1, row.y1]]),
        axis=1,
    )

    return df
Example #36
def init_genome_with_dna_model(percentage=50,
                               hst_n=81,
                               kp=0.176,
                               km=0.117):
    """
    this method is a modified version of createRandomHistoneList
    used for Oct4 histones.
    """
    before_promoter = hst_n // 2
    hst_list = []  # hst_list stores histones
    ratio = percentage / 100  # ratio should be float number between 0 and 1

    for i in range(hst_n):

        if sample() < ratio:
            hst_list.append(MHistoneWithDNAModel(position=i - before_promoter,
                                                 kp=kp,
                                                 km=km,
                                                 )
                            )
        else:
            hst_list.append(UHistoneWithDNAModel(position=i - before_promoter,
                                                 kp=kp,
                                                 km=km,
                                                 )
                            )

        hst_list[i - 1].set_adjhistone(hst_list[i])

    hst_list[0].prenode = None  # disjoint the edge histone to itself.
    return hst_list
Example #37
def get_stock_list_two_strategies(strategy, amount):
    # define the stocks for each strategy
    stocks = stock_info[strategy]
    stock_list_multiple = []
    # basic_data = {}
    random_stocks = random.sample(stocks, 2)
    # basic_data["strategy"] = strategy
    # stock_list_multiple.append(basic_data)
    for stock in random_stocks:
        stock_list = {}
        print(stock)
        response_data = requests.get('https://api.iextrading.com/1.0/stock/' + stock + '/quote')
        response_week = requests.get('https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=' + stock + '&outputsize=compact&apikey=6Z2XPX3WLFSGNNPI')
        response_data2 = response_data.json()
        response_week2 = response_week.json()
        print(response_data2['companyName'])
        stock_list["symbolName"] = response_data2['symbol']
        stock_list["companyName"] = response_data2['companyName']
        stock_list["latestPrice"] = response_data2['latestPrice']
        stock_list["changePercentage"] = response_data2['changePercent']
        stock_list["investmentAmount"] = str(int(amount) / 4)
        # stock_list["weeklyData"] = response_week2["Time Series (Daily)"]
        stock_list["weeklyData"] = response_week2["Time Series (Daily)"]
        stock_list["strategy"] = strategy
        stock_list_multiple.append(stock_list)
    # the stock lists for the selected strategies
    return stock_list_multiple
Example #38
def initWords(phonemes, conceptCount, wordLengths):
    newWords = []
    for _ in range(conceptCount):
        newWords.append(frozenset(random.sample(phonemes, wordLengths))) # must be frozen (hashable) so networkx can make nodes out of words
    #for _ in range(wordCount):
    #    newWords.append([random.choice(phonemes) for _ in range(wordLengths)])
    return newWords
Example #39
def set_halo_positions():
  global halo_cut_M
  halo_cut_M = dehnen_cumulative(halo_cut_r, M_halo, a_halo, gamma_halo)
  print "%.0f%% of halo mass cut by the truncation..." % \
        (100*(1-halo_cut_M/M_halo))
  if halo_cut_M < 0.9*M_halo:
    print "  \_ Warning: this is more than 10%% of the total halo mass!"
  radii = dehnen_inverse_cumulative(nprand.sample(N_halo) * halo_cut_M,
    M_halo, a_halo, gamma_halo)
  thetas = np.arccos(nprand.sample(N_halo)*2 - 1)
  phis = 2 * pi * nprand.sample(N_halo)
  xs = radii * sin(thetas) * cos(phis)
  ys = radii * sin(thetas) * sin(phis)
  zs = radii * cos(thetas)
  coords = np.column_stack((xs, ys, zs))
  return coords
Example #41
def pd_create_dataset(seed=None):
    """ make sample data """

    if seed is None:
        seed = random.randint(low=0, high=1000)
    random.seed(seed)

    pool1 = ['boy', 'girl']
    pool2 = ['blue', 'red', 'green', 'yellow', 'purple']
    pool3 = pd.date_range('1/1/2014', periods=1000)

    d = {
        'num': [random.randint(low=0, high=1000) for i in range(50)],
        'num2': [
            random.randint(low=0, high=1000) + random.sample()
            for i in range(50)
        ],
        'str':
        [pool1[random.randint(low=0, high=len(pool1))] for i in range(50)],
        'str2':
        [pool2[random.randint(low=0, high=len(pool2))] for i in range(50)],
        'date':
        [pool3[random.randint(low=0, high=len(pool3))] for i in range(50)]
    }

    df = pd.DataFrame(d)

    return seed, df
Example #42
def weightfunc():
    i_weight = np.zeros((10,784))
    for i in range(0,10):
        x = (1-(-1))*random.sample(784) - 1
        i_weight[i] = x
    weight_matrix=np.matrix(np.array(i_weight))
    return weight_matrix
Example #43
def normalMutation(population, delta_mut, rho_mut):
    npop, ndof = population.shape
    probaMutation = rd.sample((npop, ndof))
    deltaX = delta_mut * (rd.normal(size=(npop, ndof)))
    population = population + deltaX * (probaMutation <= rho_mut)

    return population
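A small usage sketch for normalMutation (`rd` is numpy.random, as in the snippet; the population size and rates below are made up for illustration):

import numpy.random as rd

population = rd.sample((8, 3))  # 8 individuals, 3 design variables in [0, 1)
mutated = normalMutation(population, delta_mut=0.1, rho_mut=0.2)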
Example #44
    def __SBXcrossover(self, selection, population, npop):
        couples = np.zeros((npop // 2, 2, self.__ndof))
        children = np.zeros_like(population)
        uCross = rd.sample((npop // 2, self.__ndof))

        betaCross = np.zeros_like(uCross)
        uinf_filter = uCross <= 0.5
        betaCross[uinf_filter] = (2 * uCross[uinf_filter])**(self.__alphac)
        betaCross[~uinf_filter] = (2 * (1 - uCross[~uinf_filter]))**(
            -self.__alphac)

        for i in range(npop // 2):
            k = i
            while k == i:
                k = rd.randint(0, npop // 2 - 1)
            couples[i] = [selection[i], selection[k]]

        x1 = couples[:, 0]
        x2 = couples[:, 1]

        children[:npop // 2] = 0.5 * ((1 - betaCross) * x1 +
                                      (1 + betaCross) * x2)
        children[npop // 2:] = 0.5 * ((1 + betaCross) * x1 +
                                      (1 - betaCross) * x2)

        return children
Example #45
    def forward(self, src, trg, teacher_forcing_ratio=0.5):
        # src = [src sent len, batch size]
        # trg = [trg sent len, batch size]
        # teacher_forcing_ratio is probability to use teacher forcing
        # e.g. if teacher_forcing_ratio is 0.75 we use ground-truth inputs 75% of the time

        # Note: sequence length is the first dimension here, batch size the second
        max_len, batch_size = trg.shape
        trg_vocab_size = self.decoder.output_dim

        # tensor to store decoder outputs
        outputs = torch.zeros(max_len,
                              batch_size,
                              trg_vocab_size,
                              device=self.device)

        # last hidden state of the encoder is used as the initial hidden state of the decoder
        enc_states, hidden, cell = self.encoder(src)
        if USE_BIDIR_ENCODER:
            hidden = hidden.mean(0).unsqueeze(0)
            cell = cell.mean(0).unsqueeze(0)

        # first input to the decoder is the <sos> tokens
        input = trg[0]

        use_teacher_forcing = rnd.sample(max_len - 1) < teacher_forcing_ratio
        for t, teacher_forcing in enumerate(use_teacher_forcing, 1):
            #print(enc_states.shape, hidden.shape, cell.shape)
            output, hidden, cell = self.decoder(input, hidden, cell,
                                                enc_states)
            outputs[t] = output
            input = (trg[t] if teacher_forcing else output.max(1)[1])

        return outputs
Example #46
 def sample_landmark(class_, landmarks, poi):
     distances = array([lmk.distance_to(poi) for lmk in landmarks])
     scores = 1.0/(array(distances)**1.5 + class_.epsilon)
     scores[distances == 0] = 0
     lm_probabilities = scores/sum(scores)
     index = lm_probabilities.cumsum().searchsorted( random.sample(1) )[0]
     return index
Example #47
 def sample_point_trajector(self, bounding_box, relation, perspective, landmark, step=0.02):
     """
     Sample a point of interest given a relation and landmark.
     """
     probs, points = self.get_probabilities_box(bounding_box, relation, perspective, landmark)
     probs /= probs.sum()
     index = probs.cumsum().searchsorted( random.sample(1) )[0]
     return Landmark( 'point', Vec2( *points[index] ), None, Landmark.POINT )
Example #48
def set_disk_positions(N, z0):
  global disk_cut
  radii = np.zeros(N)
  disk_cut = disk_radial_cumulative(disk_cut_r)
  print "%.0f%% of disk mass cut by the truncation..." % \
        (100*(1-disk_cut))
  if disk_cut < 0.9:
    print "  \_ Warning: this is more than 10% of the total disk mass!"
  sample = nprand.sample(N) * disk_cut
  for i, s in enumerate(sample):
    radii[i] = disk_radial_inverse_cumulative(s)
  zs = disk_height_inverse_cumulative(nprand.sample(N), z0)
  phis = 2 * pi * nprand.sample(N)
  xs = radii * cos(phis)
  ys = radii * sin(phis)
  coords = np.column_stack((xs, ys, zs))
  return coords
Example #49
def project_size():
    seed = random.sample()
    if seed < 0.1:
        return 0
    elif seed < 0.85:
        return 1
    else:
        return 2
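A quick sanity check of the distribution this encodes (assuming `random` here is numpy.random, whose sample() returns a uniform draw in [0, 1)):

from collections import Counter

counts = Counter(project_size() for _ in range(100000))
# expected proportions: ~10% of size 0, ~75% of size 1, ~15% of size 2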
Example #50
 def sample_relation(self, sampled_landmark, poi):
     rel_scores = []
     for relation in self.relations:
         rel_scores.append( relation.probability(poi, sampled_landmark) )
     rel_scores = array(rel_scores)
     rel_probabilities = rel_scores/sum(rel_scores)
     index = rel_probabilities.cumsum().searchsorted( random.sample(1) )[0]
     return self.relations[index]
Example #51
def variants_from_genome(fasta, n_snvs, n_indels, max_indel_size):
    """
    Reads a genome file (.fasta), selects n valid snvs and indels
    given the parameters.
    """
    valid_chromosomes = [str(i) for i in range(1, 23)] + ["X", "Y"]  # chromosomes 1-22, X, Y
    genome = readfasta(fasta)
    genome = [(chrom, seq) for chrom, seq in genome \
                            if chrom in valid_chromosomes]

    sizes = numpy.array([len(seq) for chrom, seq in genome])
    cum_sizes = numpy.cumsum(sizes)

    snv_starts = sorted(random.sample(range(cum_sizes[-1]), n_snvs))
    indel_starts = sorted(random.sample(range(cum_sizes[-1]), n_indels))

    return snvs_from_starts(genome, snv_starts, cum_sizes), \
            indels_from_starts(genome, indel_starts, cum_sizes, max_indel_size)
Example #52
 def k_minus(self):
     """
     the methylated histone will be get unmethylated by K_MINUS probability
     or
     the acetilated histone will be get unacetylated by K_MINUS probability
     return unmethylated histone object if the histone will get unmethylated
     """
     if(sample()<Histone.K_MINUS):return UHistone(copy=True,copy_histone=self)
     return self
Example #53
 def sample_poi(self, bounding_box, relation, perspective, landmark, step=0.02):
     """
     Sample a point of interest given a relation and landmark.
     """
     #points = landmark.representation.sample_points(step=step)
     #probs = self.get_probabilities_points(points, relation, perspective, landmark)
     probs, points = self.get_probabilities_box(bounding_box, relation, perspective, landmark)
     probs /= probs.sum()
     index = probs.cumsum().searchsorted( random.sample(1) )[0]
     return points.flatten()[index]
Example #54
def weighted_sample(iterable, weights, n=None):
    if len(iterable) != len(weights):
        raise Exception('Length mismatch')
    count = len(iterable) if n is None else n
    # Normalize weights (list comprehensions, not generator expressions:
    # numpy would otherwise build a 0-d object array)
    wt_sum = float(sum(weights))
    aliases = array([w / wt_sum for w in weights]).cumsum()
    # Get indices to include in sample
    indices = aliases.searchsorted(sample(count))
    return array([iterable[i] for i in indices])
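A usage sketch, assuming the unqualified `array` and `sample` in this example come from numpy and numpy.random respectively:

from numpy import array
from numpy.random import sample

items = array([10, 20, 30])
draws = weighted_sample(items, [0.2, 0.3, 0.5], n=1000)
# each draw picks items[i] with probability proportional to weights[i]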
Example #55
    def sample_landmark(self, landmarks, poi):
        ''' Weight by inverse of distance to landmark center and choose probabilistically  '''
        epsilon = 0.000001
        distances = array([lmk.representation.middle.distance_to(poi) for lmk in landmarks])
        scores = 1.0/(array(distances)**1.5 + epsilon)
        # scores[distances == 0] = 0
        lm_probabilities = scores/sum(scores)
        index = lm_probabilities.cumsum().searchsorted( random.sample(1) )[0]

        return landmarks[index], lm_probabilities[index], self.get_entropy(lm_probabilities)
Example #56
    def k_ace(self, ace_prob):
        # if histone has CpG island on it
        if 1 in self.CpGislandlist:
            # histone cannot get acetylated
            return self

        else:
            if sample() < ace_prob:
                return AHistoneWithDNAModel(inherited=True, inherited_hst=self)
            else:
                return self
Example #57
File: models.py Project: kpj/Bioto
    def generate(self, **kwargs):
        """ Solves nonlinear system and returns solution
        """
        t = np.arange(0, kwargs['runs'], 1)
        x0 = npr.sample(len(self.graph))

        def func(X, t=0):
            return self.get_terms(X)

        res = odeint(func, x0, t)
        return res