Example #1
def _random_motion_blur(img, kernel_size=7):
    choice = np.random.choice([0, 1], replace=True, p=[0.666, 0.334])
    kernel_size = int(
        halfnorm.rvs(loc=kernel_size, scale=kernel_size + 1, size=1)[0])
    kernel_size = kernel_size if kernel_size % 2 else kernel_size - 1

    if choice:
        # generating the kernel
        kernel_motion_blur = np.zeros((kernel_size, kernel_size))
        kernel_motion_blur[int(
            (kernel_size - 1) / 2), :] = np.ones(kernel_size)
        kernel_motion_blur = kernel_motion_blur / kernel_size

        deg = np.random.uniform(0, 180)
        kernel_motion_blur_rotated = rotate(kernel_motion_blur,
                                            deg,
                                            reshape=False)
        kernel_motion_blur_rotated /= kernel_motion_blur_rotated.sum()

        # applying the kernel to the input image
        output = cv2.filter2D(img, -1, kernel_motion_blur_rotated)

    else:
        output = img

    return output.clip(0.0, 255.0)
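A minimal usage sketch for the augmentation above; the imports and the dummy image are assumptions, since the original module is not shown:

import numpy as np
import cv2
from scipy.stats import halfnorm
from scipy.ndimage import rotate

# Dummy float image in [0, 255]; in practice this would be a loaded photo.
img = np.random.uniform(0.0, 255.0, size=(128, 128, 3)).astype(np.float32)

# Roughly one call in three applies a randomly rotated line kernel.
blurred = _random_motion_blur(img, kernel_size=7)
print(blurred.shape, blurred.min(), blurred.max())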
Example #2
    def _setup(self, X):
        self.h, self.e, self.v = None, None, None
        if isinstance(X, np.ndarray):
            n, m = X.shape
            avg = np.sqrt(X.mean() / m)
            iterating = False
        else:
            x = next(X)
            m = len(x)
            avg = np.sqrt(x.mean() / m)
            X = chain([x], X)
            iterating = True

        self.n_features = m
        self.iterating = iterating

        self.W = np.abs(
            avg * halfnorm.rvs(size=(self.n_features, self.rank)) / np.sqrt(self.rank)
        )
        self.H = []

        if self.subspace_tracking:
            self.vnew = np.zeros_like(self.W)
        else:
            self.A = np.zeros((self.rank, self.rank))
            self.B = np.zeros((self.n_features, self.rank))

        return X
Example #3
    def _setup(self, X, normalize=False):
        self.h, self.r = None, None
        if isinstance(X, np.ndarray):
            n, m = X.shape
            if normalize:
                self.X_min = X.min()
                self.X_max = X.max()
                self.normalize = normalize
                # actually scale the data to be between 0 and 1, not just close
                # to it.
                X = _normalize(X, ar_min=self.X_min, ar_max=self.X_max)
                # X = (X - self.X_min) / (self.X_max - self.X_min)
            avg = np.sqrt(X.mean() / m)
        else:
            if normalize:
                _logger.warning("Normalization with an iterator is not"
                                " possible, option ignored.")
            x = next(X)
            m = len(x)
            avg = np.sqrt(x.mean() / m)
            X = chain([x], X)

        self.nfeatures = m

        self.W = np.abs(avg * halfnorm.rvs(size=(self.nfeatures, self.rank)) /
                        np.sqrt(self.rank))

        self.A = np.zeros((self.rank, self.rank))
        self.B = np.zeros((self.nfeatures, self.rank))
        return X
Example #4
    def _setup(self, corpus):
        """Infer info from the first document and initialize matrices.

        Parameters
        ----------
        corpus : iterable of list(int, float)
            Training corpus.

        """
        self._h, self._r = None, None
        first_doc_it = itertools.tee(corpus, 1)
        first_doc = next(first_doc_it[0])
        first_doc = matutils.corpus2csc([first_doc], len(self.id2word))
        self.w_std = np.sqrt(first_doc.mean() / (self.num_tokens * self.num_topics))

        self._W = np.abs(
            self.w_std
            * halfnorm.rvs(
                size=(self.num_tokens, self.num_topics), random_state=self.random_state
            )
        )

        is_great_enough = self._W > self.w_std * self.sparse_coef

        self._W *= is_great_enough | ~is_great_enough.all(axis=0)

        self._W = scipy.sparse.csc_matrix(self._W)

        self.A = scipy.sparse.csr_matrix((self.num_topics, self.num_topics))
        self.B = scipy.sparse.csc_matrix((self.num_tokens, self.num_topics))
Example #5
    def _setup(self, corpus):
        """Infer info from the first document and initialize matrices.

        Parameters
        ----------
        corpus : iterable of list of (int, float), optional
            Training corpus.
            Can be either iterable of documents, which are lists of `(word_id, word_count)`,
            or a sparse csc matrix of BOWs for each document.
            If not specified, the model is left uninitialized (presumably, to be trained later with `self.train()`).

        """
        self._h = None

        if isinstance(corpus, scipy.sparse.csc.csc_matrix):
            first_doc = corpus.getcol(0)
        else:
            first_doc_it = itertools.tee(corpus, 1)
            first_doc = next(first_doc_it[0])
            first_doc = matutils.corpus2csc([first_doc], len(self.id2word))
        self.w_std = np.sqrt(first_doc.mean() / (self.num_tokens * self.num_topics))

        self._W = np.abs(
            self.w_std
            * halfnorm.rvs(
                size=(self.num_tokens, self.num_topics), random_state=self.random_state
            )
        )

        self.A = np.zeros((self.num_topics, self.num_topics))
        self.B = np.zeros((self.num_tokens, self.num_topics))
Example #6
def gen_sample_ret():
    alpha = wald.rvs(loc=alpha_mean, scale=alpha_conc)
    beta = norm.rvs(loc=beta_mean, scale=beta_var)
    scale = np.random.exponential(scale=scale_rate)
    mean = halfnorm.rvs(loc=.1, scale=.08)
    ret = norminvgauss.rvs(alpha, beta, mean, scale)
    return ret
Example #7
    def __random_disturbance(self, sto_vol: bool, rd_mu: float,
                             rd_sigma: float):

        if not sto_vol:
            return np.random.normal(size=(self.T, 1))
        else:
            # error handling for scale < 0, because negative volatilities
            # don't make sense.
            return np.random.normal(
                loc=rd_mu * self.h,
                scale=rd_sigma * halfnorm.rvs(1) * np.sqrt(self.h),
                size=(self.T, 1)) * 100
Example #8
def _random_gaussian_blur(img, kernel_size=5):
    choice = np.random.choice([0, 1], replace=True, p=[0.666, 0.334])
    kernel_size = int(
        halfnorm.rvs(loc=kernel_size, scale=kernel_size + 1, size=1)[0])
    kernel_size = kernel_size if kernel_size % 2 else kernel_size - 1

    if choice:
        output = cv2.blur(img, (kernel_size, kernel_size))

    else:
        output = img

    return output.clip(0.0, 255.0)
Example #9
def gen_sample_ret(period, size):
    alpha = wald.rvs(loc=alpha_mean, scale=alpha_conc)
    beta = norm.rvs(loc=beta_mean, scale=beta_var)
    scale = np.random.exponential(scale=scale_rate)
    scale = scale / period
    mean = halfnorm.rvs(loc=.1, scale=.1)
    mean = mean / period
    try:
        ret = norminvgauss.rvs(alpha, beta, mean, scale, size=size)
    except Exception:
        # retry once with the same parameters if the first draw fails
        ret = norminvgauss.rvs(alpha, beta, mean, scale, size=size)
    return ret
Example #10
    def _setup(self, v):
        """Infer info from the first batch and initialize the matrices.

        Parameters
        ----------
        v : `csc_matrix` with the shape (n_tokens, chunksize)
            Batch of bows.

        """
        self.w_std = np.sqrt(v.mean() / (self.num_tokens * self.num_topics))

        self._W = np.abs(self.w_std *
                         halfnorm.rvs(size=(self.num_tokens, self.num_topics),
                                      random_state=self.random_state))

        self.A = np.zeros((self.num_topics, self.num_topics))
        self.B = np.zeros((self.num_tokens, self.num_topics))
Example #11
def dg_from_dist(n_samples=25, offset=-10.0):
    """
    Generate some fake results for MMGBSA then return mean and
    standard error as an uncertainty estimate.

    Parameters
    ----------
    n_samples: integer
        Number of samples to draw
    offset: float
        Offset from 0 - generally MMGBSA results are more negative than 
        experimental ones
    """

    target = -1 * halfnorm.rvs(size=1)[0] + offset

    sample = np.random.randn(n_samples) + target

    return np.mean(sample), np.std(sample)/np.sqrt(n_samples)
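A short usage sketch, assuming numpy (np) and scipy.stats.halfnorm are imported as in the original module; the sample size and offset are arbitrary:

mean_dg, sem_dg = dg_from_dist(n_samples=50, offset=-10.0)
print(f"MMGBSA estimate: {mean_dg:.2f} +/- {sem_dg:.2f}")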
Example #12
    def _setup(self, v):
        """Infer info from the first batch and initialize the matrices.

        Parameters
        ----------
        v : `csc_matrix` with the shape (n_tokens, chunksize)
            Batch of bows.

        """
        self.w_std = np.sqrt(v.mean() / (self.num_tokens * self.num_topics))

        self._W = np.abs(
            self.w_std
            * halfnorm.rvs(
                size=(self.num_tokens, self.num_topics), random_state=self.random_state
            )
        )

        self.A = np.zeros((self.num_topics, self.num_topics))
        self.B = np.zeros((self.num_tokens, self.num_topics))
Example #13
def generate_user(df):
    new_profile = pandas.DataFrame(columns=df.columns,
                                   index=[df.index[-1] + 1])
    new_profile[
        'Bios'] = "Twitteraholic. Extreme web fanatic. Food buff. Infuriatingly humble entrepreneur."
    # Adding random values for new data
    for i in new_profile.columns[1:]:
        if i in ['Religion', 'Politics']:
            new_profile[i] = numpy.random.choice(combined[i], 1, p=p[i])

        elif i == 'Age':
            new_profile[i] = halfnorm.rvs(loc=18, scale=8, size=1).astype(int)

        else:
            new_profile[i] = list(
                numpy.random.choice(combined[i], size=(1, 3), p=p[i]))

            new_profile[i] = new_profile[i].apply(
                lambda x: list(set(x.tolist())))

    return new_profile
Example #14
 def rand_minibatch(self, size):
     '''
     get a minibatch of random exp for training
     use simple memory decay, i.e. sample with a left tail
     distribution to draw more from latest memory
     then append with the most recent, untrained experience
     '''
     memory_size = self.size()
     # increase to k if we skip training to every k time steps
     latest_batch_size = 1
     new_memory_ind = max(0, memory_size - latest_batch_size)
     old_memory_ind = max(0, new_memory_ind - 1)
     latest_inds = np.arange(new_memory_ind, memory_size)
     random_batch_size = size - latest_batch_size
     rand_inds = (
         old_memory_ind -
         halfnorm.rvs(size=random_batch_size,
                      scale=float(old_memory_ind) * 0.37).astype(int))
     inds = np.concatenate([latest_inds, rand_inds]).clip(0)
     minibatch = self.get_exp(inds)
     return minibatch
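The memory-decay idea described in the docstring can be shown in isolation; a minimal sketch, with the memory size and batch size as illustrative assumptions and the 0.37 scale factor taken from the snippet:

import numpy as np
from scipy.stats import halfnorm

memory_size = 1000
batch_size = 32
old_memory_ind = memory_size - 2  # index just before the newest experience

# Half-normal offsets subtracted from the newest index form a left tail:
# indices near the end of memory are drawn most often.
offsets = halfnorm.rvs(size=batch_size - 1,
                       scale=old_memory_ind * 0.37).astype(int)
inds = np.concatenate([[memory_size - 1], old_memory_ind - offsets]).clip(0)
print(np.sort(inds))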
Example #15
 def rand_minibatch(self, size):
     '''
     get a minibatch of random exp for training
     use simple memory decay, i.e. sample with a left tail
     distribution to draw more from latest memory
     then append with the most recent, untrained experience
     '''
     memory_size = self.size()
     new_exp_size = self.agent.train_per_n_new_exp
     if memory_size <= size or memory_size <= new_exp_size:
         inds = np.random.randint(memory_size, size=size)
     else:
         new_memory_ind = max(0, memory_size - new_exp_size)
         old_memory_ind = max(0, new_memory_ind - 1)
         latest_inds = np.arange(new_memory_ind, memory_size)
         random_batch_size = size - new_exp_size
         rand_inds = (old_memory_ind - halfnorm.rvs(
             size=random_batch_size,
             scale=float(old_memory_ind)*0.80).astype(int))
         inds = np.concatenate([rand_inds, latest_inds]).clip(0)
     minibatch = self.get_exp(inds)
     return minibatch
Example #16
 def rand_minibatch(self, size):
     '''
     get a minibatch of random exp for training
     use simple memory decay, i.e. sample with a left tail
     distribution to draw more from latest memory
     then append with the most recent, untrained experience
     '''
     memory_size = self.size()
     new_exp_size = self.agent.train_per_n_new_exp
     if memory_size <= size or memory_size <= new_exp_size:
         inds = np.random.randint(memory_size, size=size)
     else:
         new_memory_ind = max(0, memory_size - new_exp_size)
         old_memory_ind = max(0, new_memory_ind - 1)
         latest_inds = np.arange(new_memory_ind, memory_size)
         random_batch_size = size - new_exp_size
         rand_inds = (
             old_memory_ind -
             halfnorm.rvs(size=random_batch_size,
                          scale=float(old_memory_ind) * 0.80).astype(int))
         inds = np.concatenate([rand_inds, latest_inds]).clip(0)
     minibatch = self.get_exp(inds)
     return minibatch
Example #17
def half_normal_draw(sigma):
    return halfnorm.rvs(loc=0, scale=sigma)
    # Expected value of outcome
    mu = alpha + beta[0] * X1 + beta[1] * X2

    # Likelihood (sampling distribution) of observations
    Y_obs = pm.Normal("Y_obs", mu=mu, sigma=sigma, observed=Y)

    # maximum a posteriori (MAP) estimate:
    # map_estimate = pm.find_MAP(model=basic_model)
    # print(f"Map Estimate:{map_estimate}")

    times_consumed = []

    N = 100
    alphas = norm.rvs(size=N)
    betas = halfnorm.rvs(size=(N, 2))
    sigmas = halfnorm.rvs(size=N)

    print("=========================================")
    print(f"Tuning NUTS")
    print("=========================================")

    n_chains = 4
    init_trace = pm.sample(draws=1000, tune=1000, cores=n_chains)
    cov = np.atleast_1d(pm.trace_cov(init_trace))
    start = list(np.random.choice(init_trace, n_chains))
    potential = quadpotential.QuadPotentialFull(cov)
    step_size = init_trace.get_sampler_stats("step_size_bar")[-1]
    size = m.bijection.ordering.size
    step_scale = step_size * (size**0.25)
## Forward array
num_points_f = np.zeros(Tnum).astype(np.int64)
correlation_f = np.zeros([num_redux, Tnum])

## Output array
CORRELATION = np.zeros([num_iterations * num_redux + 1, Tnum])
CORRELATION[0] = T

for ip in range(0, num_iterations):

    ## New flux array for calculation
    F1 = np.zeros(tnum) - 100
    for ii in range(0, tnum):
        if F[ii] != -100:
            if randint(0, 1) == 0:
                F1[ii] = F[ii] - halfnorm.rvs(scale=Fe_d[ii])
            else:
                F1[ii] = F[ii] + halfnorm.rvs(scale=Fe_u[ii])

    ## Forward
    for ii in range(0, Tnum):
        if ii == 0:
            arr_opt = F1
            arr_gam = F1
        else:
            arr_opt = F1[ii:]
            arr_gam = F1[:-ii]

        bool_arr = np.logical_and(arr_opt != -100,
                                  arr_gam != -100)  ## Ignore bad points
        num_points_f[ii] = int(np.sum(bool_arr))

		F_GAM = np.zeros([num_iterations, TNUM])
		F_OPT = np.zeros([num_iterations, TNUM])
		F_RAT = np.zeros([num_iterations, TNUM])
		
		for jj in range(0,num_iterations):
		
			F_gam = np.zeros(TNUM)
			F_opt = np.zeros(TNUM)
			ratio_arr = np.zeros(TNUM)
			rand_points_1 = np.zeros([TNUM,2])
			rand_points_2 = np.zeros([TNUM,2])
			for ii in range(0,TNUM):
				## Gamma-ray
				if randint(0,1) == 0:
					F_gam[ii] = F_gam_ratio[ii] - halfnorm.rvs(scale=fe_d_gam_ratio[ii])
				else:
					F_gam[ii] = F_gam_ratio[ii] + halfnorm.rvs(scale=fe_u_gam_ratio[ii])
				
				## Optical
				F_opt[ii] = np.random.normal(F_opt_ratio[ii], fe_opt_ratio[ii])
				
				## ratio
				if randint(0,1) == 0:
					ratio_arr[ii] = gam_opt_ratio[ii] - halfnorm.rvs(scale=ratio_err_d[ii])
				else:
					ratio_arr[ii] = gam_opt_ratio[ii] + halfnorm.rvs(scale=ratio_err_u[ii])
			
			F_GAM[jj,:] = F_gam
			F_OPT[jj,:] = F_opt
			F_RAT[jj,:] = ratio_arr
Example #21
import numpy as np
from scipy.stats import halfnorm
import matplotlib.pyplot as plt

def const_arrival():  # Constant arrival distribution for generator 1
    return 1.0


def const_arrival2():
    return 2.0


def dist_size():
    return 2.0


if __name__ == '__main__':
    numargs = halfnorm.numargs
    [] = [0.7, ] * numargs
    rv = halfnorm()

    print("RV : \n", rv)
    quantile = np.arange(0.01, 1, 0.1)

    # Random Variates
    R = halfnorm.rvs(scale=2, size=10)
    print("Random Variates : \n", R)
    distribution = np.linspace(0, np.minimum(rv.dist.b, 3))
    print("Distribution : \n", distribution)

    plot = plt.plot(distribution, rv.pdf(distribution))
    plt.show()
Example #22
from scipy.stats import halfnorm
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots(1, 1)

x = np.linspace(halfnorm.ppf(0.01),
                halfnorm.ppf(0.99), 100)
ax.plot(x, halfnorm.pdf(x),
        'r-', lw=5, alpha=0.6, label='halfnorm pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = halfnorm()
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = halfnorm.ppf([0.001, 0.5, 0.999])
np.allclose([0.001, 0.5, 0.999], halfnorm.cdf(vals))
# True

# Generate random numbers:

r = halfnorm.rvs(size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
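For intuition, a draw from halfnorm with a given scale is distributed like the absolute value of a zero-mean normal with the same scale; a quick check (the value of sigma is arbitrary):

import numpy as np
from scipy.stats import halfnorm

sigma = 2.0
a = halfnorm.rvs(scale=sigma, size=100000)
b = np.abs(np.random.normal(0.0, sigma, size=100000))
print(a.mean(), b.mean())  # both close to sigma * sqrt(2 / pi)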
def generate_game_difficulty_data(players=100,
                                  levels=10,
                                  max_sessions=20,
                                  max_attempts=10):
    """
    """
    data = {
        'player_id': [],
        'level': [],
        'session': [],
        'num_success': [],
        'num_attempts': [],
    }
    true_parameters = {
        'player_id': [],
        'levels_id': [],
        'level_difficulty': [],
        'player_ability': [],
        'delta': [],
        'probability_success': []
    }
    players_ability = []
    for player in range(players):

        player_ability = np.random.normal(
            np.random.normal(0, 0.5, 1)[0],
            halfnorm.rvs(loc=0, scale=1, size=1)[0], 1)[0]
        players_ability.append(player_ability)

    levels_difficulty = []
    for level in range(levels):

        level_difficulty = np.random.normal(
            np.random.normal(0, 0.5, 1)[0],
            halfnorm.rvs(loc=0, scale=1, size=1)[0], 1)[0]
        levels_difficulty.append(level_difficulty)

    for player, ability in enumerate(players_ability):

        for level, difficulty in enumerate(levels_difficulty):

            delta = ability - difficulty
            p_success = compute_sigmoid(delta)

            true_parameters['player_id'].append(player)
            true_parameters['levels_id'].append(level)
            true_parameters['level_difficulty'].append(difficulty)
            true_parameters['player_ability'].append(ability)
            true_parameters['delta'].append(delta)
            true_parameters['probability_success'].append(p_success)

            n_sessions = np.random.randint(1, max_sessions, 1)[0]
            for session in range(n_sessions):

                attempts = np.random.randint(1, max_attempts, 1)[0]
                data['player_id'].append(player)
                data['level'].append(level)
                data['session'].append(session)
                data['num_success'].append(binom.rvs(n=attempts, p=p_success))
                data['num_attempts'].append(attempts)

    true_parameters = pd.DataFrame(true_parameters)
    df = pd.DataFrame(data)
    return df, true_parameters
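The generator above calls compute_sigmoid, which is not shown here; a minimal definition consistent with how it is used (mapping the ability-minus-difficulty delta to a success probability) is the standard logistic function, sketched below as an assumption:

import numpy as np

def compute_sigmoid(delta):
    # Logistic link: a larger ability - difficulty gap means a higher success probability.
    return 1.0 / (1.0 + np.exp(-delta))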
st.header("Finding a Partner with AI Using NaiveBayes, KNN and SVM")
st.write("Use Machine Learning to Find the Top Dating Profile Matche")
# st.write(combined)
image = Image.open('roshan_graffiti.png')
st.image(image, use_column_width=True)
new_profile = pd.DataFrame(columns=df.columns, index=[df.index[-1] + 1])
random_vals = st.checkbox(
    "Check here if you would like random values for yourself instead")
if random_vals:

    for i in new_profile.columns[1:]:
        if i in ['When it comes to love']:
            new_profile[i] = np.random.choice(combined[i], 1, p=p[i])

        elif i == 'Age':
            new_profile[i] = halfnorm.rvs(loc=18, scale=8, size=1).astype(int)

        else:
            new_profile[i] = list(
                np.random.choice(combined[i], size=(1, 2), p=p[i]))

            new_profile[i] = new_profile[i].apply(
                lambda x: list(set(x.tolist())))

else:
    for i in new_profile.columns:
        new_profile[i] = st.selectbox(f"Enter your choice for {i}", combined)
        if i in ['When it comes to love']:
            new_profile[i] = st.selectbox(f"Enter your choice for {i}",
                                          combined[i])
        # bool_arr = np.logical_and(bool_arr, arr_gam>0)
        num_points_f[ii] = int(np.sum(bool_arr))

        if int(num_points_f[ii]) > 3:
            arr_opt = arr_opt[bool_arr]
            arr_opt_err_d = arr_opt_err_d[bool_arr]
            arr_opt_err_u = arr_opt_err_u[bool_arr]
            arr_gam = arr_gam[bool_arr]
            arr_gam_err_d = arr_gam_err_d[bool_arr]
            arr_gam_err_u = arr_gam_err_u[bool_arr]

            for ij in range(0, num_points_f[ii]):
                #Optical ray points are two half normal distributions
                # print(arr_opt[ij], arr_opt_err_d[ij], arr_opt_err_u[ij])
                if randint(0, 1) == 0:
                    point_opt = arr_opt[ij] - halfnorm.rvs(
                        scale=arr_opt_err_d[ij])
                else:
                    point_opt = arr_opt[ij] + halfnorm.rvs(
                        scale=arr_opt_err_u[ij])

                #Gamma ray points are two half normal distributions
                if randint(0, 1) == 0:
                    point_gam = arr_gam[ij] - halfnorm.rvs(
                        scale=arr_gam_err_d[ij])
                else:
                    point_gam = arr_gam[ij] + halfnorm.rvs(
                        scale=arr_gam_err_u[ij])

                if ij == 0:
                    all_points = np.array([[point_opt, point_gam]])
                else:
Example #26
CORRELATION = np.zeros([2 * num_iterations * num_redux + 1, 2 * Tnum - 1])
T_b = T[::-1]
TT = np.concatenate((-T_b[:-1], T))
CORRELATION[0] = TT

for ip in range(0, num_iterations):

    ## New flux arrays for calculation
    F1_gam = np.zeros(tnum) - 100
    F1_opt = np.zeros(tnum) - 100
    for ii in range(0, tnum):

        ## Gamma-ray
        if F_gam[ii] != -100:
            if randint(0, 1) == 0:
                F1_gam[ii] = F_gam[ii] - halfnorm.rvs(scale=fe_gam_d[ii])
            else:
                F1_gam[ii] = F_gam[ii] + halfnorm.rvs(scale=fe_gam_u[ii])

        ## Optical
        if F_opt[ii] != -100:
            F1_opt[ii] = np.random.normal(F_opt[ii], fe_opt[ii])

    ## Forward
    for ii in range(0, Tnum):
        if ii == 0:
            arr_opt = F1_opt
            arr_gam = F1_gam
        else:
            arr_opt = F1_opt[ii:]
            arr_gam = F1_gam[:-ii]
Example #27
 def __call__(self, sample):
     out_size = sample['SignalWaterfalls'].shape
     noise = halfnorm.rvs(size=out_size)
     sample['Waterfalls'] = sample['SignalWaterfalls'] + noise
     return sample
Example #28
def half_normal_draw(sigma):
    return halfnorm.rvs(loc=0,scale=sigma)
Example #29
def refining_profile_data(df):
    # Removing the numerical data
    df = df[['Bios']]

    # Creating Lists for the Categories

    # Probability dictionary
    p = {}

    # Movie Genres
    movies = [
        'Adventure', 'Action', 'Drama', 'Comedy', 'Thriller', 'Horror',
        'RomCom', 'Musical', 'Documentary'
    ]

    p['Movies'] = [0.28, 0.21, 0.16, 0.14, 0.09, 0.06, 0.04, 0.01, 0.01]

    # TV Genres
    tv = [
        'Comedy', 'Drama', 'Action/Adventure', 'Suspense/Thriller',
        'Documentaries', 'Crime/Mystery', 'News', 'SciFi', 'History'
    ]

    p['TV'] = [0.30, 0.23, 0.12, 0.12, 0.09, 0.08, 0.03, 0.02, 0.01]

    # Religions (could potentially create a spectrum)
    religion = [
        'Catholic', 'Christian', 'Jewish', 'Muslim', 'Hindu', 'Buddhist',
        'Spiritual', 'Other', 'Agnostic', 'Atheist'
    ]

    p['Religion'] = [
        0.16, 0.16, 0.01, 0.19, 0.11, 0.05, 0.10, 0.09, 0.07, 0.06
    ]

    # Music
    music = [
        'Rock', 'HipHop', 'Pop', 'Country', 'Latin', 'EDM', 'Gospel', 'Jazz',
        'Classical'
    ]

    p['Music'] = [0.30, 0.23, 0.20, 0.10, 0.06, 0.04, 0.03, 0.02, 0.02]

    # Sports
    sports = [
        'Football', 'Baseball', 'Basketball', 'Hockey', 'Soccer', 'Other'
    ]

    p['Sports'] = [0.34, 0.30, 0.16, 0.13, 0.04, 0.03]

    # Politics (could also put on a spectrum)
    politics = [
        'Liberal', 'Progressive', 'Centrist', 'Moderate', 'Conservative'
    ]

    p['Politics'] = [0.26, 0.11, 0.11, 0.15, 0.37]

    # Social Media
    social = [
        'Facebook', 'Youtube', 'Twitter', 'Reddit', 'Instagram', 'Pinterest',
        'LinkedIn', 'SnapChat', 'TikTok'
    ]

    p['Social Media'] = [0.36, 0.27, 0.11, 0.09, 0.05, 0.03, 0.03, 0.03, 0.03]

    # Age (generating random numbers based on half normal distribution)
    age = halfnorm.rvs(loc=18, scale=8, size=df.shape[0]).astype(int)

    # Lists of Names and the list of the lists
    categories = [movies, tv, religion, music, politics, social, sports, age]

    names = [
        'Movies', 'TV', 'Religion', 'Music', 'Politics', 'Social Media',
        'Sports', 'Age'
    ]

    combined = dict(zip(names, categories))

    # Establishing random values for each category

    # Looping through and assigning random values
    for name, cats in combined.items():
        if name in ['Religion', 'Politics']:
            # Picking only 1 from the list
            df[name] = numpy.random.choice(cats, df.shape[0], p=p[name])

        elif name == 'Age':
            # Assigning the ages generated above from a half-normal distribution
            df[name] = cats
        else:
            # Picking 3 from the list
            try:
                df[name] = list(
                    numpy.random.choice(cats,
                                        size=(df.shape[0], 1, 3),
                                        p=p[name]))
            except Exception as ex:
                print(ex)
                df[name] = list(
                    numpy.random.choice(cats, size=(df.shape[0], 1, 3)))

            df[name] = df[name].apply(lambda x: list(set(x[0].tolist())))

    df['Religion'] = pandas.Categorical(df.Religion,
                                        ordered=True,
                                        categories=[
                                            'Catholic', 'Christian', 'Jewish',
                                            'Muslim', 'Hindu', 'Buddhist',
                                            'Spiritual', 'Other', 'Agnostic',
                                            'Atheist'
                                        ])

    df['Politics'] = pandas.Categorical(df.Politics,
                                        ordered=True,
                                        categories=[
                                            'Liberal', 'Progressive',
                                            'Centrist', 'Moderate',
                                            'Conservative'
                                        ])

    return df
LNA = 0.
LNA_low = 0.
LNA_high = 0.
# LNA_p = np.zeros(num_it)

BETA = np.zeros(num_it)
BETA_low = np.zeros(num_it)
BETA_high = np.zeros(num_it)
BETA_p = np.zeros(num_it)
for kk in range(0, num_it):

    logflux_rand = np.zeros(len(lnE))
    for jj in range(0, len(lnE)):
        if randint(0, 1) == 0:
            logflux_rand[jj] = logflux[jj] - halfnorm.rvs(
                scale=logflux_err_d[jj])
        else:
            logflux_rand[jj] = logflux[jj] + halfnorm.rvs(
                scale=logflux_err_u[jj])

    dlogflux_rand = (logflux_rand[1:] - logflux_rand[:-1]) / (lnE[1:] -
                                                              lnE[:-1])

    beta, lnA = np.linalg.lstsq(MAT, logflux_rand, rcond=None)[0]
    beta_low, lnA_low = np.linalg.lstsq(MAT_low,
                                        logflux_rand[ENERGY < 1.e4],
                                        rcond=None)[0]
    if len(lnE[ENERGY >= 1.e4]) > 1:
        beta_high, lnA_high = np.linalg.lstsq(MAT_high,
                                              logflux_rand[ENERGY >= 1.e4],
                                              rcond=None)[0]
 def halfnormal(num_samples: int) -> npt.NDArray[np.float64]:
     return tp.cast(
         npt.NDArray[np.float64],
         halfnorm.rvs(scale=1, size=num_samples)
     )
Example #32
 def func():
     return halfnorm.rvs(scale=np.sqrt(np.pi / 2), size=1)
Example #33
 def sample(self):
     X = halfnorm.rvs(size=self.size)
     return X