Example #1
def f3TruncNormRVSnp(parameters):
    N = parameters['N']
    target = parameters['target']
    rv1, rv2, rv3 = ndarray(shape = (N,), dtype=float), ndarray(shape = (N,), dtype=float), ndarray(shape = (N,), dtype=float)

    # if parameters['ncpu']:
    #     ncpu = parameters['ncpu']
    # else:
    #     ncpu = mp.cpu_count()
    #
    # pool = mp.Pool(ncpu)
    # workers = []
    if not parameters['distribution']:
        print('No distribution set...abort')
        exit(1)
    elif parameters['distribution'] == 'truncnorm':
        a1, b1 = (parameters['min_intrv1'] - parameters['mu1']) / parameters['sigma1'], (parameters['max_intrv1'] - parameters['mu1']) / parameters['sigma1']
        a2, b2 = (parameters['min_intrv2'] - parameters['mu2']) / parameters['sigma2'], (parameters['max_intrv2'] - parameters['mu2']) / parameters['sigma2']
        a3, b3 = (parameters['min_intrv3'] - parameters['mu3']) / parameters['sigma3'], (parameters['max_intrv3'] - parameters['mu3']) / parameters['sigma3']
        rv1 = truncnorm(a1, b1, loc=parameters['mu1'], scale=parameters['sigma1']).rvs(N)
        rv2 = truncnorm(a2, b2, loc=parameters['mu2'], scale=parameters['sigma2']).rvs(N)
        rv3 = truncnorm(a3, b3, loc=parameters['mu3'], scale=parameters['sigma3']).rvs(N)
    elif parameters['distribution'] == 'norm':
        rv1 = norm(loc=parameters['mu1'], scale=parameters['sigma1']).rvs(N)
        rv2 = norm(loc=parameters['mu2'], scale=parameters['sigma2']).rvs(N)
        rv3 = norm(loc=parameters['mu3'], scale=parameters['sigma3']).rvs(N)
    elif parameters['distribution'] == 'uniform':
        rv1 = uniform(loc=parameters['mu1'], scale=parameters['sigma1']).rvs(N)
        rv2 = uniform(loc=parameters['mu2'], scale=parameters['sigma2']).rvs(N)
        rv3 = uniform(loc=parameters['mu3'], scale=parameters['sigma3']).rvs(N)
    elif parameters['distribution'] == 'beta':
        rv1 = beta(a=parameters['min_intrv1'], b=parameters['max_intrv1'], loc=parameters['mu1'], scale=parameters['sigma1']).rvs(N)
        rv2 = beta(a=parameters['min_intrv2'], b=parameters['max_intrv2'], loc=parameters['mu2'], scale=parameters['sigma2']).rvs(N)
        rv3 = beta(a=parameters['min_intrv3'], b=parameters['max_intrv3'], loc=parameters['mu3'], scale=parameters['sigma3']).rvs(N)
    elif parameters['distribution'] == 'triang':
        rv1 = triang(loc=parameters['min_intrv1'], scale=parameters['max_intrv1'], c=parameters['mu1']).rvs(N)
        rv2 = triang(loc=parameters['min_intrv2'], scale=parameters['max_intrv2'], c=parameters['mu2']).rvs(N)
        rv3 = triang(loc=parameters['min_intrv3'], scale=parameters['max_intrv3'], c=parameters['mu3']).rvs(N)
    else:
        print('Distribution not recognized...abort')
        exit(1)

    if parameters['scaling']:
        #scale the values of Qs in the allowed range such that sum(Q_i) = A
        r = abs(parameters['Q1']) + abs(parameters['Q2']) + abs(parameters['Q3'])
        if r == 0.0:
            r = 1.

        # rounding the values, the sum could exceed A
        Q1 = abs(parameters['Q1']) * parameters['A'] / r
        Q2 = abs(parameters['Q2']) * parameters['A'] / r
        Q3 = parameters['A'] - Q1 - Q2
    else:
        # print "scaling = False"
        Q1 = parameters['Q1']
        Q2 = parameters['Q2']
        Q3 = parameters['Q3']

    return _f3(rv1, rv2, rv3, Q1, Q2, Q3, target)
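A minimal sketch of the expected parameters dict (all keys and values below are illustrative assumptions; _f3 and the key names come from the surrounding project):

params = {
    'N': 10000, 'target': 1.0, 'distribution': 'truncnorm',
    'mu1': 0.5, 'sigma1': 0.1, 'min_intrv1': 0.0, 'max_intrv1': 1.0,
    'mu2': 0.5, 'sigma2': 0.1, 'min_intrv2': 0.0, 'max_intrv2': 1.0,
    'mu3': 0.5, 'sigma3': 0.1, 'min_intrv3': 0.0, 'max_intrv3': 1.0,
    'scaling': True, 'A': 3.0, 'Q1': 1.0, 'Q2': 1.0, 'Q3': 1.0,
}
result = f3TruncNormRVSnp(params)  # illustrative call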
Example #2
    def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
        super(Inception3, self).__init__()
        self.aux_logits = aux_logits
        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = InceptionAux(768, num_classes)
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        self.fc = nn.Linear(2048, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.data.numel()))
                values = values.view(m.weight.data.size())
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #3
    def sampleTheParameterFromPrior(self, sampled_models):
        ret = []

        for i in range(self.nbatch):
            #print "sampleTheParameterFromPrior", i, sampled_models[i], self.models[ sampled_models[i] ].name, self.models[ sampled_models[i] ].nparameters

            reti = [ 0 for it in range(self.models[ sampled_models[i] ].nparameters) ]
            mean_n = [ 0 for it in range(self.models[ sampled_models[i] ].nparameters) ]
            var_n = [ 0 for it in range(self.models[ sampled_models[i] ].nparameters) ]

            for n in range(self.models[ sampled_models[i] ].nparameters):
                if self.models[ sampled_models[i] ].prior[n][0] == 0:
                    reti[n] = self.models[ sampled_models[i] ].prior[n][1]

                if self.models[ sampled_models[i] ].prior[n][0] == 1:
                    reti[n] = rnd.normal( loc=self.models[ sampled_models[i] ].prior[n][1],
                                          scale=numpy.sqrt(self.models[ sampled_models[i] ].prior[n][2]) )

                if self.models[ sampled_models[i] ].prior[n][0] == 2:
                    reti[n] = rnd.uniform( low=self.models[ sampled_models[i] ].prior[n][1],
                                           high=self.models[ sampled_models[i] ].prior[n][2] )

                if self.models[ sampled_models[i] ].prior[n][0] == 3:
                    reti[n] = rnd.lognormal( mean=self.models[ sampled_models[i] ].prior[n][1],
                                             sigma=numpy.sqrt(self.models[ sampled_models[i] ].prior[n][2]) )

                if self.models[ sampled_models[i] ].prior[n][0] == 4:
                    reti[n] = rnd.uniform( low=self.models[ sampled_models[i] ].prior[n][1],
                                           high=self.models[ sampled_models[i] ].prior[n][2] )

                if self.models[ sampled_models[i] ].prior[n][0] == 5:
                    reti[n] = self.models[ sampled_models[i] ].prior[n][1]

                if self.models[ sampled_models[i] ].prior[n][0] == 6:
                    reti[n] = rnd.uniform( low=self.models[ sampled_models[i] ].prior[n][1],
                                           high=self.models[ sampled_models[i] ].prior[n][2] )

            begincount = 0
            for n in range(self.models[ sampled_models[i] ].nparameters):
                if self.models[ sampled_models[i] ].prior[n][0] == 4:
                    mean_n[begincount] = reti[n]
                    begincount = begincount + 1

            begincountv = 0
            for n in range(self.models[ sampled_models[i] ].nparameters):
                if self.models[ sampled_models[i] ].prior[n][0] == 6:
                    var_n[begincountv] = reti[n]
                    begincountv = begincountv + 1

            count = 0
            for n in range(self.models[ sampled_models[i] ].nparameters):
                if self.models[ sampled_models[i] ].prior[n][0] == 5:
                    #reti[n]= mean_n[count]
                    pos = int(self.models[ sampled_models[i] ].prior[n][2])
                    reti[n] = stats.truncnorm( (0 - mean_n[pos]) / var_n[pos],
                                               (1000000 - mean_n[pos]) / var_n[pos],
                                               loc=mean_n[pos],
                                               scale=var_n[pos] ).rvs(1)
                    count = count + 1

            ret.append( reti[:] )

        return [x[:] for x in ret]
Example #4
File: random.py  Project: ainafp/pyhrf
def truncRandn(size, mu=0., sigma=1., a=0., b=inf):

    if b == inf:
        return rpnorm(size, mu, sigma) + a
    else:
        # scipy's truncnorm expects bounds standardized by (x - mu) / sigma
        tn = truncnorm((a - mu) / sigma, (b - mu) / sigma, loc=mu, scale=sigma)
        return tn.rvs(size)
Example #5
def _test_npdf_trunc(mean, dev, min, max):
    print("Normal(%s, %s, min=%s, max=%s)" % (mean, dev, min, max))
    options['pdf']['samples'] = 1000
    c = NormalPDF(mean=mean, dev=dev, min=min, max=max)
    if min is not None:
        a = (float(min) - mean) / dev
    else:
        a = -4.
    if max is not None:
        b = (float(max) - mean) / dev
    else:
        b = 4.
    sfunc = stats.truncnorm(a, b, loc=mean, scale=dev)
    x = c.x
    y = sfunc.pdf(x)
    rmse = 100.0 * np.sqrt(np.mean((c.y - y)**2)) / (np.max(c.y) - np.min(c.y))
    print("PDF for %d points:" % len(x))
    print("\tRMSE (%d points)=%s %%" % (len(x), rmse))
    assert rmse < .01

    # now compare linear interpolated pdf versus reference
    ref_num_points = 100000

    x = np.linspace(sfunc.ppf(.000001),sfunc.ppf(.999999), ref_num_points)
    y = sfunc.pdf(x)
    interp_y = c.pdf(x)
    rmse = 100.0 * np.sqrt(np.mean((interp_y - y)**2))/ (np.max(y) - np.min(y))
    print("\tRMSE (%d points)=%s %%" % (ref_num_points,rmse))
    assert rmse < .05

    """
Example #6
def single_trunc_norm(mean, std, min, max):
    # Need to scale the clipping values ....
    a = (min - mean) / float(std)
    b = (max - mean) / float(std)

    value = stats.truncnorm(a, b, loc=mean, scale=std).rvs()[0]
    return value
Example #7
def gen_prb (n, mu, sigma, lower=0, upper=1):
    '''Generate probability from normal distribution in the range [0,1].
    '''
    import scipy.stats as stats
    X = stats.truncnorm(
         (lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
    return X.rvs(n)
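A minimal usage sketch (assumed values): draw five probabilities concentrated around 0.5 and clipped to [0, 1].

probs = gen_prb(5, mu=0.5, sigma=0.2)  # ndarray of 5 values in [0, 1]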
Example #8
def test_tau_normal():
    mu = .02
    sigma = 0.004
    a = 0
    b = np.inf
    tau_m_dist = sps.truncnorm(float(a - mu) / sigma, float(b - mu)/sigma, loc=mu, scale=sigma)
    singlepop(4.7064469636898467, tau_m=(tau_m_dist,50))
Example #9
    def __init__(self, dropout=0, num_classes=0, transform_input=False):
        super(BaseInception, self).__init__()
        self.transform_input = transform_input
        self.dropout = dropout
        self.num_classes = num_classes
        self.Conv2d_1 = BasicConv2d(3, 32, kernel_size=3, stride=2, padding=1)

        self.Mixed_2a = InceptionA(32, 32, [32, 32], [32, 32, 32], 32)
        self.Mixed_2b = InceptionB(128, 32, [32, 32])
        self.Mixed_3a = InceptionA(192, 64, [64, 64], [64, 64, 64], 64)
        self.Mixed_3b = InceptionB(256, 64, [64, 64])
        self.Mixed_4a = InceptionA(384, 96, [96, 96], [96, 96, 96], 96)
        self.Mixed_4b = InceptionB(384, 96, [96, 96])

        self.fc = BasicFc(576, 512)
        if self.num_classes:
            self.classifier = nn.Linear(512, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.data.numel()))
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #10
File: embed.py  Project: frkhit/proxy-nca
def bn_inception_weight_init(weight):
    import scipy.stats as stats
    stddev = 0.001
    X = stats.truncnorm(-2, 2, scale=stddev)
    values = torch.Tensor(
        X.rvs(weight.data.numel())
    ).resize_(weight.size())
    weight.data.copy_(values)
Example #11
File: RNG.py  Project: avonmoll/ebola-sim
def Normal(mean, std):
    #TODO : write our own version of this if possible!
    #return np.random.normal(mean,std)
    if std == 0:
        return mean
    lower, upper = 0,1000
    tnorm = stats.truncnorm((lower-mean)/std, (upper-mean)/std, mean, std)
    return tnorm.rvs()
Example #12
File: stats.py  Project: l-k-/faker
 def __init__(self, mean=0, variance=1, min=None, max=None):
     std_dev = math.sqrt(variance)
     if (min is not None) and (max is not None):
         self.rand_var = stats.truncnorm((min - mean) / std_dev, (max - mean) / std_dev, loc=mean, scale=std_dev)
     else:
         if (min is not None) or (max is not None):
             raise Exception('Must specify either both max and min value, or neither')
         self.rand_var = stats.norm(loc=mean, scale=std_dev)
Example #13
 def test_truncnorm_prior(self):
     """check truncnorm RV"""
     msg = "truncnorm prior: incorrect test {0}"
     test_vals = [-0.1, 0.0, 0.5, 1.0, 1.1]
     mean, std, low, up = 0.9, 0.1, 0.0, 1.0
     a, b = (low - mean) / std, (up - mean) / std
     correct = truncnorm(loc=mean, scale=std, a=a, b=b)
     for ii, xx in enumerate(test_vals):
         assert self.gPrior.pdf(xx) == correct.pdf(xx), msg.format(ii)
Example #14
def test_truncated_scalar_gaussian_lb():
    tn0_test = TruncatedScalarGaussian(lb=0)
    tn0_true = truncnorm(0, np.inf)

    print("E[TN(0,inf)]:\t", tn0_test.expected_x())
    print("E[TN(0,inf)]:\t", tn0_true.mean())
    assert np.allclose(tn0_test.expected_x(), tn0_true.mean())

    print("Var[TN(0,inf)]:\t", tn0_test.variance_x())
Example #15
File: para.py  Project: nealbob/regrivermod
    def aproximate_shares(self, nonoise=False):
        
        home = '/home/nealbob'
        model = '/Dropbox/Model/'
        
        with open(home + model + 'sharemodel.pkl', 'rb') as f:
            tree = pickle.load(f)

        temp = removekeys(self.para_list, ['sig_eta', 'rho_eps', 'delta0', 'LL'])

        X = np.array([temp[p] for p in temp])
        Y = tree.predict(X)

        yRS = 0
        if self.sr == 'RS':
            if self.HL == 1:
                y = Y[0,2]
            else:
                y = Y[0,0]
        else:
            if self.HL == 1:
                y = Y[0,3]
                yRS = Y[0,2]
            else:
                y = Y[0,1]
                yRS = Y[0,0]
                
        ### for the CS-HL scenario chapter7
        yhl = Y[0,3]
        
        ######################### !!!!
        sig = 0.025
        ######################### !!!!

        if nonoise:
            self.Lambda_high = y
            self.Lambda_high_RS = yRS
        else:
            self.Lambda_high = truncnorm((0.001 - y) / sig, (0.999 - y) / sig, loc=y, scale=sig).rvs()
            self.Lambda_high_RS = truncnorm((0.001 - yRS) / sig, (0.999 - yRS) / sig, loc=yRS, scale=sig).rvs()

        self.para_list['Lambda_high'] = self.Lambda_high
        self.para_list['Lambda_high_RS'] = self.Lambda_high_RS
Example #16
def noisy_theta_rv(prior_type, theta_i, theta_j):
    """ Returns the random variable object for the noisy theta """
    args, kwargs = noisy_theta_rv_args(prior_type, theta_i, theta_j)
    if prior_type == 'uniform':
        return stats.uniform(*args, **kwargs)
    elif prior_type == 'normal':
        return stats.truncnorm(*args, **kwargs)
    elif prior_type == 'beta':
        return stats.beta(*args, **kwargs)
    raise ValueError('Invalid prior type')
Example #17
def generate_health_utility(age):
    # to generate a mean closer to 0.851886, calculated based on the paper at
    # http://www.sciencedirect.com/science/article/pii/S016164200100971X,
    # we need to set mu = 1.5 to get a truncated/negative-skew distribution
    lower = 0
    upper = 1
    sigma = 0.35
    mu = 1.5
    health_utility = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
    return health_utility.rvs()
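A quick sanity check (illustrative, not from the source): scipy can report the analytic mean of this truncated distribution, which lands near the target cited in the comment above.

import scipy.stats as stats
lower, upper, mu, sigma = 0, 1, 1.5, 0.35
check = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
print(check.mean())  # roughly 0.84, close to the 0.851886 target from the paper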
Example #18
def GetDirection(x, scale, param_ranges):
    res = {}
    for key in param_ranges.keys():
        r = param_ranges[key]
        x0 = x[key]
        direction = truncnorm(a = (r[0] - x0)/(r[1] - r[0])/scale, 
                b = (r[1] - x0)/(r[1] - r[0])/scale).rvs(1)[0]
        direction = direction*scale*(r[1] - r[0])
        res[key] = x0 + direction
    return res
Example #19
 def draw_from_conditional_distribution(self,parentsvalues,size=1):
     # returns random samples from the conditional distribution given the values of the parents
     # input parameters:
     #    - parentsvalues, list of floats: values of the parents
     #    - size, integer (optional): number of i.i.d. samples drawn
     if len(parentsvalues) == 0:
         rvtemp=truncnorm(self.lowerlimit,self.upperlimit,loc=0,scale=np.sqrt(self.variancenottruncated))
         vals=rvtemp.rvs(size=size)
     else:
         raise NotImplementedError
     return vals
Example #20
    def sampleTheParameter(self, sampled_models):
        if self.debug == 2: print("\t\t\t***sampleTheParameter")
        ret = []

        for i in range(self.nbatch):
            np = self.models[ sampled_models[i] ].nparameters
            reti = [ 0 for it in range(np) ]
            #print '\n\t\t\tsampleTheParameter, model np prior:', sampled_models[i], self.models[ sampled_models[i] ].name, np, self.models[ sampled_models[i] ].prior

            prior_prob = -1
            while prior_prob <= 0:

                # sample putative particle from previous population
                p = sample_particle(self.nparticles, sampled_models[i], self.margins_prev, self.model_prev, self.weights_prev )

                for nn in range(np):
                    #print reti[nn], self.parameters_prev[ p ][nn]
                    reti[nn] = self.parameters_prev[ p ][nn]

                prior_prob = self.perturbfn( reti, self.models[ sampled_models[i] ].prior, self.kernels[sampled_models[i]], self.kernel_type, self.special_cases[sampled_models[i]] )
                mean_n = [ 0 for it in range(self.models[ sampled_models[i] ].nparameters) ]
                var_n = [ 0 for it in range(self.models[ sampled_models[i] ].nparameters) ]

                begincount = 0
                for n in range(self.models[ sampled_models[i] ].nparameters):
                    if self.models[ sampled_models[i] ].prior[n][0] == 4:
                        mean_n[begincount] = reti[n]
                        begincount = begincount + 1

                begincountv = 0
                for n in range(self.models[ sampled_models[i] ].nparameters):
                    if self.models[ sampled_models[i] ].prior[n][0] == 6:
                        var_n[begincountv] = reti[n]
                        begincountv = begincountv + 1

                count = 0
                for n in range(self.models[ sampled_models[i] ].nparameters):
                    if self.models[ sampled_models[i] ].prior[n][0] == 5:
                        #reti[n]= mean_n[count]
                        pos = int(self.models[ sampled_models[i] ].prior[n][2])
                        #print "pos", pos
                        #print "var", var_n[pos]
                        reti[n] = stats.truncnorm( (0 - mean_n[pos]) / abs(var_n[pos]),
                                                   (1000000 - mean_n[pos]) / abs(var_n[pos]),
                                                   loc=mean_n[pos],
                                                   scale=abs(var_n[pos]) ).rvs(1)
                        count = count + 1

                if self.debug == 2: print("\t\t\tsampled p prob:", prior_prob)
                if self.debug == 2: print("\t\t\tnew:", reti)
                if self.debug == 2: print("\t\t\told:", self.parameters_prev[p])

            ret.append( reti )

        return [x[:] for x in ret]
Example #21
    def _get_initial_ball(self, guess, chains):
        s = 0.05
        a = np.array([self.hs_bounds[0], self.alpha_bounds[0], self.beta_bounds[0]])
        b = np.array([self.hs_bounds[1], self.alpha_bounds[1], self.beta_bounds[1]])

        a = self._get_cuts(a, guess, np.abs(s*guess))
        b = self._get_cuts(b, guess, np.abs(s*guess))
        stacked_val = np.empty((chains, len(guess)))
        for i, (g, A, B) in enumerate(zip(guess, a, b)):
            stacked_val[:, i] = truncnorm(A, B, loc=g, scale=np.abs(s*g)).rvs(chains)

        return stacked_val
Example #22
def agent_type_rv(prior_type):
    """ Returns the random variable corresponding to the agent prior type. """
    if prior_type == 'uniform':
        return stats.uniform()
    elif prior_type == 'normal':
        # [-sqrt(0.5), sqrt(0.5)] is the range of the truncnorm *before* scaling.
        # loc is the mean of the distribution
        # scale is the standard deviation of the distribution
        return stats.truncnorm(
            -math.sqrt(0.5), math.sqrt(0.5), loc=0.5, scale=math.sqrt(0.5))
    elif prior_type == 'beta':
        return stats.beta(2, 2)
    raise ValueError('Invalid agent type prior')
Example #23
    def UpdateLatents(self, this_model_parameters, sampled_models):
        ret = []

        for i in range(self.nbatch):
            #print "sampleTheParameterFromPrior", i, sampled_models[i], self.models[ sampled_models[i] ].name, self.models[ sampled_models[i] ].nparameters

            reti = [ 0 for it in range(self.models[ sampled_models[i] ].nparameters) ]
            mean_n = [ 0 for it in range(self.models[ sampled_models[i] ].nparameters) ]
            var_n = [ 0 for it in range(self.models[ sampled_models[i] ].nparameters) ]

            begincount = 0
            for n in range(self.models[ sampled_models[i] ].nparameters):
                if self.models[ sampled_models[i] ].prior[n][0] == 4:
                    mean_n[begincount] = this_model_parameters[i][n]
                    begincount = begincount + 1

            begincountv = 0
            for n in range(self.models[ sampled_models[i] ].nparameters):
                if self.models[ sampled_models[i] ].prior[n][0] == 6:
                    var_n[begincountv] = reti[n]
                    begincountv = begincountv + 1

            count = 0
            for n in range(self.models[ sampled_models[i] ].nparameters):
                if self.models[ sampled_models[i] ].prior[n][0] == 0:
                    reti[n] = this_model_parameters[i][n]

                if self.models[ sampled_models[i] ].prior[n][0] == 1:
                    reti[n] = this_model_parameters[i][n]

                if self.models[ sampled_models[i] ].prior[n][0] == 2:
                    reti[n] = this_model_parameters[i][n]

                if self.models[ sampled_models[i] ].prior[n][0] == 3:
                    reti[n] = this_model_parameters[i][n]

                if self.models[ sampled_models[i] ].prior[n][0] == 4:
                    reti[n] = this_model_parameters[i][n]

                if self.models[ sampled_models[i] ].prior[n][0] == 5:
                    #reti[n]= mean_n[count]
                    pos = int(self.models[ sampled_models[i] ].prior[n][2])
                    reti[n] = stats.truncnorm( (0 - mean_n[pos]) / var_n[pos],
                                               (1000000 - mean_n[pos]) / var_n[pos],
                                               loc=mean_n[pos],
                                               scale=var_n[pos] ).rvs(1)
                    count = count + 1

                if self.models[ sampled_models[i] ].prior[n][0] == 6:
                    reti[n] = this_model_parameters[i][n]

            ret.append( reti[:] )

        return [x[:] for x in ret]
Example #24
File: simulate.py  Project: h2oloopan/dc
def createHyperbolicWorker(n, r, p, v, c):
	#create n workers
	#r is the number of runs to reach 0.5 quality

	#create RS
	lower = 0
	upper = float('inf')
	mu = r['mu']
	sigma = r['sigma']
	RS = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
	rs = RS.rvs(n)
	#create PS
	lower = 0
	upper = float('inf')
	mu = p['mu']
	sigma = p['sigma']
	PS = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
	ps = PS.rvs(n)

	#create availability
	vs = []
	if v is None:
		for i in range(0, len(ps)):
			vs.append(1)
	else:
		lower = 0
		upper = 1
		mu = v['mu']
		sigma = v['sigma']
		VS = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
		vs = VS.rvs(n)

	workers = []
	for i in range(0, len(ps)):
		w = Worker(str(uuid.uuid1()), 0, ps[i], rs[i], c, vs[i])
		workers.append(w)

	return workers
Example #25
    def RandomNonnegativeNormal(muValue, varValue):
        """ Generate a sample from the non-negative truncated N(mu,var) """

        assert varValue > 0
        sdValue = math.sqrt(varValue)
        a = -muValue/sdValue
        
        # Using RandomTruncatedStandardNorm
#         rv = NumericalHelper.RandomTruncatedStandardNormal(a)
#         return rv*sdValue + muValue
    
        # Using scipy 
        rv = truncnorm(a, float('inf'), loc=muValue, scale=sdValue)
        return rv.rvs()
Example #26
File: para.py  Project: nealbob/regrivermod
    def aproximate_shares_ch7(self, nonoise=False):
        
        home = '/home/nealbob'
        model = '/Dropbox/Model/'
        
        if self.HL == 1:
            y = self.CSHL_c + self.CSHL_b * self.N_high
        else:
            y = self.CS_c + self.CS_b * self.N_high
        
        yhl = self.CSHL_c + self.CSHL_b * self.N_high
        self.yhl = yhl
        self.Lambda_high_HL = truncnorm((0.001 - yhl) / 0.05, (0.999 - yhl) / 0.05, loc=yhl, scale=0.05).rvs()

        if nonoise:
            self.Lambda_high = y
        else:
            self.Lambda_high = truncnorm((0.001 - y) / 0.05, (0.999 - y) / 0.05, loc=y, scale=0.05).rvs()
        self.y = y

        self.para_list['Lambda_high'] = self.Lambda_high

        print('Lambda_high_hat: ' + str(y) + ', Lambda high: ' + str(self.Lambda_high))
Example #27
def example(show=False, save=False):

    # Settings:
    t0 = 0.0
    dt = 0.0001
    dv = 0.0001
    tf = 0.1
    update_method = "gmres"
    tol = 1e-14

    # Run simulation:
    mu = 0.02
    sigma = 0.004
    b = np.inf
    a = 0
    tau_m_dist = sps.truncnorm(float(a - mu) / sigma, float(b - mu) / sigma, loc=mu, scale=sigma)

    total = 0
    vals, probs = descretize(tau_m_dist, 100)
    for ii, (val, prob) in enumerate(zip(vals, probs)):
        print(ii)
        network = get_network(dv=dv, update_method=update_method, tol=tol, tau_m=val)
        network.run(dt=dt, tf=tf, t0=t0)
        i1 = network.population_list[1]
        total += i1.firing_rate_record[-1] * prob

    print(total)

    # # Visualize:
    # i1 = network.population_list[1]
    #
    # fig, ax = plt.subplots(figsize=(3,3))
    #
    # i1.plot(ax=ax)
    # plt.xlim([0,tf])
    # plt.ylim(ymin=0)
    # plt.xlabel('Time (s)')
    # plt.ylabel('Firing Rate (Hz)')
    # fig.tight_layout()
    # if save == True: plt.savefig('./singlepop.png')
    #
    #
    #
    # if show == True:                        # pragma: no cover
    #     fig = plt.gcf()                     # pragma: no cover
    #     window = fig.canvas.manager.window  # pragma: no cover
    #     window.raise_()                     # pragma: no cover
    #     plotting.show()

    return i1.t_record, i1.firing_rate_record
Example #28
def sample(dists):
    """
    Samples parameters from list of distributions specified by dists
    dists -- list of 2 or 4 element arrays
        2 elements signify left and right bounds of a uniform distribution
        4 elements signify left, right, mean, and variance of a truncated normal distribution
    Order of parameters: lambda1, lambda2, xi1, xi2, mu3, kappa1, kappa2
    """
    params = []
    tnorm_l1 = truncnorm(a=dists[0][0], b=dists[0][1], loc=dists[0][2], scale=dists[0][3]**.5)
    tnorm_l2 = truncnorm(a=dists[1][0], b=dists[1][1], loc=dists[1][2], scale=dists[1][3]**.5)
    for i in range(len(dists)):
        distribution = dists[i]
        if len(distribution) == 2:
            left, right = distribution
            param = random.uniform(left, right)
        elif len(distribution) == 4:
            if i==0:
                param = tnorm_l1.rvs(size=1)[0]
            if i==1:
                param = tnorm_l2.rvs(size=1)[0]
        params.append(param)
    return params
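An illustrative dists list (assumed values) matching the parameter order from the docstring; the first two entries are 4-element truncated normals, the remainder 2-element uniforms:

dists = [
    [0.0, 2.0, 0.5, 0.04],  # lambda1: left, right, mean, variance
    [0.0, 2.0, 0.5, 0.04],  # lambda2: left, right, mean, variance
    [0.0, 1.0],             # xi1: uniform bounds
    [0.0, 1.0],             # xi2
    [0.0, 1.0],             # mu3
    [0.0, 1.0],             # kappa1
    [0.0, 1.0],             # kappa2
]
params = sample(dists)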
Example #29
    def __init__(self, mu, sigma, a, b):
        """
        Constructor
        @param mu: expectation value
        @param sigma: standard deviation
        @param a: lower boundary
        @param b: upper boundary
        """
        super(TNormal, self).__init__()
        self.__mu = float(mu)
        self.__sigma = float(sigma)
        self.__a, self.__b = a, b

        # truncated standard normal
        a, b = self.__trans(a), self.__trans(b)
        self._dist = truncnorm(a, b)
Example #30
def generate_group_membership_probabilities(num_hosts, mean, std_dev, avg_group_size = 0):
    a, b = (0 - mean) / std_dev, (1 - mean) / std_dev
    midpoint_ab = (b + a) / 2
    scale = 1 / (b - a)
    location = 0.5 - (midpoint_ab * scale)
    print('Mean: ' + str(mean) + ' StdDev: ' + str(std_dev))
    print('a: ' + str(a) + ' b: ' + str(b) + ' loc: ' + str(location) + ' scale: ' + str(scale))
    rv = truncnorm(a, b, loc=location, scale=scale)
    rvs = rv.rvs(num_hosts)
    if avg_group_size > 0:
        rvs_sum = sum(rvs)
        rvs = [p / (rvs_sum/float(avg_group_size)) for p in rvs]
        rvs_sum = sum(rvs)
        rvs = [p / (rvs_sum/float(avg_group_size)) for p in rvs]
    print('Average group size: ' + str(sum(rvs)))
    return rvs
Example #31
######### Local Vars
FOCAL_CODE = 37386
ORIEN_CODE = 274

IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG', '.tiff',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
RAW_EXTENSIONS = [
    '.ARW', '.arw', '.CR2', '.cr2',
]
lower, upper = 0., 1.
mu, sigma = 0.5, 0.2
# generate random numbers for random crop
rand_gen = stats.truncnorm(
    (lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
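rand_gen is later used to pick crop offsets; an illustrative draw (not from the source):

crop_y, crop_x = rand_gen.rvs(2)  # two fractions in [0, 1], concentrated near 0.5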

######### Util functions
def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)

def is_raw_file(filename):
    return any(filename.endswith(extension) for extension in RAW_EXTENSIONS)

def read_wb_lv(device):
    if device == "sony":
        white_lv = 16383
        black_lv = 512
    elif device == "iphone":
        white_lv = 4367
        black_lv = 528
Example #32
File: normal.py  Project: luyang1210/ppml
def truncnormal(min, max, scale, size):
    return asarray(truncnorm(a=min, b=max, scale=scale).rvs(size))
Example #33
File: pdfs.py  Project: zsarnoczay/uqFEM
 def log_pdf_eval(self, x):
     lp = stats.truncnorm((self.a - self.mu) / self.sig,
                          (self.b - self.mu) / self.sig,
                          loc=self.mu,
                          scale=self.sig).logpdf(x)
     return lp
Example #34
    # os.unlink(my_file)
else:
    print('no such file:%s' % my_file)

dimension = 7  # 7 dimensions
task_num = 200  # 200 base tasks

mu_sigma_list = [(50, 15.9), (30, 3.9), (49, 6.7), (63, 12.5), (13, 6.1),
                 (54, 20.6), (31, 12.2)]

task_matrix = np.zeros([dimension, task_num])
count = 0
for the_mu, the_sigma in mu_sigma_list:
    lower, upper = the_mu - 2 * the_sigma, the_mu + 2 * the_sigma  # truncate to [μ-2σ, μ+2σ]
    X = stats.truncnorm((lower - the_mu) / the_sigma,
                        (upper - the_mu) / the_sigma,
                        loc=the_mu,
                        scale=the_sigma)
    task_matrix[count, :] = X.rvs(task_num)
    count += 1

earn_list = []
time_list = []
attr_mu_sigma_list = [[60, 10, 'earn_list'], [600, 100, 'time_list']]

for earn_mu, earn_sigma, i in attr_mu_sigma_list:
    l, u = earn_mu - 2 * earn_sigma, earn_mu + 2 * earn_sigma
    E = stats.truncnorm((l - earn_mu) / earn_sigma, (u - earn_mu) / earn_sigma,
                        loc=earn_mu,
                        scale=earn_sigma)
    the_list = eval(i)
    for j in list(E.rvs(task_num)):
Example #35
#Dilution rate
D = 0.1
#Most of Kot's stuff is based on D=0.1
#If you set epsilon=0, there is no forcing.

####################

#Forcing amplitude
epsilon = 0.2
STOC_EPS = False  #turn on and off stochastic epsilon
if STOC_EPS:
    #create the random variable you want for epsilon here. E.g.
    #stats.truncnorm(a, b, loc=mu, scale=sig) with a, b in standard-deviation units
    #stats.gamma(k, loc=0, scale=theta)
    eps_rv = stats.truncnorm(0, 3, epsilon, 0.15)

####################
#T and omega control the period of the forcing
#(Note that this section does nothing if epsilon=0)
T = 24  #T = 100.0

omega = 2. * np.pi / D / T
#omega = 5.0*np.pi/6.0 #T is not used here. This is equivalent to T=24.

STOC_T = False  #turn on and off stochastic forcing period
if STOC_T:
    #this function will replace the omega definition above
    def omega_f(T_arg):
        return 2. * np.pi / D / T_arg
Example #36

#defining mesh to get cellcenters
Lx1 = 1.  # always put . after 1
Lx2 = 1.  # always put . after 1
mesh = Grid2D(
    nx=nx1, ny=nx2, dx=Lx1 / nx1, dy=Lx2 /
    nx2)  # with nx1*nx2 number of cells/cellcenters/pixels/pixelcenters
cellcenters = mesh.cellCenters.value.T  # (nx1*nx2,2) matrix
np.save('cellcenters_nx1=' + str(nx1) + '_nx2=' + str(nx2) + '.npy',
        cellcenters)

# https://stackoverflow.com/questions/18441779/how-to-specify-upper-and-lower-limits-when-using-numpy-random-normal
# X = truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
#define the distribution of truncated normals for lengthscales
l1rv = truncnorm((0.07 - 0.1) / 0.03, (0.13 - 0.1) / 0.03, 0.1, 0.03)
l2rv = truncnorm((0.47 - 0.5) / 0.03, (0.53 - 0.5) / 0.03, 0.5, 0.03)

lx1s = l1rv.rvs(num_samples)
lx2s = l2rv.rvs(num_samples)
ls = np.array(list(zip(lx1s, lx2s)))

#define matrices to save results
inputs = np.zeros((num_samples, nx1 * nx2))
outputs = np.zeros((num_samples, nx1 * nx2))

start = time.time()
#generate samples
for i in range(num_samples):
    #display
    if (i + 1) % 100 == 0:
Example #37
 def __init__(self, bounds, mean, std):
     a = (bounds[:, 0] - mean) / std
     b = (bounds[:, 1] - mean) / std
     self.rv = truncnorm(a, b, loc=mean, scale=std)
Example #38
def variance_scaling_initializer(shape, fan_in, factor=2.0, seed=None):
    sigma = np.sqrt(factor / fan_in)
    #x = stats.truncnorm(-max_val*sigma, max_val*sigma, loc=0, scale=sigma)
    return stats.truncnorm(-2, 2, loc=0, scale=sigma).rvs(shape)
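A usage sketch (assumed shape and fan-in): the clip points -2 and 2 are in units of sigma, so every sample stays within two standard deviations of zero.

w = variance_scaling_initializer(shape=(64, 32), fan_in=32)  # ndarray of shape (64, 32)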
Example #39
File: backbone.py  Project: sorrowyn/C-Tran
    def __init__(self,
                 num_classes=312,
                 aux_logits=True,
                 transform_input=False,
                 n_attributes=0,
                 bottleneck=False,
                 expand_dim=0,
                 three_class=False,
                 connect_CY=False):
        """
        Args:
        num_classes: number of main task classes
        aux_logits: whether to also output auxiliary logits
        transform_input: whether to invert the transformation by ImageNet (should be set to True later on)
        n_attributes: number of attributes to predict
        bottleneck: whether to make X -> A model
        expand_dim: if not 0, add an additional fc layer with expand_dim neurons
        three_class: whether to count not visible as a separate class for predicting attribute
        """
        super(Inception3, self).__init__()
        self.aux_logits = aux_logits
        self.n_attributes = n_attributes
        self.bottleneck = bottleneck
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = InceptionAux(768, num_classes)
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)

        # self.all_fc = nn.ModuleList() #separate fc layer for each prediction task. If main task is involved, it's always the first fc in the list

        # if connect_CY:
        #     self.cy_fc = FC(n_attributes, num_classes, expand_dim)
        # else:
        #     self.cy_fc = None

        # if self.n_attributes > 0:
        #     if not bottleneck: #multitasking
        #         self.all_fc.append(FC(2048, num_classes, expand_dim))
        #     for i in range(self.n_attributes):
        #         self.all_fc.append(FC(2048, 1, expand_dim))
        # else:
        #     self.all_fc.append(FC(2048, num_classes, expand_dim))

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.as_tensor(X.rvs(m.weight.numel()),
                                         dtype=m.weight.dtype)
                values = values.view(m.weight.size())
                with torch.no_grad():
                    m.weight.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
Example #40
def test_ConsequenceFunction_sample_unit_DV():
    """
    Test if the function samples the DV distribution properly. Note that we
    have already tested the sampling algorithm in the uq module, so we will not
    do a thorough verification of the samples here, but rather check for errors
    in the inputs that would typically lead to significant mistakes in the
    results.
    """
    test_quants = [0.5, 1.0, 1.5, 2.0, 2.5]

    # create a Random Variable with 3 correlated decision variables
    dims = 3
    ref_mean = [1., 1., 0.]
    ref_std = [0.4, 0.3, 0.2]
    ref_rho = np.ones((dims, dims)) * 0.8
    np.fill_diagonal(ref_rho, 1.0)

    ref_mean[2] = np.exp(ref_mean[2])

    # prepare lower truncation limits at 0 for all...
    tr_lower = np.zeros(dims).tolist()
    # and an upper limit at 2 sigma for the second
    tr_upper = [np.inf, 1.6, np.inf]

    RV_reg = RandomVariableRegistry()

    for i, (name, dist, theta, beta) in enumerate(
            zip(['A', 'B', 'C'], ['normal', 'normal', 'lognormal'], ref_mean,
                ref_std)):
        RV_reg.add_RV(
            RandomVariable(name=name,
                           distribution=dist,
                           theta=[theta, beta],
                           truncation_limits=[tr_lower[i], tr_upper[i]]))

    RV_reg.add_RV_set(
        RandomVariableSet('set_A', [RV_reg.RV[rv] for rv in ['A', 'B', 'C']],
                          ref_rho))
    RV_reg.generate_samples(sample_size=1000)

    # first test sampling for each decision variable
    for r_i, tag in enumerate(['A', 'B', 'C']):

        # use fixed value for 'B' and bounded linear for the other two
        if tag == 'B':
            f_median = prep_constant_median_DV(10.)
        else:
            f_median = prep_bounded_linear_median_DV(median_max=20.0,
                                                     median_min=2.0,
                                                     quantity_lower=1.0,
                                                     quantity_upper=2.0)

        # create the consequence function
        conseq_function = ConsequenceFunction(DV_median=f_median,
                                              DV_distribution=RV_reg.RV[tag])

        for qnt in test_quants:
            samples = conseq_function.sample_unit_DV(quantity=qnt,
                                                     sample_size=1000)

            # transform the results to log space for 'C' to facilitate testing
            if tag == 'C':
                samples = np.log(samples)
                ref_mu = np.log(f_median(qnt))
                ref_min = np.log(max(np.nextafter(0, 1), tr_lower[r_i]))
                ref_max = np.log(max(np.nextafter(0, 1), tr_upper[r_i]))
                a = (ref_min - np.log(ref_mean[r_i])) / ref_std[r_i]
                b = (ref_max - np.log(ref_mean[r_i])) / ref_std[r_i]
                ref_max = ref_mu * b
            else:
                ref_mu = f_median(qnt)
                ref_min = tr_lower[r_i]
                a = (ref_min - ref_mean[r_i]) / ref_std[r_i]
                b = (tr_upper[r_i] - ref_mean[r_i]) / ref_std[r_i]
                ref_max = ref_mu * b

            trNorm = truncnorm(
                a=a,
                b=b,
                loc=ref_mu,
                scale=ref_std[r_i] if tag == 'C' else ref_std[r_i] * ref_mu)
            ref_samples = trNorm.rvs(size=1000)

            # test the means and coefficients of variation
            assert np.mean(samples) == pytest.approx(np.mean(ref_samples),
                                                     rel=0.1)
            assert np.std(samples) == pytest.approx(np.std(ref_samples),
                                                    rel=0.15)

            # test the limits
            assert np.min(samples) > ref_min
            assert np.max(samples) < ref_max
Example #41
 def get_truncated_normal(mean=50, sd=20, low=0, upp=100):
     return truncnorm((low - mean) / sd, (upp - mean) / sd,
                      loc=mean,
                      scale=sd)
Example #42
# plot
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import truncnorm
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error

np.random.seed(10)

lower = 0
upper = 100
mu = 50
sigma = 50


def evaluate(X):
    return np.sin(X).reshape(-1) + np.random.normal(0, .3, len(X)).reshape(-1)


X = truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu,
              scale=sigma).rvs(1000).reshape(-1, 1)
y = evaluate(X)

x_pred = np.linspace(0, 2000, 1000).reshape(-1, 1)
y_true = evaluate(x_pred)
# test
Cs = [1e-4, 1e-3, 1e-2, 1e-1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
ys = []
for C in Cs:
    print "evaluating", C
    y_pred = SVR(C=C).fit(X, y).predict(x_pred)
    ys.append(mean_squared_error(y_true, y_pred))
plt.plot(Cs, ys)
plt.show()
quit()
plt.plot(x_pred, y_pred, 'g', label='c=1e-2')
Example #43
    def __init__(
        self,
        estimator: Union["CLASSIFIER_LOSS_GRADIENTS_TYPE",
                         "OBJECT_DETECTOR_TYPE"],
        norm: Union[int, float, str] = np.inf,
        eps: Union[int, float, np.ndarray] = 0.3,
        eps_step: Union[int, float, np.ndarray] = 0.1,
        max_iter: int = 100,
        targeted: bool = False,
        num_random_init: int = 0,
        batch_size: int = 32,
        random_eps: bool = False,
        tensor_board: Union[str, bool] = False,
        verbose: bool = True,
    ) -> None:
        """
        Create a :class:`.ProjectedGradientDescentCommon` instance.

        :param estimator: A trained classifier.
        :param norm: The norm of the adversarial perturbation supporting "inf", np.inf, 1 or 2.
        :param eps: Maximum perturbation that the attacker can introduce.
        :param eps_step: Attack step size (input variation) at each iteration.
        :param random_eps: When True, epsilon is drawn randomly from truncated normal distribution. The literature
            suggests this for FGSM based training to generalize across different epsilons. eps_step is
            modified to preserve the ratio of eps / eps_step. The effectiveness of this method with PGD
            is untested (https://arxiv.org/pdf/1611.01236.pdf).
        :param max_iter: The maximum number of iterations.
        :param targeted: Indicates whether the attack is targeted (True) or untargeted (False).
        :param num_random_init: Number of random initialisations within the epsilon ball. For num_random_init=0
            starting at the original input.
        :param batch_size: Size of the batch on which adversarial samples are generated.
        :param tensor_board: Activate summary writer for TensorBoard: Default is `False` and deactivated summary writer.
                             If `True` save runs/CURRENT_DATETIME_HOSTNAME in current directory. Provide `path` in type
                             `str` to save in path/CURRENT_DATETIME_HOSTNAME.
                             Use hierarchical folder structure to compare between runs easily. e.g. pass in ‘runs/exp1’,
                             ‘runs/exp2’, etc. for each new experiment to compare across them.
        :param verbose: Show progress bars.
        """
        super().__init__(
            estimator=estimator,  # type: ignore
            norm=norm,
            eps=eps,
            eps_step=eps_step,
            targeted=targeted,
            num_random_init=num_random_init,
            batch_size=batch_size,
            minimal=False,
            tensor_board=tensor_board,
        )
        self.max_iter = max_iter
        self.random_eps = random_eps
        self.verbose = verbose
        ProjectedGradientDescentCommon._check_params(self)

        if self.random_eps:
            if isinstance(eps, (int, float)):
                lower, upper = 0, eps
                var_mu, sigma = 0, (eps / 2)
            else:
                lower, upper = np.zeros_like(eps), eps
                var_mu, sigma = np.zeros_like(eps), (eps / 2)

            self.norm_dist = truncnorm((lower - var_mu) / sigma,
                                       (upper - var_mu) / sigma,
                                       loc=var_mu,
                                       scale=sigma)
Example #44
        w_s_mu = 5
        w_s = 5
        # witness subsample size

        t = 0.0  # total time as given by poisson arrival process
        turn = 0  # index for turns

        # payoff rates for different interactions
        reward = 0.3  # reward of crime
        punishment = 0.6  # punishment of conviction
        image = 0.2  # credibility reduction
        payoff = np.array([reward, punishment, image])

        X = stats.truncnorm((1 - w_s_mu) / 1.33, (20 - w_s_mu) / 1.33,
                            loc=w_s_mu,
                            scale=0.5)

        # population array where majority of manipulation occurs
        pop = np.array(
            [paladin, informant, villain, apathetic, total, t, turn])

        history = np.copy(pop)  # data array, keeps record of each round
        yada = False
        while yada == False:
            #w_s = int(round(X.rvs(1)[0]))
            if (((pop[0] == 0) and (pop[1] == 0))
                    or ((pop[2] == 0) and (pop[3] == 0))):
                if ((pop[0] == 0) and (pop[1] == 0)):
                    yada = True
                    dystopia = dystopia + 1
Example #45
def truncated_centered_gaussian(low, high, scale=2.0):
    mu = (low + high) / 2.0
    d = (high - mu)
    return stats.truncnorm(-scale, scale, loc=mu, scale=d / float(scale))
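A quick bounds check (illustrative): the clip points are +/-scale and the scale parameter is d/scale, so the support works out to mu +/- d, i.e. exactly [low, high].

rv = truncated_centered_gaussian(0.0, 10.0)
print(rv.support())  # (0.0, 10.0) on reasonably recent scipy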
Example #46
    def _impute_one_feature(self,
                            X_filled,
                            mask_missing_values,
                            feat_idx,
                            neighbor_feat_idx,
                            estimator=None,
                            fit_mode=True):
        """Impute a single feature from the others provided.

        This function predicts the missing values of one of the features using
        the current estimates of all the other features. The ``estimator`` must
        support ``return_std=True`` in its ``predict`` method for this function
        to work.

        Parameters
        ----------
        X_filled : ndarray
            Input data with the most recent imputations.

        mask_missing_values : ndarray
            Input data's missing indicator matrix.

        feat_idx : int
            Index of the feature currently being imputed.

        neighbor_feat_idx : ndarray
            Indices of the features to be used in imputing ``feat_idx``.

        estimator : object
            The estimator to use at this step of the round-robin imputation.
            If ``sample_posterior`` is True, the estimator must support
            ``return_std`` in its ``predict`` method.
            If None, it will be cloned from self._estimator.

        fit_mode : boolean, default=True
            Whether to fit and predict with the estimator or just predict.

        Returns
        -------
        X_filled : ndarray
            Input data with ``X_filled[missing_row_mask, feat_idx]`` updated.

        estimator : estimator with sklearn API
            The fitted estimator used to impute
            ``X_filled[missing_row_mask, feat_idx]``.
        """
        if estimator is None and fit_mode is False:
            raise ValueError("If fit_mode is False, then an already-fitted "
                             "estimator should be passed in.")

        if estimator is None:
            estimator = clone(self._estimator)

        missing_row_mask = mask_missing_values[:, feat_idx]
        if fit_mode:
            X_train = safe_indexing(X_filled[:, neighbor_feat_idx],
                                    ~missing_row_mask)
            y_train = safe_indexing(X_filled[:, feat_idx],
                                    ~missing_row_mask)
            estimator.fit(X_train, y_train)

        # if no missing values, don't predict
        if np.sum(missing_row_mask) == 0:
            return X_filled, estimator

        # get posterior samples if there is at least one missing value
        X_test = safe_indexing(X_filled[:, neighbor_feat_idx],
                               missing_row_mask)
        if self.sample_posterior:
            mus, sigmas = estimator.predict(X_test, return_std=True)
            imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype)
            # two types of problems: (1) non-positive sigmas
            # (2) mus outside legal range of min_value and max_value
            # (results in inf sample)
            positive_sigmas = sigmas > 0
            imputed_values[~positive_sigmas] = mus[~positive_sigmas]
            mus_too_low = mus < self._min_value
            imputed_values[mus_too_low] = self._min_value
            mus_too_high = mus > self._max_value
            imputed_values[mus_too_high] = self._max_value
            # the rest can be sampled without statistical issues
            inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
            mus = mus[inrange_mask]
            sigmas = sigmas[inrange_mask]
            a = (self._min_value - mus) / sigmas
            b = (self._max_value - mus) / sigmas

            if scipy.__version__ < LooseVersion('0.18'):
                # bug with vector-valued `a` in old scipy
                imputed_values[inrange_mask] = [
                    stats.truncnorm(a=a_, b=b_,
                                    loc=loc_, scale=scale_).rvs(
                                        random_state=self.random_state_)
                    for a_, b_, loc_, scale_
                    in zip(a, b, mus, sigmas)]
            else:
                truncated_normal = stats.truncnorm(a=a, b=b,
                                                   loc=mus, scale=sigmas)
                imputed_values[inrange_mask] = truncated_normal.rvs(
                    random_state=self.random_state_)
        else:
            imputed_values = estimator.predict(X_test)
            imputed_values = np.clip(imputed_values,
                                     self._min_value,
                                     self._max_value)

        # update the feature
        X_filled[missing_row_mask, feat_idx] = imputed_values
        return X_filled, estimator
Example #47
 def make(cls, shape, dtype, mean=0, std=0.05, min_stds=-2, max_stds=2):
     dist = truncnorm(min_stds, max_stds)
     x = dist.rvs(np.prod(shape)).reshape(shape)
     x = x * std + mean
     return x.astype(dtype)
Example #48
def truncated_normal(mean, sd, low, upp):
    """
    Truncated normal distribution on [low, upp]
    """
    return truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
Example #49
 def get_truncnorm_sample(self, lower, upper=200, mu=0, sigma=1, n=1):
     X = truncnorm((lower - mu) / sigma, (upper - mu) / sigma,
                   loc=mu,
                   scale=sigma)
     samples = X.rvs(n)
     return int(samples)
Example #50
        return self._splits != value

    def __enter__(self):
        self._len2cnt = {}
        self._lengths = []
        self._counts = []
        self._splits = []
        self._lidxs = []
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        if exception_type is not None:
            raise exception_type(exception_value)
        return True


#***************************************************************
if __name__ == '__main__':
    """ """

    from nparser import Configurable
    from nparser.misc.bucketer import Bucketer

    from scipy.stats import truncnorm
    with Bucketer(5) as bucketer:
        print(bucketer.compute_splits([[0] *
                                       int(truncnorm(0, 10, scale=5).rvs())
                                       for _ in range(1000)]),
              file=sys.stderr)
        bucketer.plot()
Example #51
def truncated_normal(mean=0, sd=1, low=0, upp=10):
    return truncnorm((low - mean) / sd,
                     (upp - mean) / sd, 
                     loc=mean, 
                     scale=sd)
Example #52
    # initialization step
    for agent in range(N):
        for arm in range(M):
            alpha = arm_means[arm] * (arm_means[arm] *
                                      (1 - arm_means[arm]) / var - 1)
            beta = (1 - arm_means[arm]) * (arm_means[arm] *
                                           (1 - arm_means[arm]) / var - 1)
            if distribs[agent][arm] == 1:
                X[1][agent][arm] = np.random.binomial(
                    size=1, n=1,
                    p=arm_means[arm])  #np.random.beta(alpha, beta)
            elif distribs[agent][arm] == 2:
                X[1][agent][arm] = np.random.beta(alpha, beta)
            else:
                X[1][agent][arm] = truncnorm((0 - arm_means[arm]) / sigma,
                                             (1 - arm_means[arm]) / sigma,
                                             loc=arm_means[arm],
                                             scale=sigma).rvs()
            n[1][agent][arm] += 1
            m[1][agent][arm] += 1
            z[1][agent][arm] = X[1][agent][arm]
            x[1][agent][arm] = X[1][agent][arm]

    for t in range(1, T):  # loop over time
        for agent in range(N):  # loop through all agents
            candidates = []  # candidate arms to choose
            Q = []  # corresponds to Q in paper

            for arm in range(M):
                if n[t][agent][arm] <= m[t][agent][
                        arm] - M:  # check decision making criteria
                    candidates.append(arm)
Example #53
File: pdfs.py  Project: zsarnoczay/uqFEM
 def generate_rns(self, N):
     return stats.truncnorm((self.a - self.mu) / self.sig,
                            (self.b - self.mu) / self.sig,
                            loc=self.mu,
                            scale=self.sig).rvs(N)
Example #54
import numpy as np
import scipy.stats as stats
import matplotlib as mpl
import matplotlib.pyplot as plt

real_state = 9
realised_sig_mean = 9
lb = 0
ub = 10
state_space = np.arange(lb,ub+1, 1)
action_space = state_space
type = state_space
sigdis = stats.truncnorm(a=(lb - realised_sig_mean),
                         b=(ub - realised_sig_mean), loc=realised_sig_mean, scale=1)
priorb = stats.uniform(lb, ub)
def bay_up(prior, cond_prob, sig_prob):
    posterior = cond_prob*prior/sig_prob
    return posterior
def utility(prob, expected, realise): return -prob*abs(expected - realise)
posterior = []
for state in state_space:
    conp = sigdis.pdf(state)
    pri = priorb.pdf(state)
    sigp = pri
    posterior.append(bay_up(pri, conp, sigp))
norm_post = [float(i)/sum(posterior) for i in posterior] # normalized sum to 1
# for i in range(11):
#     print(i, norm_post[i])
prob_sigb = []
for i, state in enumerate(state_space):
    conprobb = sigdis.pdf(state)
Example #55
 def _get_truncated_normal(self, mean=0, sd=1, low=-1, upp=1):
     return truncnorm( (low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd )
Example #56
def normalize(mean, std_dev, low, up):
    l = (low / std_dev) - (mean / std_dev)
    h = (up / std_dev) - (mean / std_dev)
    # print(l)
    # print(h)
    return truncnorm(l, h, loc=mean, scale=std_dev)
Example #57
def generate_ManningN_from_distribution(n_min, n_max, n_mean, n_std, nSamples):
    """
    Generate Manning's n values from a distribution. Currently the distribution is a truncated normal distribution.

    Parameters
    ----------
    n_min : float
        minimum value of Manning's n
    n_max : float
        maximum value of Manning's n
    n_mean : float
        mean value of Manning's n
    n_std : float
        std of Manning's n
    nSamples : int
        number of samples to generate

    Returns
    -------

    """

    #convert the user specified min, mean, max, and std to [a, b] on standard normal distribution
    a, b = (n_min - n_mean) / n_std, (n_max - n_mean) / n_std
    #print("a,b =", a, b)

    #call scipy's truncnorm to generate the samples (on the standard normal distribution)
    samples = truncnorm(a=a, b=b, scale=1.0).rvs(size=nSamples)

    #convert the random values back to the normal distribution scale
    samples = samples * n_std + n_mean

    #print("samples = ", samples)
    print("min, max, and mean of samples = ", np.min(samples), np.max(samples), np.mean(samples))

    #make a plot to show the sample distribution
    plt.hist(samples, 11, facecolor='gray', alpha=0.75)

    # set the limit for the x and y axes
    plt.xlim([n_min, n_max])
    #plt.ylim([0, 45])

    # set x and y axes label and font size
    plt.xlabel('Manning\'s n', fontsize=16)
    plt.ylabel('Count', fontsize=16)

    # show the ticks on both axes and set the font size
    plt.tick_params(axis='both', which='major', labelsize=12)

    # set axis label format
    plt.gca().xaxis.set_major_formatter(StrMethodFormatter('{x:,.3f}'))
    plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))

    # show title and set font size
    plt.title('Histogram of sampled Manning\'s n values', fontsize=16)

    # show legend, set its location, font size, and turn off the frame
    #plt.legend(loc='lower left', fontsize=14, frameon=False)
    plt.show()

    return samples
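An illustrative call (assumed hydraulic values; note the function opens a matplotlib window for the histogram):

samples = generate_ManningN_from_distribution(
    n_min=0.02, n_max=0.05, n_mean=0.03, n_std=0.005, nSamples=1000)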
Example #58
File: sim-04-sars.py  Project: momacs/pram
def TN(a, b, mu, sigma, n=None):
    return truncnorm((a - mu) / sigma, (b - mu) / sigma, mu, sigma).rvs(n)
Example #59
from scipy.stats import truncnorm

# Setup Faker
fake = Faker()
fake.add_provider(internet)
fake.add_provider(user_agent)
fake.add_provider(profile)

# Normally distribute ages between 18 and 100 with a mean age of 32.
age_min = 18
age_max = 100
age_mean = 32
age_sd = 15

age_dist = truncnorm((age_min - age_mean) / age_sd,
                     (age_max - age_mean) / age_sd,
                     loc=age_mean,
                     scale=age_sd)
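An illustrative draw (not from the source): rvs returns floats, so round before using them as ages.

ages = age_dist.rvs(10).round().astype(int)  # ten ages between 18 and 100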

# Persona combinations ordered from strongest affinity to latent interest.
personas = [
    'apparel_housewares_accessories', 'housewares_apparel_electronics',
    'footwear_outdoors_apparel', 'outdoors_footwear_housewares',
    'electronics_beauty_outdoors', 'beauty_electronics_accessories',
    'jewelry_accessories_beauty', 'accessories_jewelry_apparel'
]


class UserPool:
    def __init__(self):
        self.users = []
        self.active = []
Example #60
    def __init__(self,
                 num_classes=1000,
                 aux_logits=True,
                 transform_input=False,
                 inception_blocks=None,
                 init_weights=None):
        super(Inception3, self).__init__()
        if inception_blocks is None:
            inception_blocks = [
                BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD,
                InceptionE, InceptionAux
            ]
        if init_weights is None:
            warnings.warn(
                'The default weight initialization of inception_v3 will be changed in future releases of '
                'torchvision. If you wish to keep the old behavior (which leads to long initialization times'
                ' due to scipy/scipy#11299), please set init_weights=True.',
                FutureWarning)
            init_weights = True
        assert len(inception_blocks) == 7
        conv_block = inception_blocks[0]
        inception_a = inception_blocks[1]
        inception_b = inception_blocks[2]
        inception_c = inception_blocks[3]
        inception_d = inception_blocks[4]
        inception_e = inception_blocks[5]
        inception_aux = inception_blocks[6]

        self.aux_logits = aux_logits
        self.transform_input = transform_input
        # self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2)
        self.Conv2d_1a_3x3 = conv_block(3,
                                        32,
                                        kernel_size=3,
                                        stride=1,
                                        padding=35)
        # self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
        self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3, padding=1)
        self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3, padding=1)
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.Mixed_5b = inception_a(192, pool_features=32)
        self.Mixed_5c = inception_a(256, pool_features=64)
        self.Mixed_5d = inception_a(288, pool_features=64)
        self.Mixed_6a = inception_b(288)
        self.Mixed_6b = inception_c(768, channels_7x7=128)
        self.Mixed_6c = inception_c(768, channels_7x7=160)
        self.Mixed_6d = inception_c(768, channels_7x7=160)
        self.Mixed_6e = inception_c(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = inception_aux(768, num_classes)
        self.Mixed_7a = inception_d(768)
        self.Mixed_7b = inception_e(1280)
        self.Mixed_7c = inception_e(2048)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout()
        self.fc = nn.Linear(2048, num_classes)
        if init_weights:
            for m in self.modules():
                if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                    import scipy.stats as stats
                    stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                    X = stats.truncnorm(-2, 2, scale=stddev)
                    values = torch.as_tensor(X.rvs(m.weight.numel()),
                                             dtype=m.weight.dtype)
                    values = values.view(m.weight.size())
                    with torch.no_grad():
                        m.weight.copy_(values)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)