Example #1
	def __init__(self, inputs, outputs, hiddenUnits, learningRate, momentumRate):
		self.H = hiddenUnits
		self.K = outputs
		self.D = inputs + 1
		self.lRate = learningRate
		self.mRate = momentumRate
		
		self.V = []
		self.W = []


		#Init W (input-to-hidden weights) to random values in [-1, 1]
		for h in range(self.H):
			nextRow = []
			for j in range(self.D):
				nextRow.append(_uniform(-1,1))
			self.W.append(nextRow)
			
		#Init V (hidden-to-output weights) to random values in [-1, 1]
		for i in range(self.K):
			nextRow = []
			for h in range(self.H):
				nextRow.append(_uniform(-1,1))
			self.V.append(nextRow)
			
		
		self.lastDeltaW = []
		for h in range(self.H):
			self.lastDeltaW.append([0 for j in range(self.D)])
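Throughout these examples, `_uniform` is presumably `random.uniform` imported under a leading-underscore alias. A minimal sketch of that assumed import, plus a hypothetical instantiation (the class name `MLP` is an assumption; the snippet omits the class statement):

from random import uniform as _uniform   # assumed alias used by every snippet here

net = MLP(inputs=2, outputs=1, hiddenUnits=4,   # MLP is a hypothetical name for
          learningRate=0.3, momentumRate=0.1)   # the class this __init__ belongs to
print(len(net.W), len(net.W[0]))   # 4 3 -> H rows, D = inputs + 1 columns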
Example #2
    def __init__(self,
                NNPreproc, 
                inputs, 
                outputs, 
                hiddenUnits, 
                learningRate=0.3, 
                momentumRate=0., 
                adaptiveLearning=False,
                adaptiveHistory=5,
                adaptiveAlpha=0.05,
                adaptiveBeta=2.0):
        
        if inputs < 1:
            raise ValueError('Network must have at least one input')
        
        if outputs < 1:
            raise ValueError('Network must have at least one output')
            
        if hiddenUnits < 1:
            raise ValueError('Network must have at least one hidden unit')
        
        if learningRate <= 0:
            raise ValueError('Learning rate must be above 0')
        
        if momentumRate < 0:
            raise ValueError('Momentum rate must be at least 0')
        
        
        self.H = hiddenUnits + 1 #plus 1 for the constant bias unit in the hidden layer
        self.K = outputs
        self.D = inputs + 1 #plus 1 for the constant bias term in the input vector
        self.lRate = learningRate
        self.mRate = momentumRate
        self.aLearning = adaptiveLearning
        self.aHistory = adaptiveHistory
        self.aAlpha = adaptiveAlpha
        self.aBeta = adaptiveBeta
        self.aErrors = []
        self.o = []
        self.tanhMultiplier = 3.4318/3.
        self.NNPreproc = NNPreproc

        
        #Init V (hidden-layer-to-output-layer weights) to small random numbers
        self.V = [[_uniform(-1,1)/100. for h in range(self.H)] \
                    for k in range(self.K)]
        
        
        #Init W (input-layer-to-hidden-layer weights) to small random numbers
        self.W = [[_uniform(-1,1)/100. for d in range(self.D)] \
                    for h in range(self.H-1)]
            
        #Init last step to 0
        self.lastDeltaW = [[0 for d in range(self.D)] for h in range(self.H)]
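The otherwise opaque `tanhMultiplier = 3.4318/3.` is presumably derived from LeCun's recommended activation f(x) = 1.7159 * tanh(2x/3): its slope at the origin is 1.7159 * 2/3 = 3.4318/3 ≈ 1.144.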
Example #3
    class Model(DiseaseModel):
        def __init__(self):
            DiseaseModel.__init__(self, [markerIndex], model)

        if model == DiseaseModel.HAPLOTYPE_MODEL:

            def __call__(self, allele):
                if allele == 0: return _uniform(0.0, 1.0) < wildTypeRisk
                else: return _uniform(0.0, 1.0) < mutantRisk
        else:

            def __call__(self, genotype):
                a1, a2 = genotype
                if (a1, a2) == (0, 0):
                    return _uniform(0.0, 1.0) < homozygoteWildTypeRisk
                elif a1 == 0 or a2 == 0:
                    return _uniform(0.0, 1.0) < heterozygoteRisk
                else:
                    return _uniform(0.0, 1.0) < homozygoteMutantRisk
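The fragment above conditionally defines `__call__` at class-body time, closing over `model`, `markerIndex`, and the risk variables from an enclosing scope. A self-contained sketch of what such an enclosing factory could look like (the stub base class and the function name `make_model` are assumptions, not the original library's API):

from random import uniform as _uniform

class DiseaseModel(object):
    # Stub standing in for the real base class in the original project.
    HAPLOTYPE_MODEL = 'haplotype'
    def __init__(self, markers, model):
        self.markers, self.model = markers, model

def make_model(markerIndex, model, wildTypeRisk, mutantRisk):
    # Hypothetical factory: the fragment above was presumably nested
    # inside a function shaped like this one.
    class Model(DiseaseModel):
        def __init__(self):
            DiseaseModel.__init__(self, [markerIndex], model)
        if model == DiseaseModel.HAPLOTYPE_MODEL:
            def __call__(self, allele):
                if allele == 0: return _uniform(0.0, 1.0) < wildTypeRisk
                else: return _uniform(0.0, 1.0) < mutantRisk
    return Model()

affected = make_model(0, DiseaseModel.HAPLOTYPE_MODEL, 0.05, 0.25)
print(affected(1))   # True with probability mutantRisk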
Example #4
File: networks.py Project: sztal/sda-model
def rewire_edges(A, p=0.01, directed=False, copy=False):
    """Randomly rewire edges in an adjacency matrix with given probability.

    Parameters
    ----------
    A : (N, N) array_like
        An adjacency matrix.
    p : float
        Rewiring probability.
    directed : bool
        Whether the graph is directed.
    copy : bool
        Whether to rewire a copy of the adjacency array instead of
        modifying it in place.
    """
    if copy:
        A = A.copy()
    E = get_edgelist(A, directed=directed)
    loop = range(0, E.shape[0]) if directed else range(0, E.shape[0], 2)
    for u in loop:
        rand = _uniform(0, 1)
        if rand <= p:
            i, j = E[u, :2]
            if not directed and rand <= p / 2:
                new_i = j
            else:
                new_i = i
            idx = np.nonzero(np.where(A[new_i, :] == 0, 1, 0))[0]
            idx = idx[idx != new_i]
            if idx.size == 0:
                continue
            new_j = _choice(idx)
            A[i, j] = 0
            A[new_i, new_j] = 1
            if not directed:
                A[j, i] = 0
                A[new_j, new_i] = 1
    return A
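A minimal usage sketch, assuming `numpy` is imported as `np` and that `get_edgelist` and `_choice` (presumably `numpy.random.choice` under an alias) come from the same module, with `get_edgelist` listing each undirected edge in both directions consecutively, which is what the step-2 loop relies on:

import numpy as np

A = np.zeros((6, 6), dtype=int)
for i, j in [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]:
    A[i, j] = A[j, i] = 1                 # undirected path graph on 6 nodes

B = rewire_edges(A, p=0.5, copy=True)     # original A is left untouched
assert B.sum() == A.sum()                 # rewiring preserves the edge count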
Example #5
    def test_random_mse(self):
        """`loss.mse`: Randomized Validator.

        Tests the behavior of `mse` by feeding it randomly generated arguments.

        Raises:
            AssertionError: If `mse` needs debugging.

        """
        for i in range(self.n_tests):
            Y = _random_matrix((self.n, 1), max_val=self.max_mean)
            """float: Random-valued observations."""
            Y_hat = _random_matrix((self.n, 1), max_val=self.max_mean)
            """float: Random-valued predictions."""
            delta_Y = abs(Y - Y_hat)
            """float: Distance between predictions and observations."""
            squared_sum_delta_Y = _np.linalg.norm(delta_Y[1:, 0])**2
            """float: Sum of the squares of all but the first `delta_Y` value."""

            # Draw `err` from a range whose lower bound strictly exceeds
            # `squared_sum_delta_Y / self.n`, so that the coercion below never
            # takes the square root of a negative number.
            err = _uniform((squared_sum_delta_Y + 1.0) / self.n,
                           (squared_sum_delta_Y + self.max_mean) / self.n)
            """float: MSE to coerce."""

            # Coerce the MSE by overwriting the first prediction:
            # n * err - squared_sum_delta_Y is the squared residual that the
            # first entry must contribute, so this choice of Y_hat[0, 0]
            # mathematically guarantees mse(Y, Y_hat) == err.
            Y_hat[0, 0] = (_np.sqrt(self.n * err - squared_sum_delta_Y) -
                           Y[0, 0]) * -1.0

            result = mse(Y, Y_hat)
            """float: Computed MSE to validate against `err`."""

            self.assertAlmostEqual(result, err)
Example #6
 def __call__(self, allele):
     if allele == 0: return _uniform(0.0, 1.0) < wildTypeRisk
     else:           return _uniform(0.0, 1.0) < mutantRisk
Example #7
    def __init__(self, 
                inputs, 
                outputs, 
                hiddenUnits, 
                learningRate=0.3, 
                momentumRate=0., 
                regression=True,
                adaptiveLearning=False,
                adaptiveHistory=5,
                adaptiveAlpha=0.05,
                adaptiveBeta=2.0):
        """
        inputs - number of input this NN has
        outputs - number of outputs this NN has
        hiddenUnits - number of hiddenUnits to use in this NN
        learningRate - size of step in error space
        momentumRate - if momentumRate > 0 then steps in error space will 
                        have momentum from preivous epocs
        regression - if False assume a classification network and 
                        apply softmax function to outputs before 
                        they are returned
        adaptiveLearning - if True, backpropagation will use an adaptive 
                        learning rate.  The initial rate will be the 
                        value from learningRate
        adaptiveHistory - number of epochs to determine average error to
                        set learningRate
        adaptiveAlpha - if the average error rate decreases, learning 
                        rate will increase by this amount
        adaptiveBeta - if the average error rate increases, learning rate
                        will decrease by a factor of this value
        """
        
        if inputs < 1:
            raise ValueError('Network must have at least one input')
        
        if outputs < 1:
            raise ValueError('Network must have at least one output')
            
        if hiddenUnits < 1:
            raise ValueError('Network must have at least one hidden unit')
        
        if learningRate <= 0:
            raise ValueError('Learning rate must be above 0')
        
        if momentumRate < 0:
            raise ValueError('Momentum rate must be at least 0')
        
        
        self.H = hiddenUnits + 1 #plus 1 for the constant bias unit in the hidden layer
        self.K = outputs
        self.D = inputs + 1 #plus 1 for the constant bias term in the input vector
        self.lRate = learningRate
        self.mRate = momentumRate
        self.aLearning = adaptiveLearning
        self.aHistory = adaptiveHistory
        self.aAlpha = adaptiveAlpha
        self.aBeta = adaptiveBeta
        self.aErrors = []
        self.regression = regression

        
        #Init V (hidden-layer-to-output-layer weights) to small random numbers
        self.V = [[_uniform(-1,1)/100. for h in range(self.H)] \
                    for k in range(self.K)]
        
        
        #Init W (input-layer-to-hidden-layer weights) to small random numbers
        self.W = [[_uniform(-1,1)/100. for d in range(self.D)] \
                    for h in range(self.H-1)]
            
        #Init last step to 0
        self.lastDeltaW = [[0 for d in range(self.D)] for h in range(self.H)]
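A hypothetical instantiation of the documented constructor (the class name `BackpropNet` is an assumption; the excerpt does not show the class statement):

net = BackpropNet(inputs=4, outputs=3, hiddenUnits=8,   # BackpropNet is hypothetical
                  learningRate=0.1, regression=False,   # classification: softmax outputs
                  adaptiveLearning=True, adaptiveHistory=10)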
Example #8
def noise_generator(frequency, sample_rate):
    # frequency and sample_rate go unused: uniform white noise is aperiodic,
    # so these parameters presumably exist only to keep the signature
    # consistent with sibling tone generators.
    while True:
        yield _uniform(-1, 1)
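Since the generator is infinite, a quick way to pull a finite number of samples from it:

from itertools import islice

samples = list(islice(noise_generator(440, 44100), 8))   # eight values in [-1, 1]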
Example #9
def uniform(minimum, maximum):
    """Identical to uniform found in random module."""
    return _uniform(minimum, maximum)