Example #1
import numpy as np
import Cost as cf  # assumed helper module providing randomThetas/deviation/costDeriv/costFunction


def regression(alpha, matrix, epsilon):
    a = .1  # How do we determine a and b?
    b = 10
    size = matrix.shape[1] - 1  # don't want to include the last column (the y's)
    theta = cf.randomThetas(a, b, size)

    temp = np.copy(theta)
    derVector = np.ones(size)
    currentRowIndex = 0
    iterations = 0

    while iterations < 10000:  # TODO: replace the fixed cap with an epsilon-based convergence test
        iterations += 1
        theta = np.copy(temp)  # adopt the values computed on the previous pass
        # Take one row of the feature matrix (dropping the y column) as a 1-D array.
        currentRow = np.array(matrix[currentRowIndex:currentRowIndex + 1, :matrix.shape[1] - 1])[0]

        rowDeviation = cf.deviation(currentRow, theta)
        for i in range(size):
            derVector[i] = cf.costDeriv(rowDeviation, theta[i])
            temp[i] = theta[i] - (float(alpha) / size) * derVector[i]

        # Cycle through the rows, wrapping back to the first after the last one.
        if currentRowIndex == matrix.shape[0] - 1:
            currentRowIndex = 0
        else:
            currentRowIndex += 1
        print(cf.costFunction(theta, currentRow, matrix, size))
    return theta
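A hypothetical invocation of the function above (the cf helpers are assumed to live in the project's Cost module, as imported in the sketch):

import numpy as np

# 20 samples: 3 feature columns plus a final y column, as regression() expects.
data = np.hstack([np.random.randn(20, 3), np.random.randn(20, 1)])
theta = regression(alpha=0.01, matrix=data, epsilon=1e-6)
print('learned parameters:', theta)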
Example #2
    def __init__(self, input, rng, n_input, n_reduced, n_reconstructed, sparsity_param, beta=0.01, activation=theano.tensor.nnet.sigmoid):
        self.input = input
        self.sparsity_param = sparsity_param
        self.beta = beta

        self.reducedLayer = ReducedLayer(
            input=input,
            rng=rng,
            activation=activation,
            n_input=n_input,
            n_reduced=n_reduced
        )

        self.reconstructionLayer = ReconstructionLayer(
            input=self.reducedLayer.output,
            rng=rng,
            activation=activation,
            n_reduced=n_reduced,
            n_reconstructed=n_reconstructed
        )  

        self.params = self.reducedLayer.params + self.reconstructionLayer.params
        self.reconstruction = self.reconstructionLayer.output

        self.cost = Cost.squared_error_loss(self.reducedLayer.input, self.reconstructionLayer.output)
        self.kul_leib = self.beta * Cost.kullback_leibler_divergence(self.sparsity_param, self.reducedLayer.output)
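Assuming this __init__ belongs to a sparse-autoencoder class (called Autoencoder below purely for illustration; the real class name is not shown), wiring it up with Theano symbols looks roughly like:

import numpy as np
import theano.tensor as T

x = T.matrix('x')                   # minibatch of flattened inputs
rng = np.random.RandomState(1234)

ae = Autoencoder(                   # hypothetical name for the class defined above
    input=x, rng=rng,
    n_input=784, n_reduced=100, n_reconstructed=784,
    sparsity_param=0.05, beta=0.01,
)
total_cost = ae.cost + ae.kul_leib  # reconstruction loss plus weighted sparsity penalty
grads = T.grad(total_cost, ae.params)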
Example #3
    def Optimal_Data(self, c_list, R_list, thres_CR):
        self.c_list = c_list
        self.R_list = R_list
        self.thres_CR = thres_CR
        res = pd.DataFrame()
        print('Generate optimal test plans data for ' + self.name + ' RDT')
        cR_list = np.array([(c, R) for c in self.c_list for R in self.R_list])
        CR_list = []
        #PR_list = []
        AP_list = []
        RDT_list = []
        RG_list = []
        RG_exp_list = []
        WS_list = []
        WS_exp_list = []
        WS_failureprob_list = []
        cost_list = []
        n_optimal_list = []
        for i in range(len(cR_list)):
            self.c = cR_list[i][0]
            self.R = cR_list[i][1]
            self.n = Optimal.Optimal(self.name, self.pi).Optimal_Sample_Size(self.c, self.R, self.thres_CR)
            n_optimal_list.append(self.n)
            risk = Risk.Risk(self.name, n=self.n, c=self.c, pi=self.pi, R=self.R)
            cost = Cost.Cost(self.name, n=self.n, c=self.c, pi=self.pi, R=self.R)
            CR_list.append(risk.Consumer_Risk())
            #PR_list.append(risk.Producer_Risk())
            AP_list.append(risk.Acceptance_Prob())
            RDT_list.append(cost.RDT(self.cost_fix, self.cost_var))
            RG_list.append(cost.Reliability_Growth(self.cost_reliability_growth))
            RG_exp_list.append(RG_list[i] * (1 - AP_list[i]))
            WS = cost.Warranty(self.sales_volume, self.cost_warranty)
            WS_list.append(WS[0])
            WS_failureprob_list.append(WS[1])
            WS_exp_list.append(WS_list[i] * AP_list[i])
            cost_list.append(RDT_list[i] + RG_exp_list[i] + WS_exp_list[i])

        res['n'] = n_optimal_list
        res['c'] = cR_list[:, 0]
        res['R'] = cR_list[:, 1]
        res['CR'] = CR_list
        #res['PR'] = PR_list
        res['AP'] = AP_list
        res['RDT_Cost'] = RDT_list
        res['Reliability_Growth_Cost'] = RG_list
        res['Reliability_Growth_Cost_exp'] = RG_exp_list
        res['Warranty_Services_Cost'] = WS_list
        res['Warranty_Services_Failure_Probability'] = WS_failureprob_list
        res['Warranty_Services_Cost_exp'] = WS_exp_list
        res['Cost_exp'] = cost_list

        return res
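For reference, the cR_list grid built above is just the Cartesian product of the two input lists; a self-contained illustration:

import numpy as np
from itertools import product

c_list, R_list = [0, 1, 2], [0.80, 0.90]
cR_list = np.array([(c, R) for c in c_list for R in R_list])
assert np.array_equal(cR_list, np.array(list(product(c_list, R_list))))
print(cR_list[:, 0])  # the c values that fill the 'c' column
print(cR_list[:, 1])  # the R values that fill the 'R' column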
Example #4
    def __init__(self,
                 input,
                 labels,
                 n_in=None,
                 n_out=None,
                 weights=None,
                 bias=None):
        # When shapes are given, start from zero-initialized parameters
        # (overriding any weights/bias passed in).
        if n_in is not None and n_out is not None:
            weights = np.zeros((n_in, n_out), dtype=theano.config.floatX)
            bias = np.zeros((n_out,), dtype=theano.config.floatX)

        self.weights = theano.shared(value=weights,
                                     name='weights',
                                     borrow=True)
        self.bias = theano.shared(value=bias, name='bias', borrow=True)

        self.input = input
        self.labels = labels
        self.predictedHotOne = T.nnet.softmax(
            T.dot(self.input, self.weights) + self.bias)
        self.predictions = T.argmax(self.predictedHotOne, axis=1)

        self.params = [self.weights, self.bias]
        self.cost = Cost.negative_log_likelihood(self.predictedHotOne,
                                                 self.labels)
        self.misclassified = T.mean(T.neq(self.predictions, self.labels))
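What predictedHotOne and predictions compute, restated as a small self-contained numpy sketch (the shapes are illustrative):

import numpy as np

X = np.random.randn(4, 3)                  # 4 samples, 3 features
W, b = np.zeros((3, 2)), np.zeros(2)       # 2 classes, zero-initialized as above
logits = X @ W + b
probs = np.exp(logits - logits.max(axis=1, keepdims=True))
probs /= probs.sum(axis=1, keepdims=True)  # row-wise softmax, like T.nnet.softmax
preds = probs.argmax(axis=1)               # like T.argmax(..., axis=1)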
Example #5
    def __init__(self, input, labels, n_in=None, n_out=None, weights=None, bias=None):
        if n_in is not None and n_out is not None:
            weights = np.zeros(
                (n_in, n_out),
                dtype=theano.config.floatX
            )
            bias = np.zeros(
                (n_out,),
                dtype=theano.config.floatX
            )

        self.weights = theano.shared(
            value=weights,
            name='weights',
            borrow=True
        )
        self.bias = theano.shared(
            value=bias,
            name='bias',
            borrow=True
        )

        self.input = input
        self.labels = labels
        self.predictedHotOne = T.nnet.softmax(T.dot(self.input, self.weights) + self.bias)
        self.predictions = T.argmax(self.predictedHotOne, axis=1)

        self.params = [self.weights, self.bias]
        self.cost = Cost.negative_log_likelihood(self.predictedHotOne, self.labels)
        self.misclassified = T.mean(T.neq(self.predictions, self.labels))
Example #6
 def evaluate(self, solution):
     y = solution.variables
     solution.objectives[:] = [
         -Res.Res(y, self.d, self.hStar),
         Cost.Cost(y, self.d)
     ]
     solution.constraints[:] = Constraint.Constraint(y, self.d, self.hStar)
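The solution.variables/solution.objectives/solution.constraints pattern matches the Platypus optimization library (an assumption about the surrounding class); a minimal self-contained problem in that style:

from platypus import NSGAII, Problem, Real

class Toy(Problem):
    def __init__(self):
        super().__init__(2, 2, 1)  # 2 decision variables, 2 objectives, 1 constraint
        self.types[:] = Real(0, 1)
        self.constraints[:] = ">=0"

    def evaluate(self, solution):
        x, y = solution.variables
        solution.objectives[:] = [x ** 2 + y ** 2, (x - 1) ** 2 + y ** 2]
        solution.constraints[:] = [x + y - 0.5]  # feasible when x + y >= 0.5

algorithm = NSGAII(Toy())
algorithm.run(1000)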
Example #7
 def fitness(self, x):
     from epanettools import epanet2 as et
     from epanettools.epanettools import EPANetSimulation, Node, Link, Network, Nodes, \
         Links, Patterns, Pattern, Controls, Control  # import all elements needed
     d = EPANetSimulation('/home/varsha/Documents/Project.inp')
     f1 = Cost.Cost(x)
     f2 = PRI.PRI(x, d)
     return [f1, f2]
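A fitness(self, x) method returning a list of objective values is how pygmo's user-defined problems look, which this class appears to follow (an assumption); a minimal runnable UDP in that style for comparison:

import pygmo as pg

class TwoObjectiveUDP:
    def fitness(self, x):
        return [sum(x), -min(x)]       # placeholder objectives f1, f2
    def get_nobj(self):
        return 2
    def get_bounds(self):
        return ([0.0] * 4, [1.0] * 4)  # made-up bounds for 4 decision variables

pop = pg.population(pg.problem(TwoObjectiveUDP()), size=40)
pop = pg.algorithm(pg.nsga2(gen=50)).evolve(pop)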
Example #8
import time

import numpy as np
import Cost  # assumed helper module with randomThetas/deviation/costFunction/costDeriv


def regression(alpha, epsilon, gamma, secondsToRun, data, ridge):
    if not ridge:
        gamma = 0  # gamma is the ridge penalty; zero disables regularization
    size = data[0].size - 1  # feature count (the last column holds the y's)
    thetas = Cost.randomThetas(0, 0, size)  # initial thetas (bounds (0, 0) give all zeros)
    cur = 0
    timeout = time.time() + secondsToRun
    itera = 0
    previousCost = 0
    while True:
        va = np.asarray(data[cur])
        v = va[0]
        thetaT = np.zeros(size)
        dev = Cost.deviation(v, thetas)
        cost = Cost.costFunction(thetas, data, gamma)
        # Stop on convergence (cost barely moved) or on timeout.
        if abs(cost - previousCost) < epsilon or timeout < time.time():
            break
        for i in range(size):
            thetaT[i] = (1 - alpha * gamma) * thetas[i] - alpha * Cost.costDeriv(dev, v[i])

        thetas = thetaT

        # data.shape[0] is the number of rows; wrap around after the last one.
        if cur < data.shape[0] - 1:
            cur += 1
        else:
            cur = 0
            itera += 1  # completed one full pass over the data
        previousCost = cost
    return thetas
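For reference, the update inside the loop is the standard ridge-regularized SGD step, theta_i <- (1 - alpha*gamma)*theta_i - alpha*dJ/dtheta_i; a one-step numpy rendering with made-up numbers:

import numpy as np

alpha, gamma = 0.01, 0.1
thetas = np.array([0.5, -0.2])
grad = np.array([1.0, 2.0])  # stands in for the Cost.costDeriv values
thetas = (1 - alpha * gamma) * thetas - alpha * grad  # weight decay plus gradient step
print(thetas)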
Example #9
# coding=utf-8
import Cost
import Hashgain

amount = int(input('Please input the number of machines: '))

x = Cost.initialize(amount)  # assumed to return (daily running cost, initial hardware cost)
y = float(Hashgain.daily_profit(amount))
days = float(x[1]) / (y - float(x[0]))

print("Your net profit per day is %.2f" % (y - float(x[0])))
print("Your gross profit per day is %.2f" % y)
print("You break even after %.1f days" % days)
Example #10
 def evaluate(self, solution):
     y = solution.variables
     solution.objectives[:] = [PRI.PRI(y, self.d), Cost.Cost(y, self.d)]
     solution.constraints[:] = Constraint.Constraint(y, self.d)
Example #11
 def cost(self, groundTruth):
     return Cost.negative_log_likelihood(self.predictedHotOne, groundTruth)
Example #12
 def buildCostMap(self,testMap):
     cost = Cost()
     costMap = cost.buildCostMap(testMap)
     return costMap
Example #13
File: test.py Project: ershook/hw1
import numpy as np
import Cost as cf  # assumed source of deviation/gradient


def main():
    a = np.ones(10)
    print(cf.deviation(a, a))
    print(cf.gradient(10, np.array([1, 2, 3])))
Example #14
import numpy
import Cost, GradientDescent

m = 10000
s = 0
W_perfect = numpy.matrix([2, 1])
b_perfect = float(1.5)

X1_input = numpy.matrix(numpy.random.uniform(-100, 100, (m, 1)))
X2_input = numpy.array(X1_input) * numpy.array(X1_input)
X_input = numpy.append(X1_input, X2_input, axis=1)
y_input = numpy.matrix(
    numpy.random.normal((X_input @ W_perfect.T) + b_perfect, s))

cost = Cost.MSE(X_input, y_input)
[W_gd, b_gd, nIter] = GradientDescent.Solve(cost,
                                            learning_rate=1e-1,
                                            max_iterations=1e8,
                                            epsilon=1e-16,
                                            debug_step=10000)
[W_as, b_as] = cost.AnalyticSolve()

print('**********************************************')
print('Input parameters:', W_perfect, b_perfect)
print('Gradient descent:', W_gd, b_gd, '(' + str(nIter) + ')')
print('Analytical      :', W_as, b_as)
print('**********************************************')

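For comparison, the closed-form least-squares fit that AnalyticSolve presumably computes can be reproduced directly with numpy (reusing the arrays defined above):

# Append a bias column, then solve the least-squares system A w = y directly.
A = numpy.hstack([numpy.asarray(X_input), numpy.ones((X_input.shape[0], 1))])
w, *_ = numpy.linalg.lstsq(A, numpy.asarray(y_input).ravel(), rcond=None)
W_direct, b_direct = w[:-1], w[-1]
print('Normal equations:', W_direct, b_direct)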
Example #15
 def buildCostMap(self, testMap):
     cost = Cost()
     costMap = cost.buildCostMap(testMap)
     return costMap
Example #16
import time

import numpy as np
import torch
import matplotlib.pyplot as plt
from imageio import imread  # any loader that returns an HxWxC float-convertible array works
import Cost

imgL_path = "data/left.png"
imgR_path = "data/right.png"
inputL_img = imread(imgL_path).astype(np.float32)
inputR_img = imread(imgR_path).astype(np.float32)

D = 192
imgL = torch.FloatTensor(inputL_img).cuda()
imgR = torch.FloatTensor(inputR_img).cuda()
imgL = torch.mean(imgL, 2, keepdim=True)
imgR = torch.mean(imgR, 2, keepdim=True)
cost = torch.zeros_like(imgL)
cost = torch.unsqueeze(cost, 2)
cost = cost.repeat(1, 1, D, 1)
print(cost.shape)

torch.cuda.synchronize()
start_time = time.time()
Cost.Intrinsic_Cost(imgL, imgR, cost, 7)
torch.cuda.synchronize()
deltatime = time.time() - start_time
print("SAD Time: " + str(deltatime))

cost = torch.squeeze(cost, 3)
depth = torch.argmin(cost, 2)
depth = depth.cpu().numpy()
plt.imsave("data/depth.png", depth, cmap='plasma', vmin=0, vmax=D)
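Cost.Intrinsic_Cost is this project's CUDA kernel, so its exact semantics are an assumption; a slow, self-contained numpy rendering of the usual SAD cost-volume idea (window size k, left image shifted against the right by each disparity) for intuition:

import numpy as np
from scipy.ndimage import uniform_filter

def sad_cost_volume(left, right, D, k=7):
    """HxW grayscale pair -> HxWxD cost volume of windowed absolute differences."""
    H, W = left.shape
    cost = np.full((H, W, D), np.inf, dtype=np.float32)
    for d in range(D):
        diff = np.abs(left[:, d:] - right[:, :W - d])  # shift right image by disparity d
        cost[:, d:, d] = uniform_filter(diff, size=k)  # k x k window average (SAD up to scale)
    return cost

# depth = sad_cost_volume(grayL, grayR, D=192).argmin(axis=2)  # winner-takes-all disparity
# (grayL/grayR are hypothetical HxW float arrays, e.g. the channel means computed above)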