Example #1
    def weightSelecter(self, train=False):
        classi = self.classifier
        # Start with uniform sample weights over the training observations.
        classi.weight = (1 / float(classi.actualOutput.shape[0])) * np.ones(
            shape=(classi.actualOutput.shape[0], 1))
        best = 5.0
        for i in range(0, self.maxIteration):
            # Randomly re-initialise every layer's coefficients for this restart.
            for j in range(0, len(classi.layers)):
                classi.layers[j].cofficient = np.random.rand(
                    classi.layers[j].cofficient.shape[0],
                    classi.layers[j].cofficient.shape[1])
            classi.findEstimates()
            # Keep the prediction of the lowest-cost restart seen so far.
            if classi.cost < best:
                best = classi.cost
                self.prediction = classi.predict(self.test)

            # Classifier vote weight: larger when the overall cost is smaller.
            classifierWeight = np.log((2.5 - classi.cost) / classi.cost)
            self.classifierWeight.append(classifierWeight)
            costMatrix = (self.classifier.analyseObservation(
                self.trainCopy))['cost'].as_matrix()
            # Re-weight the samples: rows whose cost exceeds 0.55 get factor
            # sigmoid(classifierWeight), the rest get sigmoid(0) = 0.5, then renormalise.
            y = sigmoid(classifierWeight * (costMatrix.reshape(
                (costMatrix.shape[0], 1)) > 0.55).astype(int))
            if (np.isnan(y)).any(): print "weight comes out to be nan"
            newWeight = classi.weight * y
            classi.weight = newWeight / sum(newWeight)

        print best, classi.input.shape[0]
        return self.prediction
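The method above leans on a module-level sigmoid that none of the snippets on this page defines, as well as on the classifier's own layers, findEstimates, predict, and analyseObservation members from the surrounding project. A minimal sketch of the helper, assuming the standard logistic function (an assumption, since the original definition is not shown):

import numpy as np

# Assumed helper: the standard logistic function. All five examples call it,
# but its definition is not part of any snippet shown here.
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))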
Example #2
def costMatrix(p):
    # Split the flat parameter vector into two weight vectors, one per sigmoid column.
    mid = len(p) // 2
    p1 = p[0:mid]
    p2 = p[mid:2 * mid]
    prob = np.transpose(np.matrix(np.vstack((p1, p2))))
    # Two sigmoid class columns plus an implicit third column that makes each row sum to one.
    a = (sigmoid(np.dot(data, prob)))
    b = 1 - np.sum(a, axis=1)
    probability = np.matrix(np.hstack((a, b)))
    # Per-observation cross-entropy against the module-level target matrix.
    logp = np.log(probability)
    cost = -np.sum(np.multiply(target, logp), axis=1)
    return cost
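costMatrix reads the module-level globals data (the design matrix) and target (the per-row class indicator matrix) rather than taking them as arguments, and it builds the third class column implicitly as one minus the two sigmoid outputs. A minimal shape check under those assumptions, with hypothetical dummy globals (the 6x4 design matrix, the three one-hot classes, and the logistic sigmoid are illustration-only choices, not taken from the source):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

rng = np.random.RandomState(0)
data = rng.rand(6, 4)                      # module-level design matrix read by costMatrix
target = np.eye(3)[rng.randint(0, 3, 6)]   # module-level one-hot targets, three classes

# Negative weights keep the two sigmoid columns summing below one here, so the
# implicit third column stays positive and the log is well defined.
p = -rng.rand(2 * data.shape[1])
cost = costMatrix(p)
print(cost.shape)                          # (6, 1): one cross-entropy term per observation
print(float(np.sum(cost)))                 # scalar total a caller could hand to an optimizer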
Example #3
def sum1(p):
    # Same parameter split as costMatrix: two weight vectors of length len(p) // 2.
    mid = len(p) // 2
    p1 = p[0:mid]
    p2 = p[mid:2 * mid]
    prob = np.transpose(np.matrix(np.vstack((p1, p2))))
    # Per-row slack of the implicit third class: one minus the sum of the two sigmoid outputs.
    t = np.squeeze(
        np.array(
            np.ones(shape=(data.shape[0], 1)) -
            np.sum((sigmoid(np.dot(data, prob))), axis=1)))
    #print t
    return t
Example #4
    def nniterate(self, train=False):
        classi = self.classifier
        # Start with uniform sample weights over the training observations.
        classi.weight = (1 / float(classi.actualOutput.shape[0])) * np.ones(
            shape=(classi.actualOutput.shape[0], 1))
        for i in range(0, self.maxIteration):
            # Randomly re-initialise every layer's coefficients for this restart.
            for j in range(0, len(classi.layers)):
                classi.layers[j].cofficient = np.random.rand(
                    classi.layers[j].cofficient.shape[0],
                    classi.layers[j].cofficient.shape[1])
            classi.findEstimates()
            # Classifier vote weight: larger when the overall cost is smaller.
            classifierWeight = np.log((2.5 - classi.cost) / classi.cost)
            tes = classi.predict(self.test)
            # Accumulate this restart's prediction, weighted by classifierWeight.
            self.prediction = tes * classifierWeight + self.prediction
            self.classifierWeight.append(classifierWeight)

            costMatrix = (self.classifier.analyseObservation(
                self.trainCopy))['cost'].as_matrix()
            # Re-weight the samples: rows whose cost exceeds 0.55 get factor
            # sigmoid(classifierWeight), the rest get sigmoid(0) = 0.5, then renormalise.
            y = sigmoid(classifierWeight * (costMatrix.reshape(
                (costMatrix.shape[0], 1)) > 0.55).astype(int))
            if (np.isnan(y)).any(): print "weight comes out to be nan"
            newWeight = classi.weight * y
            classi.weight = newWeight / sum(newWeight)
        # Normalise each row of the accumulated votes so the class columns sum to one.
        self.prediction = self.prediction.div(np.sum(self.prediction, axis=1),
                                              axis=0)
        pred = self.prediction
        if train:
            # On training data, align indices, join the targets, and report the
            # average log-likelihood over the training set.
            self.trainCopy.index = self.prediction.index
            pred = self.prediction.join(self.trainCopy,
                                        how='left',
                                        rsuffix='_t')
            pred['cost'] = np.log(pred['high']) * pred['high_t'] + np.log(
                pred['medium']) * pred['medium_t'] + np.log(
                    pred['low']) * pred['low_t']
            print np.sum(pred['cost']
                         ) / self.trainCopy.shape[0], self.trainCopy.shape[0]

        return pred
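nniterate differs from weightSelecter in Example #1 by keeping every restart rather than only the best one: each restart's prediction frame is scaled by its classifierWeight, summed, and row-normalised at the end. The same aggregation in isolation, as a minimal sketch with hypothetical names (the real method works on pandas DataFrames with high/medium/low columns):

import numpy as np

def weighted_vote(predictions, weights):
    # predictions: one (n_samples, n_classes) array per random restart;
    # weights: the matching classifierWeight values, log((2.5 - cost) / cost).
    total = sum(w * p for w, p in zip(weights, predictions))
    # Row-normalise so every observation's class scores sum to one, mirroring
    # self.prediction.div(np.sum(self.prediction, axis=1), axis=0) above.
    return total / np.sum(total, axis=1, keepdims=True)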
Example #5
def sum1(p):
    mid = len(p) // 2
    p1 = p[0:mid]
    p2 = p[mid:2 * mid]
    prob = np.transpose(np.matrix(np.vstack((p1, p2))))
    t = np.squeeze(
        np.array(
            np.ones(shape=(data.shape[0], 1)) -
            np.sum((sigmoid(np.dot(data, prob))), axis=1)))
    #print t
    return t


cons = (
    {'type': 'ineq', 'fun': lambda p: sum1(p)},
    {'type': 'ineq',
     'fun': lambda p: np.squeeze(
         np.array(sigmoid(np.dot(data, p[0:len(var)].reshape(len(var), 1)))))},
    {'type': 'ineq',
     'fun': lambda p: np.squeeze(
         np.array(
             sigmoid(np.dot(data, p[len(var):2 * len(var)].reshape(len(var), 1)))))},
)

# t= np.random.rand(len(var),3)
# r=np.sum(sigmoid(np.dot(data,t)),axis=1,keepdims=True)-np.ones(shape=(data.shape[0],1))
# Random starting point; the first entry of each of the two weight vectors is pushed strongly negative.
pinitialize = np.random.rand(len(var) * 2)
pinitialize[0] = -6.9
pinitialize[0 + len(var)] = -6.9
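The three inequality constraints keep every class probability non-negative for each row (the two sigmoid outputs directly, and the implicit third column through sum1), and pinitialize supplies a random starting point. The optimizer call itself is not part of the snippet; a hedged sketch of how these pieces could be wired to scipy.optimize.minimize, assuming the objective is the summed per-row cross-entropy from costMatrix in Example #2 (the total_cost wrapper and the COBYLA choice are assumptions, not taken from the source):

import numpy as np
from scipy.optimize import minimize

# Hypothetical wiring; reuses costMatrix, cons, and pinitialize from the snippets above.
def total_cost(p):
    return float(np.sum(costMatrix(p)))   # collapse the per-row costs to a single scalar

result = minimize(total_cost, pinitialize, method='COBYLA', constraints=cons)
fitted = result.x                         # two stacked weight vectors, length 2 * len(var)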