Example #1
 def learn(self, Xtrain, ytrain):
     # Batch gradient descent on the regularized cross-entropy objective
     self.weights = np.ones(Xtrain.shape[1])
     centropy = utils.cross_entropy(Xtrain, ytrain, self.weights) + self.regloss(self.regularizer)
     converged = False
     iters = 0
     while not converged:
         output = utils.sigmoid(np.dot(Xtrain, self.weights))
         error = output - ytrain
         self.weights = self.weights - self.alpha * (np.dot(Xtrain.T, error) + self.reggradient(self.regularizer))
         newcentropy = utils.cross_entropy(Xtrain, ytrain, self.weights) + self.regloss(self.regularizer)
         if abs(centropy - newcentropy) <= self.ep:
             converged = True
         centropy = newcentropy
         iters += 1
         if iters > self.max_iter:
             converged = True
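
Example #1 leans on `self.regloss` and `self.reggradient`, which are not shown on this page. A minimal sketch, assuming `self.regularizer` is a string such as 'l2' and that the penalty strength lives in a hypothetical `self.regwgt` attribute, could look like this:

 def regloss(self, regularizer):
     # L2 penalty added to the objective (sketch; self.regwgt is an assumed attribute)
     if regularizer == 'l2':
         return 0.5 * self.regwgt * np.dot(self.weights, self.weights)
     return 0.0

 def reggradient(self, regularizer):
     # Gradient of the L2 penalty with respect to the weights
     if regularizer == 'l2':
         return self.regwgt * self.weights
     return np.zeros_like(self.weights)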
Example #2
    def __init__(self):

        with tf.name_scope('input'):
            self.x = tf.placeholder(tf.float32, shape = [None, IMG_WIDTH, IMG_HEIGHT, NUM_CHANNELS])
            self.y = tf.placeholder(tf.float32, shape = [None, IMG_WIDTH, IMG_HEIGHT, NUM_CLASSES])
            self.keep_prob = tf.placeholder(tf.float32)
            self.phase_train = tf.placeholder(tf.bool)

        with tf.name_scope('logits'):
            logits, self.variables = conv_net_model(self.x, self.keep_prob, self.phase_train)

        with tf.name_scope('cost'):
            self.cost = self._get_cost(logits)

        with tf.name_scope('cross_entropy_sum'):
            self.cross_entropy_sum = tf.reduce_sum(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=tf.reshape(logits, [-1, NUM_CLASSES]),
                    labels=tf.reshape(self.y, [-1, NUM_CLASSES])))

        with tf.name_scope('cross_entropy'):
            # Only computed for the TensorBoard summary
            self.cross_entropy = tf.reduce_mean(utils.cross_entropy(tf.reshape(self.y, [-1, NUM_CLASSES]),
                                    tf.reshape(utils.pixel_wise_softmax_2(logits), [-1, NUM_CLASSES])))

        with tf.name_scope('softmax_predicter'):
            #self.predicter = utils.pixel_wise_softmax_2(logits)
            self.predicter = tf.nn.softmax(logits, dim = 3)

        with tf.name_scope('accuracy'):
            self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
            self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
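
The `cross_entropy` and `pixel_wise_softmax_2` helpers from `utils` are not part of this example. A rough TF1-style sketch, assuming one-hot labels, channels-last logits, and probability clipping to avoid log(0), might be:

import tensorflow as tf

def pixel_wise_softmax_2(output_map):
    # Softmax over the channel axis, computed independently for every pixel
    exponentials = tf.exp(output_map - tf.reduce_max(output_map, axis=3, keep_dims=True))
    return exponentials / tf.reduce_sum(exponentials, axis=3, keep_dims=True)

def cross_entropy(labels, probs):
    # Per-pixel cross-entropy between one-hot labels and predicted class probabilities
    return -tf.reduce_sum(labels * tf.log(tf.clip_by_value(probs, 1e-10, 1.0)), axis=1)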
Example #3
 def learn(self, Xtrain, ytrain):
     # Plain (unregularized) gradient descent on the cross-entropy objective
     self.weights = np.ones(Xtrain.shape[1])
     centropy = utils.cross_entropy(Xtrain, ytrain, self.weights)
     converged = False
     iters = 0
     while not converged:
         output = utils.sigmoid(np.dot(Xtrain, self.weights))
         # If maximizing the log-likelihood instead of minimizing cross-entropy, the gradient flips sign (error = -error) and the update rule becomes w = w + alpha * delta(w)
         error = output - ytrain
         self.weights = self.weights - self.alpha * np.dot(Xtrain.T, error)
         newcentropy = utils.cross_entropy(Xtrain, ytrain, self.weights)
         if abs(centropy - newcentropy) <= self.ep:
             converged = True
         centropy = newcentropy
         iters += 1
         if iters > self.max_iter:
             converged = True
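
Both logistic-regression examples call `utils.sigmoid` and `utils.cross_entropy(X, y, w)`, which are not reproduced here. A plausible NumPy sketch (the clipping constant is an added assumption to avoid log(0)) is:

import numpy as np

def sigmoid(z):
    # Logistic function applied elementwise
    return 1.0 / (1.0 + np.exp(-z))

def cross_entropy(X, y, weights, eps=1e-12):
    # Mean binary cross-entropy of the logistic model defined by `weights`
    p = np.clip(sigmoid(X.dot(weights)), eps, 1 - eps)
    return -np.mean(y * np.log(p) + (1 - y) * np.log(1 - p))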
Example #4
    def loss(self, prediction_list, cache_list, label):
        ce_tuple = tuple(cross_entropy(prediction_list, label))
        ce_loss = sum(ce_tuple)

        argmax = lambda t: th.max(t, 1)[1]
        category_tuple = tuple(argmax(p) for p in prediction_list)
        indicator_tuple = tuple(c == label for c in category_tuple)
        reward, rl_loss = 0, 0
        # Walk the steps in reverse, accumulating the reward at each location
        for indicator, cache in reversed(list(zip(indicator_tuple, cache_list))):
            reward = reward + indicator
            rl_loss = rl_loss + self._location_network.loss(
                reward.float(), cache)

        value = (ce_loss + rl_loss) / self._T
        return value, ce_tuple
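
Example #4 expects `cross_entropy(prediction_list, label)` to return one loss term per prediction in the list. A short sketch, assuming each entry holds raw (unnormalized) logits and `label` holds class indices, could be:

import torch as th
import torch.nn.functional as F

def cross_entropy(prediction_list, label):
    # One cross-entropy term per step/glimpse prediction
    return [F.cross_entropy(prediction, label) for prediction in prediction_list]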
Example #5
b2 = np.zeros((1, layer3))

epochs = 1000
i = epochs
 
from utilities import forward_pass,relu,sigmoid,cross_entropy,sigmoid_backprop,relu_backprop

#TRAINING

lr = 1
while i > 0:
    # Forward pass: ReLU hidden layer followed by a sigmoid output layer
    z1 = forward_pass(X_train, w1, b1)
    a1 = relu(z1)
    z2 = forward_pass(a1, w2, b2)
    out = sigmoid(z2)
    loss = cross_entropy(y_train, out)
    # Backpropagation and plain gradient-descent updates for both layers
    dw2, db2 = sigmoid_backprop(out, y_train, a1)
    w2 = w2 - lr*dw2
    b2 = b2 - lr*db2
    dw1, db1 = relu_backprop(db2, w2, z1, X_train)
    w1 = w1 - lr*dw1
    b1 = b1 - lr*db1
    i -= 1


#TESTING
    
z1 = forward_pass(X_test, w1, b1)
a1 = relu(z1)
z2 = forward_pass(a1, w2, b2)
out = sigmoid(z2)
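
The script stops after the forward pass on the test set. A small follow-up, assuming binary 0/1 targets in a `y_test` array of the same shape as `out`, would turn the sigmoid outputs into an accuracy number:

# Threshold the sigmoid outputs at 0.5 and compare against the labels
predictions = (out >= 0.5).astype(int)
accuracy = np.mean(predictions == y_test)
print("Test accuracy:", accuracy)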