Example No. 1
    def generate_triplet_adaptive(dataset, bpr_optimizer):
        """
        generate negative instance using adaptive sampling
        sample from a pre-defined exponential distribution
        """
        # pick an anchor node t_i uniformly at random from the abstract graph
        t_i = random.choice(list(dataset.Abstract_Graph.nodes()))
        neg_list = list(set(dataset.paper_abstract) -
                        set(dataset.Abstract_Graph.neighbors(t_i)) - set([t_i]))

        # given t_i, sample a positive neighbor t_j proportionally to edge weight
        # (idea of edge sampling)
        neig_list = list(dataset.Abstract_Graph.neighbors(t_i))
        weight_list = [dataset.Abstract_Graph[t_i][nbr]['weight']
                       for nbr in neig_list]
        norm_weight_list = [float(w) / sum(weight_list)
                            for w in weight_list]
        t_j = np.random.choice(neig_list, 1, p=norm_weight_list)[0]

        # sample the negative instance t_t from a softmax (exponential) distribution
        # over the current model scores
        if len(neg_list) > 0:
            norm_soft = softmax([bpr_optimizer.predict_score(t_i, ne, "dabstract")
                                 for ne in neg_list])
            t_t = np.random.choice(neg_list, 1, p=norm_soft)[0]
        else:
            # no non-neighbors are left, so fall back to drawing from the neighbors
            t_t = np.random.choice(neig_list)
        yield t_i, t_j, t_t
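All of the examples on this page call a softmax helper that is defined elsewhere in their respective projects. A minimal, numerically stable sketch of such a helper (an assumption for illustration, not the original implementation) is:

import numpy as np

def softmax(x):
    # Subtract the max for numerical stability, then normalize the exponentials.
    x = np.asarray(x, dtype=float)
    e = np.exp(x - np.max(x))
    return e / np.sum(e)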
Example No. 2
def softmax_forward_block(a_prev, w, b):
    # Linear transform of the previous activations followed by a softmax
    h = np.dot(w, a_prev) + b
    a = softmax(h)

    # Cache the inputs so the backward pass can reuse them
    cache = {'a_prev': a_prev, 'w': w, 'b': b}

    return a, cache
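Only the forward block appears above; the matching backward block is not shown on this page. The sketch below is a guess at what it could look like, assuming examples are stacked along the columns of a_prev and that the incoming gradient dh is the gradient of the loss with respect to the pre-activation h (which is simply a - y for softmax with cross-entropy):

def softmax_backward_block(dh, cache):
    # Hypothetical companion to softmax_forward_block above.
    a_prev, w = cache['a_prev'], cache['w']
    m = a_prev.shape[1]                          # number of examples (columns)
    dw = np.dot(dh, a_prev.T) / m                # gradient w.r.t. the weights
    db = np.sum(dh, axis=1, keepdims=True) / m   # gradient w.r.t. the bias
    da_prev = np.dot(w.T, dh)                    # gradient w.r.t. the previous activations
    return da_prev, dw, db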
Example No. 3
    def run_op(self, argmap):
        import original
        import augmented

        # Original data just returns the actual samples
        origdata = utility.PickleData(original, 'sample').load()

        # Augmented saves a bunch more stuff.
        _, augmdata, tknprbs, loggies = utility.PickleData(
            augmented, 'sample').load()

        utility.assert_zero(origdata - augmdata)
        print(
            "Success, checked data of size {} against original sample".format(
                origdata.shape))

        # Check that each stored token probability matches the softmax of the stored logits
        for bi in range(augmdata.shape[0]):
            for pi in range(1, augmdata.shape[1]):
                logits = loggies[bi, :, pi]
                token = augmdata[bi, pi]
                probs = utility.softmax(logits)

                aprb = probs[token]
                bprb = tknprbs[bi, pi]

                utility.assert_small(aprb - bprb, epsilon=1e-4)

        print(
            "Checked correspondence between logit values and token probabilities"
        )
Example No. 4
 def feed_forward(self, X, y):
     '''
     Implementation of the feedforward pass.
     '''
     # Activation function for the hidden layers
     g = lambda x: ut.tanh(x)
     Z = [None] * len(self.layer_sizes)
     input_layer = X
     for i in range(len(self.hidden_layer_sizes) + 1):
         # Multiplying input_layer by weights for this layer
         Z[i + 1] = np.dot(self.weights[i], input_layer) + self.bias[i]
         # Activation function
         if i == len(self.hidden_layer_sizes):
             # Output layer uses softmax() so the cross-entropy loss can be computed
             self.A[i] = ut.softmax(Z[i + 1])
             # Derivative of softmax; returns a matrix
             self.df[i] = ut.softmax_gradient(Z[i + 1])
         else:
             # Hidden layers use tanh(); ut.tanh returns the activation and its derivative
             self.A[i], self.df[i] = g(Z[i + 1])
         # Current output_layer will be next input_layer
         input_layer = self.A[i]
     error = ut.cross_entropy_loss(self.A[-1], y)
     return error, self.A[-1]
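The snippet above unpacks self.A[i], self.df[i] = g(Z[i + 1]), so it assumes the ut.tanh helper returns both the activation and its elementwise derivative. A minimal sketch of a helper following that (assumed) convention:

import numpy as np

def tanh(z):
    # Return the tanh activation together with its elementwise derivative 1 - tanh(z)^2.
    a = np.tanh(z)
    return a, 1.0 - a ** 2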
Example No. 5
    def generate_triplet_adaptive(dataset, bpr_optimizer):
        """
        generate negative instance using adaptive sampling
        sample from a pre-defined exponential distribution
        """
        # pick a paper uniformly at random; negatives are authors not on that paper
        d_i = random.choice(dataset.paper_list)
        neg_list = list(set(dataset.coauthor_list) -
                        set(dataset.paper_authorlist_dict[d_i]))

        while True:
            if dataset.paper_authorlist_dict[d_i] != []:
                # positive instance: an author of the sampled paper
                a_j = random.choice(dataset.paper_authorlist_dict[d_i])

                # sample negative instance based on pre-defined exponential distribution
                norm_soft = softmax([
                    bpr_optimizer.predict_score(d_i, ne, "pd")
                    for ne in neg_list
                ])
                a_t = np.random.choice(neg_list, 1, p=norm_soft)[0]
                yield d_i, a_j, a_t
                break

            else:
                # the sampled paper has no authors; resample a paper and rebuild neg_list
                d_i = random.choice(dataset.paper_list)
                neg_list = list(set(dataset.coauthor_list) -
                                set(dataset.paper_authorlist_dict[d_i]))
Example No. 6
def compute_cost(X, Y, w, b, lambd, regularized):
    m = Y.shape[-1]
    Z = np.dot(w, X) + b
    A = softmax(Z)
    # cross-entropy loss averaged over the m examples
    cost = -np.sum(Y * np.log(A)) / m

    if regularized == 1:
        cost += lambd * np.sum(np.abs(w)) / m  # L1 regularization
    elif regularized == 2:
        cost += lambd * np.sum(w ** 2) / m  # L2 regularization (squared Frobenius norm)

    return cost
Example No. 7
    def fine_tune(self):
        # forward pass: linear transform followed by a softmax over the classes
        output = numpy.dot(self.input, self.W.T) + self.b
        hidden_possible = softmax(output)
        # error between the one-hot labels and the predicted probabilities
        d_y = self.label - hidden_possible

        print('     loss : ' + str(numpy.sum(d_y ** 2)))

        # gradient step on the weights and the bias, averaged over the batch
        self.W += self.lr * numpy.dot(d_y.T, self.input) / self.data_size
        self.b += self.lr * numpy.mean(d_y, axis=0)
Example No. 8
    def predict(self, X, Y):
        m = X.shape[-1]

        w = self.parameters['w']
        b = self.parameters['b']
        Z = np.dot(w, X) + b
        A = softmax(Z)

        # predicted and true class labels (classes lie along axis 0)
        self.Y_p = np.argmax(A, axis=0)
        Y_label = np.argmax(Y, axis=0)
        correct = (self.Y_p == Y_label)

        # fraction of correctly classified examples
        self.accuracy = np.sum(correct) / m

        return self.accuracy
Example No. 9
def softmax_propagate(X, Y, w, b, lambd, regularized):
    m = Y.shape[-1]
    Z = np.dot(w, X) + b
    A = softmax(Z)

    cost = compute_cost(X, Y, w, b, lambd, regularized)

    # gradients of the cross-entropy loss with respect to w and b
    dw = np.dot((A - Y), X.T) / m
    db = np.sum((A - Y), axis=1, keepdims=True) / m

    if regularized == 2:
        dw += 2 * lambd * w / m  # L2 regularization term
    elif regularized == 1:
        dw += lambd * np.sign(w) / m  # L1 regularization term

    grad = {'dw': dw, 'db': db}

    return grad, cost
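compute_cost (Example No. 6) and softmax_propagate (Example No. 9) fit together in an ordinary gradient-descent loop. A sketch of such a loop, where the learning_rate and num_iterations names are illustrative rather than taken from the original code:

def softmax_fit(X, Y, w, b, lambd=0.0, regularized=0,
                learning_rate=0.1, num_iterations=1000):
    # Repeatedly compute the gradients with softmax_propagate and take a descent step.
    cost = None
    for _ in range(num_iterations):
        grad, cost = softmax_propagate(X, Y, w, b, lambd, regularized)
        w = w - learning_rate * grad['dw']
        b = b - learning_rate * grad['db']
    return w, b, cost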
Example No. 10
    def inverse_predict(self):
        print('Logistic Regression : self.W')
        print(self.W)
        print(numpy.max(self.W))
        print(numpy.min(self.W))
        print(numpy.mean(self.W))
        print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')

        # project a one-hot class vector back through the weight matrix
        inverse_input = numpy.dot((1, 0, 0, 0, 0, 0, 0, 0, 0, 0), self.W)
        # turn the projection into probabilities and sample a binary input vector
        inverse_input_possible = softmax(inverse_input)
        inverse_input_sample = numpy_rng.binomial(n=1, p=inverse_input_possible)
        print(inverse_input_sample)

        with open('inverse_log.txt', 'wb') as f:
            cPickle.dump(inverse_input_sample, f)

        return inverse_input_sample
Example No. 11
    def get_emotion(self, face_gray):
        # Preprocess input image for emotion detection
        image_data = utility.resize_and_pad(face_gray, self.size_w,
                                            self.size_h, 0)
        image_data = np.array(image_data, dtype=np.float32)
        image_data = np.resize(image_data, self.input_shape)

        # Detect emotion
        result = self.session.run(None, {self.inputs[0].name: image_data})

        # Postprocess the output scores and look up the emotion label
        scores = result[0][0]
        # clip non-positive scores to a small constant before the softmax
        scores = np.maximum(scores, 1e-9)
        scores = utility.softmax(scores)
        class_index = np.argmax(scores)
        confidence = scores[class_index]
        color = self.colors[class_index]
        emotion = self.labels[class_index]
        return emotion, confidence, color
Example No. 12
    def generate_triplet_adaptive(dataset, bpr_optimizer):
        """
        generate negative instance using adaptive sampling
        sample from a pre-defined exponential distribution
        """
        # pick an anchor author a_i uniformly at random from the co-author graph
        a_i = random.choice(list(dataset.C_Graph.nodes()))
        neg_list = list(set(dataset.coauthor_list) -
                        set(dataset.C_Graph.neighbors(a_i)) - set([a_i]))

        # given a_i, sample a positive neighbor a_j proportionally to edge weight
        # (idea of edge sampling)
        neig_list = list(dataset.C_Graph.neighbors(a_i))
        weight_list = [dataset.C_Graph[a_i][nbr]['weight']
                       for nbr in neig_list]
        norm_weight_list = [float(w) / sum(weight_list)
                            for w in weight_list]
        a_j = np.random.choice(neig_list, 1, p=norm_weight_list)[0]

        # sample the negative instance from a softmax (exponential) distribution
        # over the current model scores
        norm_soft = softmax([bpr_optimizer.predict_score(a_i, ne, "pp")
                             for ne in neg_list])
        a_t = np.random.choice(neg_list, 1, p=norm_soft)[0]
        yield a_i, a_j, a_t
Example No. 13
 def predict(self, input_board):
     logits = self.model.predict(
         np.expand_dims(input_board, axis=0).astype('float64'))
     # Apply softmax on the logits after prediction
     p = utility.softmax(logits)
     return p.squeeze()  # Remove the extra batch dimension
Example No. 14
	def forward(self, X):
		# Still need to add a configurable activation function
		Z1 = np.tanh(X.dot(self.W1) + self.b1)
		Z2 = np.tanh(Z1.dot(self.W2) + self.b2)
		Z3 = np.tanh(Z2.dot(self.W3) + self.b3)
		# softmax output layer; the hidden activations are returned for backpropagation
		return softmax(Z3.dot(self.W4) + self.b4), Z3, Z2, Z1
Example No. 15
 def predict(self, input):
     # linear transform followed by a softmax to obtain class probabilities
     output = numpy.dot(input, self.W.T) + self.b
     hidden_possible = softmax(output)
     return hidden_possible