Code example #1
def test(model, path):
    triplets = load_resnet(path)
    #print(triplets[0:3])
    new_triplets = triplets[:25]
    good_pic_batch = np.array([val[1] for val in new_triplets])
    bad_pic_batch = np.array([val[2] for val in new_triplets])
    caption_batch = np.array([val[0] for val in new_triplets])
    print(good_pic_batch.shape, caption_batch.shape)

    good_pic_pred = model(good_pic_batch)
    bad_pic_pred = model(bad_pic_batch)
    # normalize each prediction row to a unit vector
    good_pic_pred = good_pic_pred / mg.sqrt(mg.sum(mg.power(good_pic_pred, 2), axis=-1, keepdims=True))
    bad_pic_pred = bad_pic_pred / mg.sqrt(mg.sum(mg.power(bad_pic_pred, 2), axis=-1, keepdims=True))
    print(good_pic_pred.shape)

    # good_pic_pred = good_pic_pred.reshape(1600, 1, 1)
    # bad_pic_pred = bad_pic_pred.reshape(1600, 1, 1)
    # caption_batch = caption_batch.reshape(1600, 1, 1)

    Sgood = (good_pic_pred * caption_batch).sum(axis=-1)
    Sbad = (bad_pic_pred * caption_batch).sum(axis=-1)
    print(Sgood.shape, Sbad.shape)
    # Sgood = Sgood.reshape(32, 50)
    # Sbad = Sbad.reshape(32, 50)

    #loss = margin_ranking_loss(Sgood, Sbad, 1, 0.1)
    acc = accuracy(Sgood.flatten(), Sbad.flatten())
    print(acc)
Code example #2
def negative_log_likelihood(outputs, targets, *, weights=None):
    """ Returns the (weighted) negative log-likelihood loss between outputs and targets.
    
    Note that this does not compute a softmax, so you should input log-probabilities to this.
    See ``softmax_cross_entropy`` if you need your loss to compute a softmax.

    Parameters
    ----------
    outputs : mygrad.Tensor, shape=(N, C)
        The C log probabilities for each of the N pieces of data.
    
    targets : Union[mygrad.Tensor, Sequence[int]], shape=(N,)
        The correct class indices, in [0, C), for each datum.

    weights : Union[mygrad.Tensor, Sequence[Real]], optional (default=None)
        The weighting factor to use on each class, or None.
    
    Returns
    -------
    mygrad.Tensor, shape=()
        The average (weighted) negative log-likelihood.
    """
    if isinstance(targets, Tensor):
        targets = targets.data

    label_locs = (range(len(targets)), targets)
    factors = weights[targets] if weights is not None else np.ones_like(targets)
    return -(outputs[label_locs] * factors).sum() / factors.sum()
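A minimal usage sketch for this loss (illustrative values; `mygrad` imported as `mg` and `numpy` as `np`, as elsewhere in these snippets):

import numpy as np
import mygrad as mg

# three data points, four classes; each row holds log-probabilities (e.g. log-softmax outputs)
log_probs = mg.log(mg.Tensor([[0.7, 0.1, 0.1, 0.1],
                              [0.2, 0.5, 0.2, 0.1],
                              [0.25, 0.25, 0.25, 0.25]]))
targets = [0, 1, 3]

loss = negative_log_likelihood(log_probs, targets)
loss.backward()     # gradients flow back to `log_probs`
print(loss.item())  # the average negative log-likelihood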
Code example #3
 def __call__(self, x):
     N, D = x.shape
     # mini-batch mean
     mu = (1. / N) * mg.sum(x, axis=0)
     # mini batch variance
     sqr_mu = (x - mu)**2
     var = (1. / N) * mg.sum(sqr_mu, axis=0)
     # normalize
     xhat = (x - mu) / (mg.sqrt(var + self.eps))
     return mg.matmul(xhat, self.gamma.T) + self.beta
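For reference, a self-contained sketch of a layer like the one above; the `gamma`, `beta`, and `eps` attributes are stand-ins (the enclosing class is not part of the snippet), and the scale/shift is applied element-wise rather than with the matmul used above:

import numpy as np
import mygrad as mg

class BatchNorm:
    """Minimal stand-in for the enclosing class of the method above."""
    def __init__(self, dim, eps=1e-5):
        self.gamma = mg.Tensor(np.ones((1, dim)))   # learnable scale
        self.beta = mg.Tensor(np.zeros((1, dim)))   # learnable shift
        self.eps = eps

    def __call__(self, x):
        N = x.shape[0]
        mu = (1. / N) * mg.sum(x, axis=0)             # mini-batch mean
        var = (1. / N) * mg.sum((x - mu)**2, axis=0)  # mini-batch variance
        xhat = (x - mu) / mg.sqrt(var + self.eps)     # normalize
        return xhat * self.gamma + self.beta          # scale and shift

bn = BatchNorm(dim=4)
out = bn(mg.Tensor(np.random.rand(8, 4)))
print(out.shape)  # (8, 4)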
Code example #4
def test_softmax_focal_loss(num_datum, num_classes, alpha, gamma, data, grad,
                            target_type):
    scores = data.draw(
        hnp.arrays(shape=(num_datum, num_classes),
                   dtype=float,
                   elements=st.floats(1, 100)))
    assume((abs(scores.sum(axis=1)) > 0.001).all())

    scores_mygrad = Tensor(scores)
    scores_nn = Tensor(scores)

    truth = np.zeros((num_datum, num_classes))
    targets = data.draw(
        st.tuples(*(st.integers(0, num_classes - 1)
                    for i in range(num_datum))))
    truth[range(num_datum), targets] = 1
    targets = target_type(targets)

    probs = softmax(scores_mygrad)
    mygrad_focal_loss = sum(
        truth * (-alpha * (1 - probs + 1e-14)**gamma * log(probs))) / num_datum
    mygrad_focal_loss.backward(grad)

    nn_loss = softmax_focal_loss(scores_nn, targets, alpha=alpha,
                                 gamma=gamma).mean()
    nn_loss.backward(grad)

    assert isinstance(nn_loss, Tensor) and nn_loss.ndim == 0
    assert_allclose(nn_loss.data, mygrad_focal_loss.data, atol=1e-4, rtol=1e-4)
    assert_allclose(scores_nn.grad, scores_mygrad.grad, atol=1e-4, rtol=1e-4)

    nn_loss.null_gradients()
    assert scores_nn.grad is None
Code example #5
def similarity(caption, image):
    """
    Checks how closely each caption embedding matches its corresponding image embedding.

    :param caption: [numpy.ndarray] shape = (M, 50)
    :param image: [numpy.ndarray] shape = (M, 50)
    :return: the row-wise similarities (dot products) of the two embeddings; [mygrad.Tensor] shape = (M,)
    """
    return mg.sum(caption * image, axis=1)
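A quick sketch of the expected shapes (arrays are illustrative):

import numpy as np
import mygrad as mg

captions = np.random.rand(4, 50)  # 4 caption embeddings
images = np.random.rand(4, 50)    # 4 image embeddings

sims = similarity(captions, images)
print(sims.shape)  # (4,) -- one dot product per (caption, image) row pair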
Code example #6
def normalize(arr):
    """
    Takes in an array and normalizes each of its rows by dividing by that row's magnitude,
    so that every row becomes a unit vector.

    :param arr: [np.ndarray] shape = (M, 50)
    :return: [mygrad.Tensor] shape = (M, 50)
    """
    return arr / mg.sqrt(mg.sum(arr**2, axis=1, keepdims=True))
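A small check of the row-wise normalization (values are arbitrary):

import numpy as np
import mygrad as mg

arr = np.random.rand(3, 50)
unit = normalize(arr)

# every row should now have (approximately) unit length
print(np.allclose(mg.sum(unit**2, axis=1).data, 1.0))  # True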
Code example #7
def mr_loss(model, triple):
    """
    Returns the margin ranking loss, given two image embedding vectors and a "good" caption.

    Parameters
    ----------
    model : Img2Caption
        The model used to convert the image descriptors to word embeddings.
    
    triple : np.ndarray(tuple) - shape(num_tuples, 3)
        A numpy array containing tuples with three elements: the descriptor of a "good" image,
        the caption embedding corresponding to that image, and the descriptor of a "bad" image.

    Returns
    -------
    margin_ranking_loss : mg.Tensor
        The margin ranking loss of the similarities (dot products) between the word embeddings for:
            the "good" image and "good" caption,
            the "good" image and "bad image".
    """
    # S_good = mg.dot(triple[1], model(triple[0])))
    # S_bad = mg.dot(triple[1], model(triple[2])))
    # margin_ranking_loss(S_good, S_bad, y, margin)

    good_images = []
    good_captions = []
    bad_images = []

    for good_img, good_cap, bad_img in triple:
        good_images.append(good_img)
        good_captions.append(good_cap)
        bad_images.append(bad_img)

    good_images = np.array(good_images)
    good_captions = np.array(good_captions)
    bad_images = np.array(bad_images)

    return margin_ranking_loss(
        mg.sum(good_captions * model(good_images), axis=1),
        mg.sum(good_captions * model(bad_images), axis=1),
        1,
        0.1,
    )
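A hedged usage sketch: `Img2Caption` and `margin_ranking_loss` belong to the surrounding project, so the model below is only a stand-in with matching input/output shapes.

import numpy as np
import mygrad as mg
from mygrad.nnet.losses import margin_ranking_loss  # the loss mr_loss expects in scope

class FakeModel:
    """Stand-in for Img2Caption: maps (N, 512) descriptors to (N, 50) embeddings."""
    def __init__(self):
        self.W = mg.Tensor(np.random.randn(512, 50) * 0.01)

    def __call__(self, x):
        return mg.matmul(x, self.W)

# a batch of (good-image descriptor, caption embedding, bad-image descriptor) triples
triple = [(np.random.rand(512), np.random.rand(50), np.random.rand(512))
          for _ in range(8)]

loss = mr_loss(FakeModel(), triple)
print(loss.item())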
Code example #8
 def __call__(self, x):
     # per-row mean (taken across each row's features)
     mu = mg.expand_dims(x.mean(axis=1), axis=1)
     # per-row variance
     sqr_mu = (x - mu)**2
     var = mg.expand_dims(sqr_mu.mean(axis=1), axis=1)
     # normalize
     xhat = (x - mu) / (mg.sqrt(var + self.eps))
     return mg.matmul(xhat, self.gamma.T) + self.beta
Code example #9
def cos_sim(v1, v2):
    '''
    Calculates the cosine similarity between two vectors.

    Parameters
    ----------
    v1 : vector of shape (1, M)
    v2 : vector of shape (1, M)

    Returns
    -------
    mygrad.Tensor, shape=(1,)
        The cosine similarity between ``v1`` and ``v2``.
    '''
    v1_sumsq = mg.sum(v1**2)
    v2_sumsq = mg.sum(v2**2)
    v1_mag = mg.sqrt(v1_sumsq)
    v2_mag = mg.sqrt(v2_sumsq)
    v1_norm = v1 / v1_mag
    v2_norm = v2 / v2_mag
    return mg.sum((v1_norm * v2_norm), axis=1)
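A quick sanity check with hand-picked vectors: identical vectors give a cosine similarity of 1, opposite vectors give -1.

import numpy as np
import mygrad as mg

v = np.array([[1.0, 2.0, 3.0]])

print(cos_sim(v, v).data)   # ≈ [1.]
print(cos_sim(v, -v).data)  # ≈ [-1.]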
Code example #10
    def __call__(self, x):
        """ The model's forward pass functionality.
        
        Parameters
        ----------
        x : numpy.ndarray, shape = (M,512)
            M is the number of rows
            
        Returns
        -------
        encoded : mygrad.Tensor, shape = (M, 50)
            Each row is a unit-length embedding.
        """

        unnorm_ans = self.dense1(x)

        # Turn each row of the output into a unit vector by dividing it by its L2 norm
        # (the square root of the sum of its squared entries).
        return unnorm_ans / mg.sqrt(mg.sum(unnorm_ans ** 2, axis=1, keepdims=True))
Code example #11
optim = SGD(model.parameters, learning_rate=lr, momentum=momentum)

plotter, figs, axes = create_plot(metrics=["loss"])

epochs = 5
batch_size = 32
train, validation = rohan_func()  # list of (image descriptor, caption embedding, bad descriptor) triples, shape (N,)
for ep in range(epochs):
    for batch in range(len(train) // batch_size):
        batch_triples = train[batch * batch_size:(batch + 1) * batch_size]

        d_img = np.array([t[0] for t in batch_triples])   # "good" image descriptors
        w_good = np.array([t[1] for t in batch_triples])  # "good" caption embeddings
        d_bad = np.array([t[2] for t in batch_triples])   # "bad" image descriptors

        w_bad = model(d_bad)  # shape (32, 50)
        w_img = model(d_img)  # shape (32, 50)

        dot_good = mg.sum(w_img * w_good, axis=1)  # shape (32,)
        dot_bad = mg.sum(w_img * w_bad, axis=1)  # shape (32,)
        loss = margin_ranking_loss(dot_good, dot_bad, 1, margin=0.1)

        loss.backward()

        optim.step()

        loss.null_gradients()

        plotter.set_train_batch(metrics={"loss": loss.item()}, batch_size=batch_size)

    plotter.set_train_epoch()
Code example #12
def l1_loss(preds, ans):
    """Returns the L1 loss summed over all entries and averaged over the rows."""
    l_val = mg.sum(mg.abs(preds - ans))
    # print(l_val)
    row, col = preds.shape
    return l_val / row
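A tiny worked example (numbers chosen by hand): the absolute errors are summed and then averaged over the two rows.

import numpy as np
import mygrad as mg

preds = mg.Tensor([[1.0, 2.0],
                   [3.0, 4.0]])
ans = np.array([[1.0, 0.0],
                [0.0, 4.0]])

loss = l1_loss(preds, ans)  # (0 + 2 + 3 + 0) / 2 rows = 2.5
print(loss.item())          # 2.5
loss.backward()             # gradients flow back to `preds`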
Code example #13
 def CrossEntropy(self, y_real, y_pred, eps=1e-10):
     # clip the predictions away from 0 and 1 so that the log is well-defined
     y_pred = mg.clip(y_pred, eps, 1. - eps)
     N = y_pred.shape[0]
     return -mg.sum(y_real * mg.log(y_pred)) / N
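A brief usage sketch; the method is wrapped in a minimal stand-in class here, since its enclosing class is not part of the snippet:

import numpy as np
import mygrad as mg

class Losses:
    # minimal stand-in for the class that defines CrossEntropy above
    def CrossEntropy(self, y_real, y_pred, eps=1e-10):
        y_pred = mg.clip(y_pred, eps, 1. - eps)
        N = y_pred.shape[0]
        return -mg.sum(y_real * mg.log(y_pred)) / N

y_real = np.array([[0., 1.], [1., 0.]])       # one-hot targets
y_pred = mg.Tensor([[0.2, 0.8], [0.9, 0.1]])  # predicted probabilities

loss = Losses().CrossEntropy(y_real, y_pred)
print(loss.item())  # -(log(0.8) + log(0.9)) / 2 ≈ 0.164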
Code example #14
def train(model,
          num_epochs,
          margin,
          triplets,
          learning_rate=0.1,
          batch_size=32):
    """ trains the model 
        
        Parameters
        ----------
        
        model -  Model
            an initizized Model class, with input and output dim matching the image ID(512) and the descriptor (50) 
        
        num_epochs - int
            amount of epochs
            
        margin - int
            marhine for the margine ranking loss
            
        triplets 
            triplets created with the data from all_triplets(path)
        
        learning_rate(optional) - int
            learning rate of SDG
            
        batch_size(optional) - int
            the batch size
            

        Returns
        -------
        it trains the model by minimizing the loss function
        
        """
    optim = SGD(model.parameters, learning_rate=learning_rate)
    triplets = load_resnet(r"data\triplets")
    #print(triplets[0:3])
    images = utils.get_img_ids()

    for epoch_cnt in range(num_epochs):
        idxs = np.arange(len(images))
        np.random.shuffle(idxs)

        for batch_cnt in range(0, len(images) // batch_size):

            batch_indices = idxs[batch_cnt * batch_size:(batch_cnt + 1) *
                                 batch_size]
            triplets_batch = [triplets[index] for index in batch_indices]
            #print(triplets_batch[0])

            good_pic_batch = np.array([val[1] for val in triplets_batch])
            bad_pic_batch = np.array([val[2] for val in triplets_batch])
            caption_batch = np.array([val[0] for val in triplets_batch])

            good_pic_pred = model(good_pic_batch)
            bad_pic_pred = model(bad_pic_batch)
            good_pic_pred = good_pic_pred / mg.sqrt(
                mg.sum(mg.power(good_pic_pred, 2), axis=-1, keepdims=True))
            bad_pic_pred = bad_pic_pred / mg.sqrt(
                (mg.sum(mg.power(bad_pic_pred, 2), axis=-1, keepdims=True)))
            #print(good_pic_pred.shape)

            # good_pic_pred = good_pic_pred.reshape(1600, 1, 1)
            # bad_pic_pred = bad_pic_pred.reshape(1600, 1, 1)
            # caption_batch = caption_batch.reshape(1600, 1, 1)

            Sgood = (good_pic_pred * caption_batch).sum(axis=-1)
            Sbad = (bad_pic_pred * caption_batch).sum(axis=-1)
            #print(Sgood.shape, Sbad.shape)
            # Sgood = Sgood.reshape(32, 50)
            # Sbad = Sbad.reshape(32, 50)

            loss = margin_ranking_loss(Sgood, Sbad, 1, margin)
            acc = accuracy(Sgood.flatten(), Sbad.flatten())
            if batch_cnt % 10 == 0:
                print(loss, acc)

            loss.backward()
            optim.step()
            loss.null_gradients()
Code example #15
File: trainer.py  Project: mkhan45/CogLens
def train(
        model,
        triples: List[
            Tuple[np.ndarray, np.ndarray,
                  np.ndarray]],  #caption embeds, good_images, bad_images
        optim,
        plotter,
        batch_size: int = 150,
        epoch_cnt: int = 1000,
        margin: float = 0.1):

    for epoch in range(epoch_cnt):
        idxs = np.arange(len(triples))
        np.random.shuffle(idxs)

        query_embeds, good_images, bad_images = unzip(triples)
        query_embeds = np.array(query_embeds)
        good_images = np.array(good_images)
        bad_images = np.array(bad_images)

        correct_list = []

        for batch_cnt in range(0, len(triples) // batch_size):
            batch_indices = idxs[batch_cnt * batch_size:(batch_cnt + 1) *
                                 batch_size]

            batch_query = query_embeds[batch_indices].reshape(batch_size, 50)
            good_batch = good_images[batch_indices].reshape(batch_size, 512)
            bad_batch = bad_images[batch_indices].reshape(batch_size, 512)

            # print(batch_query.shape)
            # print(good_batch.shape)
            # print(bad_batch.shape)
            # print("____")

            good_image_encode: mg.Tensor = model(good_batch)
            bad_image_encode: mg.Tensor = model(bad_batch)

            # print(good_image_encode.shape)
            # print(bad_image_encode.shape)
            # print("____")

            good_image_encode /= mg.sqrt(
                mg.sum(good_image_encode**2, axis=1).reshape(batch_size, 1))
            bad_image_encode /= mg.sqrt(
                mg.sum(bad_image_encode**2, axis=1).reshape(batch_size, 1))
            batch_query /= mg.sqrt(
                mg.sum(batch_query**2, axis=1).reshape(batch_size, 1))

            # print(good_image_encode.shape)
            # print(bad_image_encode.shape)
            # print(batch_query.shape)

            good_dists = mg.einsum("ij,ij -> i", good_image_encode,
                                   batch_query)
            bad_dists = mg.einsum("ij,ij -> i", bad_image_encode, batch_query)

            correct_list.append(good_dists - bad_dists > margin)

            loss: mg.Tensor = margin_ranking_loss(good_dists,
                                                  bad_dists,
                                                  1,
                                                  margin=margin)

            loss.backward()

            optim.step()

            loss.null_gradients()

            plotter.set_train_batch(
                {
                    "loss": loss.item(),
                    "acc": np.mean(np.array(correct_list))
                },
                batch_size=batch_size)

        plotter.set_train_epoch()
Code example #16
File: train_model.py  Project: summitwei/sisearch
    # # for mlb in range(500):
    # #     goodFeature2.append(captionEmbed[mlb]@goodFeature[mlb].reshape(50))
    # #     badFeature2.append(captionEmbed[mlb]@badFeature[mlb].reshape(50))
    # # goodFeature=goodFeature[goodFeature@captionEmbed]
    # # badFeature=badFeature[badFeature@captionEmbed]
    # print("pass2")
    # # goodFeature2=np.array(goodFeature2)
    # # badFeature2=np.array(badFeature2)
    # print(goodFeature2.shape)
    # print(badFeature2.shape)
    # print(goodFeature.shape)
    # print(badFeature.shape)
    # print(captionEmbed.shape)
    goodFeature2 = captionEmbed * goodFeature.reshape(500, 50)
    badFeature2 = captionEmbed * badFeature.reshape(500, 50)
    goodFeature2 = mg.sum(goodFeature2, axis=1)
    badFeature2 = mg.sum(badFeature2, axis=1)

    loss = mg.nnet.margin_ranking_loss(goodFeature2, badFeature2, 1, 0.1)
    for ggg in range(0, 500):
        if goodFeature2[ggg] - badFeature2[ggg] > 0.5:
            accuracy += 1
    lossSoFa.append(loss.item())
    loss.backward()
    optim.step()
    loss.null_gradients()
    # plotter.set_train_batch({"loss": loss.item()}, batch_size=500)
    if index % 8 == 0:
        #     plotter.set_train_epoch()
        print("a:%s, loss:%s,accuracy: %s" %
              (index * 500, np.mean(lossSoFa), accuracy / (index * 500)))