Example #1
def _compute_distance_scores(obs1, obs2, segments1, segments2):
    scores = [[] for _ in range(len(segments1))]
    base_dist = utils.euclidean_dist(obs1, obs2)
    for i, segment1 in enumerate(segments1):
        projection1 = utils.get_projection(segment1['endpoints'], obs1)
        for segment2 in segments2:
            projection2 = utils.get_projection(segment2['endpoints'], obs2)
            dist = utils.euclidean_dist(projection1, projection2)
            dist_diff = abs(dist - base_dist)
            scores[i].append(1.0 / (1.0 + dist_diff))
    return scores
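
Many snippets on this page call a `euclidean_dist` helper that is never shown. For the examples that compare individual points (such as #1, #5, #9, #10, and #26), a minimal point-to-point sketch under the assumption that points are equal-length coordinate sequences could look like the following; note that a few examples (#8, #15) instead pass four scalars (x1, y1, x2, y2), so the exact signature varies per project.

import math

def euclidean_dist(x, y):
    """Straight-line distance between two equal-length coordinate tuples."""
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(x, y)))
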
Example #2
def test(test_loader, model, args):
    print('Testing...')
    losses = AverageMeter()
    accuracy = AverageMeter()

    # Switch to evaluate mode
    model.eval()

    with torch.no_grad():
        for n_episode, batch in enumerate(test_loader, 1):
            data, _ = [_.cuda(non_blocking=True) for _ in batch]
            p = args.n_support * args.n_way
            data_support, data_query = data[:p], data[p:]

            # Compute class prototypes (n_way, output_dim)
            class_prototypes = model(data_support).reshape(args.n_support, args.n_way, -1).mean(dim=0)

            # Generate labels (n_way, n_query)
            labels = torch.arange(args.n_way).repeat(args.n_query)
            labels = labels.type(torch.cuda.LongTensor)

            # Compute loss and metrics
            logits = euclidean_dist(model(data_query), class_prototypes)
            loss = F.cross_entropy(logits, labels)
            acc = compute_accuracy(logits, labels)

            # Record loss and accuracy
            losses.update(loss.item(), data_query.size(0))
            accuracy.update(acc, data_query.size(0))

        print('Test Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Test Accuracy {accuracy.val:.3f} ({accuracy.avg:.3f})\t'.format(loss=losses, accuracy=accuracy))

    return losses.avg, accuracy.avg
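
The Prototypical-Network-style examples (#2, #3, #4, #13, #16, and others) assume a pairwise `euclidean_dist` that maps a batch of query embeddings and a set of class prototypes to an (n_query, n_way) distance matrix. A minimal PyTorch sketch in the spirit of the widely used ProtoNet reference implementation (which returns squared distances) is shown below; some examples negate the result before treating it as logits.

import torch

def euclidean_dist(x, y):
    # x: (n, d) query embeddings, y: (m, d) class prototypes
    # returns an (n, m) matrix of squared Euclidean distances
    n, m, d = x.size(0), y.size(0), x.size(1)
    assert y.size(1) == d
    x = x.unsqueeze(1).expand(n, m, d)
    y = y.unsqueeze(0).expand(n, m, d)
    return torch.pow(x - y, 2).sum(2)
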
Example #3
def train_loop(model, epoch, device, train_loader, optimizer):

    avg_loss = 0
    avg_acc = 0
    for i, batch in enumerate(train_loader):
        data, label = [_.to(device) for _ in batch]
        data_shot = data[:, :params.n_shot, :, :, :]
        data_query = data[:, params.n_shot:, :, :, :]
        data_shot = data_shot.reshape(-1, 3, 84, 84)
        embedding = model(data_shot)
        proto = embedding.reshape(params.train_n_way, params.n_shot,
                                  -1).mean(1)

        label = torch.from_numpy(
            np.repeat(range(params.train_n_way), params.query))
        label = label.type(torch.cuda.LongTensor)
        data_query = data_query.reshape(-1, 3, 84, 84)
        inference = model(data_query)
        inference = inference.reshape(params.train_n_way * params.query, -1)
        scores = -euclidean_dist(inference, proto)
        optimizer.zero_grad()

        loss = nn.CrossEntropyLoss()(scores, label)
        loss.backward()
        optimizer.step()

        avg_loss = avg_loss + loss.item()
        pred = scores.argmax(dim=1)
        avg_acc += (pred == label).type(torch.FloatTensor).mean().item()

    print('Epoch {:d} | loss={:.4f} | acc={:.4f}'.format(
        epoch, avg_loss / len(train_loader), avg_acc / len(train_loader)))
Example #4
def test_loop(model, device, val_loader):

    iter_num = len(val_loader)
    acc_all = []
    for i, batch in enumerate(val_loader):
        data, _ = [_.to(device) for _ in batch]
        data_shot = data[:, :params.n_shot, :, :, :]
        data_query = data[:, params.n_shot:, :, :, :]
        data_shot = data_shot.reshape(-1, 3, 84, 84)
        proto = model(data_shot)
        proto = proto.reshape(params.test_n_way, params.n_shot, -1).mean(dim=1)
        label = torch.from_numpy(
            np.repeat(range(params.test_n_way), params.query))
        #if device=='cuda:0':
        label = label.type(torch.cuda.LongTensor)
        data_query = data_query.reshape(-1, 3, 84, 84)
        scores = -euclidean_dist(model(data_query), proto)

        #loss = nn.CrossEntropyLoss()(scores, label)
        # F.cross_entropy(scores, label)

        pred = scores.argmax(dim=1)
        acc = (pred == label).type(torch.FloatTensor).mean().item()
        acc_all.append(acc)

    acc_all = np.asarray(acc_all)
    acc_mean = np.mean(acc_all)
    acc_std = np.std(acc_all)
    print('%d Test Acc = %4.2f%% +- %4.2f%%' %
          (iter_num, acc_mean * 100, 100 * 1.96 * acc_std / np.sqrt(iter_num)))
    return acc_mean, acc_std
Example #5
def find_k_nearest(k, point_star):
    """
	Function to loop through each line in the stdin and keep track of the k
	closest points

	Args:
		k: the number of closest neighbors to find
		point_star: the star for which to find the k closest neighbors

	Returns:
		max_heap: a heap containing the k closest distances to the point_star
		star_dist: a mapping of {dist-d1: [list of stars at distance d1 from point_star]}
	"""
    max_heap = []
    star_dist = defaultdict(list)
    # skip header and sun row
    utils.skip_line(2)
    for line in sys.stdin:
        star_name, x, y, z = utils.parse_line(line=line)
        dist = utils.euclidean_dist(x=(x, y, z), y=point_star)
        new_star = star(x=x, y=y, z=z, star_name=star_name)
        if len(max_heap) < k:
            heappush(max_heap, -1 * dist)
            star_dist[dist].append(new_star)
        elif dist <= -1 * max_heap[0]:
            heappushpop(max_heap, -1 * dist)
            star_dist[dist].append(new_star)
    return max_heap, star_dist
Example #6
def extract_features(des, codebook):
    """
  Construct the Bag-of-visual-Words histogram features for images using the codebook.
  HINT: Refer to helper functions.

  :param des(numpy.array): Descriptors.  shape:[num_images, num_des_of_each_img, 128]
  :param codebook(numpy.array): Bag of visual words. shape:[k, 128]
  :return(numpy.array): Bag of visual words shape:[num_images, k]

  """
    # YOUR CODE HERE
    bow_hist = []
    k = codebook.shape[0]
    for i in range(len(des)):
        # data = copy.deepcopy(des[i])
        dist = euclidean_dist(des[i], codebook)
        cluster_idx = np.argmin(dist, axis=1)

        targets = cluster_idx.reshape(-1)
        one_hot_targets = np.eye(k)[targets]
        # Sum one-hot assignments over descriptors to get a k-bin histogram per image
        bow_hist.append(one_hot_targets.sum(axis=0))

    return np.array(bow_hist)


# features = np.zeros((2472, 1024))

# dist = cdist(obs, code_book)
# code = dist.argmin(axis=1)
# min_dist = dist[np.arange(len(code)), code]
# return code, min_dist
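
Example #6 appears to assume a NumPy variant of `euclidean_dist` that returns the full [num_des, k] matrix of distances between an image's descriptors and the codebook, equivalent to the `cdist` call in the commented-out lines above. A minimal sketch under that assumption:

import numpy as np

def euclidean_dist(a, b):
    """Pairwise Euclidean distances between the rows of a [n, d] and b [m, d] -> [n, m]."""
    diff = a[:, np.newaxis, :] - b[np.newaxis, :, :]
    return np.sqrt((diff ** 2).sum(axis=-1))
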
Example #7
    def forward_pred(self, data_shot, data_query, n_way, k_shot):
        proto = self.forward(data_shot)
        proto = proto.view(n_way, k_shot, -1).mean(dim=1)

        query = self.forward(data_query)
        logits = euclidean_dist(query, proto)
        return logits
Example #8
 def _calc_num_steps_to_next_node(self):
     dist = utils.euclidean_dist(
         self.current_node['x'], self.current_node['y'],
         self.next_node['x'], self.next_node['y']
     )
     num_of_steps_to_next_node = int(round(dist / STEP_LENGTH))
     self.log.info("%d steps to next node", num_of_steps_to_next_node)
     return num_of_steps_to_next_node
Example #9
def _get_dist_matrix(data, eps):
    dist_matrix = [[True] * len(data) for i in range(len(data))]
    for p in range(len(data)):
        for q in range(p + 1, len(data)):
            dist = euclidean_dist(data[p], data[q])
            if (dist > eps):
                dist_matrix[p][q] = dist_matrix[q][p] = False
    return dist_matrix
Example #10
def calc_distances(nn_tree):
    n_centroids = nn_tree.get_arrays()[0].shape[0]

    distances = np.ndarray((n_centroids, n_centroids))
    for i in range(n_centroids):
        for j in range(n_centroids):
            distances[i, j] = euclidean_dist(nn_tree.get_arrays()[0][i],
                                             nn_tree.get_arrays()[0][j])

    return distances
Example #11
def closest_cluster_to(target, clusters):
    min_dist = float('inf')
    min_idx = None
    # clusters = self._enemy_clusters if enemy else self._clusters
    for idx, cluster in clusters.items():
        curr_dist = euclidean_dist(cluster.get_center(), target)
        if curr_dist < min_dist and curr_dist != 0:
            min_dist = curr_dist
            min_idx = idx

    return min_idx
Example #12
    def score(self, x):
        preds = []
        for l in range(self.encoder.depth):
            enc = lambda x: self.encoder.intermediate_forward(x, l, avg_pool=True).view(x.size(0), -1)
            zq = enc(x)
            zs = enc(self.s)

            preds.append(-euclidean_dist(zs, zq))  # (S, N)
        preds = torch.stack(preds) # (L, C, N)
        preds = preds.sum(0).t() # (N, C)
        return preds.max(1)[0]
Example #13
def validate(val_loader, model, att, args):
    print('Validating...')
    losses = AverageMeter()
    accuracy = AverageMeter()

    # Switch to evaluate mode
    model.eval()
    att.eval()

    with torch.no_grad():
        for n_episode, batch in enumerate(val_loader, 1):
            data, _ = [_.cuda(non_blocking=True) for _ in batch]
            p = args.n_support * args.n_way_val
            data_support, data_query = data[:p], data[p:]

            # Compute class prototypes (n_way, output_dim)
            # Calculate weighted averages for class prototypes
            # (n_support, n_way_val, feature_dimension)
            latent_vecs_val = model(data_support).reshape(
                args.n_support, args.n_way_val, -1)

            # (n_way_val, n_support, feature_dimension)
            latent_vecs_val = latent_vecs_val.transpose(0, 1)

            # _, scores_val = att(latent_vecs_val)
            scores_val = F.softmax(att(latent_vecs_val), dim=1)
            # scores_val = scores_val.unsqueeze(-1).expand_as(latent_vecs_val)
            scores_val = scores_val.expand_as(latent_vecs_val)
            # class_prototypes = torch.sum(
            #     torch.matmul(scores_val, latent_vecs_val), 1)
            class_prototypes = torch.sum(
                torch.mul(scores_val, latent_vecs_val), 1)
            # class_prototypes = att(model(data_support)).reshape(
            #     args.n_support, args.n_way_val, -1).mean(dim=0)

            # Generate labels (n_way, n_query)
            labels = torch.arange(args.n_way_val).repeat(args.n_query_val)
            labels = labels.type(torch.cuda.LongTensor)

            # Compute loss and metrics
            logits = euclidean_dist(model(data_query), class_prototypes)
            loss = F.cross_entropy(logits, labels)
            acc = compute_accuracy(logits, labels)

            # Record loss and accuracy
            losses.update(loss.item(), data_query.size(0))
            accuracy.update(acc, data_query.size(0))

        print('Validation Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Validation Accuracy {accuracy.val:.3f} ({accuracy.avg:.3f})\t'.
              format(loss=losses, accuracy=accuracy))

    return losses.avg, accuracy.avg
Example #14
 def is_near_depot(self, lat_lng):
     ''' Computes the distance from the depot based on lat and lng.
     If it is below config.depot_radius, it tries to connect to the WiFi
     every two seconds by calling try_wireless_connection.
     If it moves away from the depot, the action is cancelled.
     '''
     dist = euclidean_dist(lat_lng, self.config.depot_location)
     # 0.00001 degrees is about a meter
     # print("Is near depot", dist, lat_lng)
     if dist <= self.config.depot_radius and not self.checking_edge_connection:
         self.checking_edge_connection = setInterval(
             self.config.retry_connection_delay, self.try_wireless_connection)
     elif dist > self.config.depot_radius and self.checking_edge_connection:
         self.checking_edge_connection.cancel()
Example #15
def hello_render(request):
    today = date.today()

    dist = euclidean_dist(0, 0, 2, 2)

    area = rectanglearea(0, 0, 2, 2)

    context = {
        'person': "Jhon Doe",
        'date': today,
        'distance': dist,
        'area': area
    }
    return render(request, "catalog/templates/hello.html", context)
Example #16
    def forward(self, data, mode='test'):
        args = self.args
        data = data.view(args.n_way * (args.n_shot + args.n_query),
                         *data.size()[2:])
        feature = self.encoder(data)

        feature = feature.view(args.n_way, args.n_shot + args.n_query, -1)
        z_support = feature[:, :args.n_shot]
        z_query = feature[:, args.n_shot:]

        proto = z_support.view(args.n_way, args.n_shot, -1).mean(1)
        z_query = z_query.contiguous().view(args.n_way * args.n_query, -1)
        logits = -euclidean_dist(z_query, proto) / self.args.temperature
        return logits
Example #17
    def predict_batch(self, img_paths_list, top_k):
        # load inference samples
        infer_imgs = list()
        for path in img_paths_list:
            infer_imgs.append(torch.tensor(load_img(path)))  # list of tensor
        X = torch.stack(infer_imgs)

        # load model
        model = ProtoNet().cpu()
        model.load_state_dict(torch.load(self.model_path, map_location='cpu'))
        model.eval()

        # start inferring
        pred_label_list = list()
        pred_class_name = list()
        pred_class_sku = list()
        pred_class_prob = list()

        model_output = model(X)  # [batch_size,128]
        dists = euclidean_dist(
            model_output.to('cpu'),
            self.prototypes.to('cpu'))  # [batch_size,num_classes]
        dists = dists.data.cpu().numpy()
        sorted_dists = np.sort(dists, axis=1)
        sorted_idxs = np.argsort(dists, axis=1)
        # whether reject
        threshold = 15.0
        mask = sorted_dists < threshold

        for i in range(len(infer_imgs)):
            pred_class_prob.append(sorted_dists[i][mask[i]][:top_k].tolist())
            pred_label_list.append(
                self.labels[sorted_idxs[i]][mask[i]][:top_k].tolist())
            pred_class_sku.append(
                [self.idx2sku[idx] for idx in pred_label_list[i]])
            pred_class_name.append(
                [self.sku2name[idx] for idx in pred_class_sku[i]])

        result = []  # list of dict for each image
        for i in range(len(infer_imgs)):
            cur_img_result = {
                'name': pred_class_name[i],
                'prob': pred_class_prob[i],
                'sku': pred_class_sku[i]
            }
            result.append(cur_img_result)

        return result
Example #18
    def cheapest_insertion(i, seq):
        '''
        returns (cost, j, k)
        where cost, int -- the cost of the insertion (positive is bad)
              j, int -- the index of the element that i will be inserted after
              k, int -- the index of the element that i will be inserted before

        (so we end up with something like this: ... -> j -> i -> k -> ...)
        '''
        L = []
        for j, k in zip(seq, seq[1:]):
            old_edge = utils.euclidean_dist(X[[j, k]])
            new_edge = D[j, i] + D[i, k]
            cost = -old_edge + new_edge
            L.append((cost, j, k))
        return min(L, key=lambda x: x[0])
Example #19
    def retrain(self, img_paths_list, class_name, sku):

        self.labelID += 1

        infer_imgs = []
        for p in img_paths_list:
            infer_imgs += [
                transforms.ToTensor()(im) for im in image_enforce(p)
            ]
        X = torch.stack(infer_imgs)

        # load model
        model = ProtoNet().cpu()
        model.load_state_dict(torch.load(self.model_path, map_location='cpu'))
        model.eval()

        # compute new prototype
        model_output = model(X)  # [batch_size,128]
        batch_prototype = model_output.mean(0)
        batch_prototype = batch_prototype.unsqueeze(0)

        # Check whether the new prototype fails to map to a distinguishable embedding
        threshold = 0.0
        dists = euclidean_dist(
            batch_prototype.to('cpu'),
            self.prototypes.to('cpu'))  # [1, num_classes]
        min_dist = torch.min(dists).item()
        if min_dist < threshold:
            index = np.argmin(dists)
            sim_lblid = self.labels[index]
            info = {
                'msg': 'fail',
                'similar_object_name': self.sku2name[self.idx2sku[sim_lblid]],
                'similar_object_sku': self.idx2sku[sim_lblid]
            }
            return info

        # add new class info
        self.prototypes = torch.cat([self.prototypes, batch_prototype], 0)
        self.labels = np.concatenate((self.labels, [self.labelID]), axis=0)
        self.idx2sku[self.labelID] = sku
        self.sku2name[sku] = class_name

        info = {'msg': 'success'}
        return info
Example #20
    def forward(self, data, mode='train'):
        args = self.args
        if mode in ['val', 'test']:
            data = data.view(args.n_way * (args.n_shot + args.n_query),
                             *data.size()[2:])
            feature = self.encoder(data)
            feature = feature.view(args.n_way, args.n_shot + args.n_query, -1)
            z_support = feature[:, :args.n_shot]
            z_query = feature[:, args.n_shot:]

            proto = z_support.view(args.n_way, args.n_shot, -1).mean(1)
            z_query = z_query.contiguous().view(args.n_way * args.n_query, -1)
            scores = -euclidean_dist(z_query, proto) / self.args.temperature
        else:
            feature = self.encoder(data)
            return feature
            # scores  = self.classifier(feature)
        return scores
Example #21
def _get_dists(data, centers):
    dists = [None] * len(data)
    for p in range(len(data)):
        dists[p] = [euclidean_dist(data[p], center) for center in centers]
    return dists
Example #22
def evaluation(data_loader, class_prototypes, model, classes, args):
    queries_data = {n: [] for n in range(len(classes))}
    losses = AverageMeter()
    accuracy_1 = AverageMeter()
    accuracy_5 = AverageMeter()
    metrics_class = {}
    with torch.no_grad():
        print('Forwarding queries...')
        for data, targets in data_loader:
            data = data.cuda(non_blocking=True)
            outputs = model(data)
            for i, output in enumerate(outputs):
                queries_data[targets[i].item()].append(output)
        mean_accuracy = []
        for key, values in queries_data.items():
            if len(values) > 0:
                logits = euclidean_dist(torch.stack(values), class_prototypes)
                labels = torch.Tensor([key]).repeat(len(values)).type(
                    torch.cuda.LongTensor)
                loss = F.cross_entropy(logits, labels).item()
                acc = accuracy_top_k(logits, labels, top_k=(1, 2, 3, 4, 5))
                acc = [a.item() for a in acc]
                metrics_class[key] = {
                    'class_name': classes[key],
                    'accuracy': acc[0],
                    'loss': loss,
                    'n_samples': len(values)
                }
                # Record loss and accuracy
                losses.update(loss, len(values))
                accuracy_1.update(acc[0], len(values))
                accuracy_5.update(acc[4], len(values))
                mean_accuracy.append(acc[0])
                # print('Class ' + classes[key])
                printer.pprint(metrics_class[key])
            else:
                metrics_class[key] = {
                    'class_name': classes[key],
                    'accuracy': np.nan,
                    'loss': np.nan,
                    'n_samples': 0
                }
                # print('Class ' + classes[key] + ' is empty')

    mean_accuracy = sum(mean_accuracy) / len(mean_accuracy)

    print('Total Weighted Top-1 Accuracy %.4f\n'
          'Total Weighted Top-5 Accuracy %.4f\n'
          'Mean Class Top-1 Accuracy %.4f\n'
          'Total Avg Loss %.4f' %
          (accuracy_1.avg, accuracy_5.avg, mean_accuracy, losses.avg))

    class_metrics_df = pd.DataFrame.from_dict(metrics_class, 'index')
    class_metrics_df = class_metrics_df.round(4)
    class_metrics_df.to_csv(os.path.join(results_path, args.results_name +
                                         '_individual.csv'),
                            index=False,
                            float_format='%.4f')

    average_df = pd.DataFrame({
        'accuracy_top_1': [accuracy_1.avg],
        'accuracy_top_5': [accuracy_5.avg],
        'sensitivity': [mean_accuracy]
    })
    average_df = average_df.round(4)
    average_df.to_csv(os.path.join(results_path,
                                   args.results_name + '_average.csv'),
                      index=False,
                      float_format='%.4f')
Example #23
    def forward(self, input):
        x, labels, idx_train, idx_val, idx_test = input  # x is N * L, where L is the time-series feature dimension

        if True:
            N = x.size(0)

            # LSTM
            if self.use_lstm:
                x_lstm = self.lstm(x)[0]
                x_lstm = x_lstm.mean(1)
                x_lstm = x_lstm.view(N, -1)

            if self.use_cnn:
                # Convolutional Network
                # input ts: # N * C * L
                if self.use_rp:
                    for i in range(len(self.conv_1_models)):
                        #x_conv = x
                        x_conv = self.conv_1_models[i](x[:, self.idx[i], :])
                        x_conv = self.conv_bn_1(x_conv)
                        x_conv = F.leaky_relu(x_conv)

                        x_conv = self.conv_2(x_conv)
                        x_conv = self.conv_bn_2(x_conv)
                        x_conv = F.leaky_relu(x_conv)

                        x_conv = self.conv_3(x_conv)
                        x_conv = self.conv_bn_3(x_conv)
                        x_conv = F.leaky_relu(x_conv)

                        x_conv = torch.mean(x_conv, 2)

                        if i == 0:
                            x_conv_sum = x_conv
                        else:
                            x_conv_sum = torch.cat([x_conv_sum, x_conv], dim=1)

                    x_conv = x_conv_sum
                else:
                    x_conv = x
                    x_conv = self.conv_1(x_conv)  # N * C * L
                    x_conv = self.conv_bn_1(x_conv)
                    x_conv = F.leaky_relu(x_conv)

                    x_conv = self.conv_2(x_conv)
                    x_conv = self.conv_bn_2(x_conv)
                    x_conv = F.leaky_relu(x_conv)

                    x_conv = self.conv_3(x_conv)
                    x_conv = self.conv_bn_3(x_conv)
                    x_conv = F.leaky_relu(x_conv)

                    x_conv = x_conv.view(N, -1)

            if self.use_lstm and self.use_cnn:
                x = torch.cat([x_conv, x_lstm], dim=1)
            elif self.use_lstm:
                x = x_lstm
            elif self.use_cnn:
                x = x_conv
            #

        # linear mapping to low-dimensional space
        x = self.mapping(x)

        # generate the class prototypes with dimension C * D (nclass * dim)
        proto_list = []
        for i in range(self.nclass):
            idx = (labels[idx_train].squeeze() == i).nonzero().squeeze(1)
            if self.use_att:
                A = self.att_models[i](x[idx_train][idx])  # N_k * 1
                A = torch.transpose(A, 1, 0)  # 1 * N_k
                A = F.softmax(A, dim=1)  # softmax over N_k

                class_repr = torch.mm(A, x[idx_train][idx])  # 1 * L
                class_repr = torch.transpose(class_repr, 1, 0)  # L * 1
            else:  # if do not use attention, simply use the mean of training samples with the same labels.
                class_repr = x[idx_train][idx].mean(0)  # L * 1
            proto_list.append(class_repr.view(1, -1))
        x_proto = torch.cat(proto_list, dim=0)

        # prototype distance
        proto_dists = euclidean_dist(x_proto, x_proto)
        proto_dists = torch.exp(-0.5 * proto_dists)
        num_proto_pairs = int(self.nclass * (self.nclass - 1) / 2)
        proto_dist = torch.sum(proto_dists) / num_proto_pairs

        dists = euclidean_dist(x, x_proto)

        dump_embedding(x_proto, x, labels)
        return torch.exp(-0.5 * dists), proto_dist
Example #24
 def is_on_target(self):
     return euclidean_dist(self.get_center(), self.target_loc) < 1
Example #25
    def log_p_y(self, xs, xq, no_grad=False, mask=None):
        """log_p_y

        Args:
            xs: support set (#way, #shot, ...)
            xq: query set (#query classes, #queries, ...)
            no_grad: if True, forward the encoder without tracking gradients (when supported)
            mask: optional mask over the support shots, used when averaging prototypes
        Return:
            a dict of {
                'log_p_y': log p(y) (#query classes, #queries, #way)
                'target_inds': ...
                'logits': ...
            }
        """
        ret_dict = {}

        # xs.size() == (50, 3, 1, 28, 28)  for 50-way, 3-shot classification
        # xq.size() == (50, 5, 1, 28, 28)  for 50-way, 5 query points for each of the 50 classes
        n_class = xs.size(0)
        n_query_class = xq.size(0)
        n_support = xs.size(1)
        n_query = xq.size(1)

        # Combines the support and query points into one "minibatch" to get embeddings
        x = torch.cat([xs.reshape(n_class * n_support, *xs.size()[2:]),
                       xq.reshape(n_query_class * n_query, *xq.size()[2:])], 0)

        try:
            z = self.encoder(x, no_grad=no_grad)
        except TypeError:
            z = self.encoder(x)  # If the encoder doesn't support option no_grad

        z_dim = z.size(-1)
        supports = z[:n_class*n_support].view(n_class, n_support, z_dim)
        ret_dict['supports'] = supports
        # Prototype representations of the support images in each class: (50, 64)
        if mask is not None:
            mask = mask.unsqueeze(-1)
            # ipdb.set_trace()
            z_proto = (supports * mask).sum(1) / mask.sum(1)
        else:
            z_proto = supports.mean(1)
            z_proto_var = supports.var(1)

        ret_dict['z'] = z  # z.shape == (50, 256)
        # Embeddings of the query points: (500, 64)
        zq = z[n_class*n_support:]
        ret_dict['zq'] = zq

        def _vectorized_mog_logp(zq, z_proto_m, z_proto_var):
            res = []
            for ci, (_m, _v) in enumerate(zip(z_proto_m, z_proto_var)):
                _m, _v = _m.unsqueeze(0), _v.unsqueeze(0)
                _v = _v + 1e-7
                logp = -1 * (torch.log(_v).sum() +
                             (torch.pow(zq - _m , 2) / _v).sum(-1))
                res.append(logp)
            return torch.transpose(torch.stack(res), 0, 1)


        if self.decision == 'baseline':
            # Distances between each query embedding and each class prototype
            dists = euclidean_dist(zq, z_proto)

            # Class logits for each query point and each class: (50, 10, 50) [#query classes, #queries per class, #support classes] ?
            log_p_y = F.log_softmax(-dists, dim=1).view(n_query_class, n_query, -1)
            # Prepare logits
            ret_dict['logits'] = (-dists).view(n_query_class, n_query, -1)
        elif self.decision == 'maha':
            raise NotImplementedError("Cannot fit a full covariance because N < D")
            # tied full-covariance
            diff = supports - z_proto.unsqueeze(1) # (way, shot, dim)
            diff = diff.view(-1, diff.shape[-1]) # (way*shot, dim) b/c shared across way
            cov = torch.mean(torch.stack([torch.ger(vec,vec) for vec in diff]),0)
            mahas = []
            for _zq in zq:
                mahas.append()
        elif self.decision == 'tied-var':
            if mask is not None:
                raise NotImplementedError("implement the masking part")
            z_proto_var = supports.view(-1, supports.shape[-1]).var(0).unsqueeze(0).repeat(5,1)
            log_p_y = _vectorized_mog_logp(zq, z_proto, z_proto_var).view(n_query_class, n_query, -1)
            ret_dict['logits'] = log_p_y

        ret_dict['log_p_y']  = log_p_y
        # Prepare target_inds
        if n_class == n_query_class:
            target_inds = torch.arange(0, n_class).view(n_class, 1, 1).expand(n_class, n_query, 1).long()
            target_inds.requires_grad = False
            if xq.is_cuda:
                target_inds = target_inds.cuda()
            ret_dict['target_inds'] = target_inds

        return ret_dict
Example #26
def _no_shift(centers, new_centers, tol):
    shifts = [
        euclidean_dist(centers[i], new_centers[i]) for i in range(len(centers))
    ]
    no_shift = all([shift <= tol for shift in shifts])
    return no_shift
Example #27
def _get_shifts(centers, new_centers):
    shifts = [
        euclidean_dist(centers[i], new_centers[i]) for i in range(len(centers))
    ]
    return shifts
Example #28
    with torch.no_grad():
        for i, batch in enumerate(novel_loader):
            data, _ = [_.to(device) for _ in batch]
            data_shot = data[:, :params.n_shot, :, :, :]
            data_query = data[:, params.n_shot:, :, :, :]
            data_shot = data_shot.reshape(-1, 3, 84, 84)
            proto = model(data_shot)
            proto = proto.reshape(params.test_n_way, params.n_shot,
                                  -1).mean(dim=1)
            label = torch.from_numpy(
                np.repeat(range(params.test_n_way), params.query))
            # if device=='cuda:0':
            label = label.type(torch.cuda.LongTensor)
            data_query = data_query.reshape(-1, 3, 84, 84)
            scores = -euclidean_dist(model(data_query), proto)

            #loss = nn.CrossEntropyLoss()(scores, label)
            # F.cross_entropy(scores, label)

            pred = scores.argmax(dim=1)
            acc = (pred == label).type(torch.FloatTensor).mean().item()
            acc_all.append(acc)

        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        acc_std = np.std(acc_all)
        print('%d Test Acc = %4.2f%% +- %4.2f%%' %
              (iter_num, acc_mean * 100,
               100 * 1.96 * acc_std / np.sqrt(iter_num)))
Example #29
def train(train_loader, model, optimizer, epoch, args):
    print("Training epoch %d" % epoch)
    episode_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracy = AverageMeter()

    # Switch to train mode
    model.train()

    end = time.time()

    optimizer.zero_grad()

    # Iterate over episodes
    for n_episode, batch in enumerate(train_loader, 1):
        data_time.update(time.time() - end)
        data, _ = [_.cuda(non_blocking=True) for _ in batch]
        p = args.n_support * args.n_way_train
        data_support, data_query = data[:p], data[p:]

        # Compute class prototypes (n_way, output_dim)
        if n_episode > 1 and args.alpha > 0.0:
            class_prototypes = args.alpha * class_prototypes + (1 - args.alpha) * \
                model(data_support).reshape(args.n_support, args.n_way_train, -1).mean(dim=0)
        else:
            class_prototypes = model(data_support).reshape(
                args.n_support, args.n_way_train, -1).mean(dim=0)

        # Generate labels (n_way, n_query)
        labels = torch.arange(args.n_way_train).repeat(args.n_query_train)
        labels = labels.type(torch.cuda.LongTensor)

        # Compute loss and metrics
        logits = euclidean_dist(model(data_query), class_prototypes)
        loss = F.cross_entropy(logits, labels)
        acc = compute_accuracy(logits, labels)

        # Record loss and accuracy
        losses.update(loss.item(), data_query.size(0))
        accuracy.update(acc, data_query.size(0))

        # Compute gradient and do SGD step
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        # Free the graph
        if args.alpha > 0.0:
            class_prototypes = class_prototypes.detach()
        else:
            class_prototypes = None

        # Measure elapsed time
        episode_time.update(time.time() - end)
        end = time.time()

        if n_episode % args.print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\t'
                'Episode Time {episode_time.val:.3f} ({episode_time.avg:.3f})\t'
                'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                'Accuracy {accuracy.val:.3f} ({accuracy.avg:.3f})\t'.format(
                    epoch,
                    n_episode,
                    args.n_episodes_train,
                    episode_time=episode_time,
                    data_time=data_time,
                    loss=losses,
                    accuracy=accuracy))

    return losses.avg, accuracy.avg
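
The training and evaluation loops above (Examples #2, #13, #22, #29) also rely on an AverageMeter helper that is not shown. A minimal sketch, following the well-known class from the PyTorch ImageNet example, exposing the .val and .avg attributes used in the print statements:

class AverageMeter:
    """Tracks the most recent value and a running average."""

    def __init__(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
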
Example #30
    sum_loss = 0
    sum_tr_loss = 0
    sum_clf_loss = 0

    correct = 0
    overall = 0
    for n, (x, y) in enumerate(trainloader):

        classes = y.clone()
        if gpu:
            x = x.to('cuda')
            y = y.to('cuda')

        features, logits = model(x, y)

        dist = euclidean_dist(features, features)
        labels = torch.cuda.LongTensor(
            np.repeat(np.arange(dist.shape[0] // 3), 3)).unsqueeze(1)

        pos = labels.eq(labels.t())
        neg = labels.ne(labels.t())

        dist_ap = torch.max(dist[pos].view(dist.shape[0], -1), 1)[0]
        dist_an = torch.min(dist[neg].view(dist.shape[0], -1), 1)[0]

        optimizer.zero_grad()

        tr_loss = triplet_loss(dist_ap=dist_ap, dist_an=dist_an)
        clf_loss = F.cross_entropy(logits, y)
        loss = tr_loss + clf_loss