    def predict_batch(self, img_paths_list, top_k):
        # load inference samples
        infer_imgs = [torch.tensor(load_img(path)) for path in img_paths_list]
        X = torch.stack(infer_imgs)  # [batch_size, ...]

        # load model
        model = ProtoNet().cpu()
        model.load_state_dict(torch.load(self.model_path, map_location='cpu'))
        model.eval()

        # run inference
        pred_label_list = []
        pred_class_name = []
        pred_class_sku = []
        pred_class_prob = []

        with torch.no_grad():
            model_output = model(X)  # [batch_size, 128]
            dists = euclidean_dist(
                model_output.cpu(),
                self.prototypes.cpu())  # [batch_size, num_classes]
        dists = dists.cpu().numpy()
        sorted_dists = np.sort(dists, axis=1)
        sorted_idxs = np.argsort(dists, axis=1)
        # reject candidates whose prototype distance exceeds the threshold
        threshold = 15.0
        mask = sorted_dists < threshold

        for i in range(len(infer_imgs)):
            # 'prob' holds prototype distances (smaller = more similar), not probabilities
            pred_class_prob.append(sorted_dists[i][mask[i]][:top_k].tolist())
            pred_label_list.append(
                self.labels[sorted_idxs[i]][mask[i]][:top_k].tolist())
            pred_class_sku.append(
                [self.idx2sku[idx] for idx in pred_label_list[i]])
            pred_class_name.append(
                [self.sku2name[idx] for idx in pred_class_sku[i]])

        result = []  # list of dict for each image
        for i in range(len(infer_imgs)):
            cur_img_result = {
                'name': pred_class_name[i],
                'prob': pred_class_prob[i],
                'sku': pred_class_sku[i]
            }
            result.append(cur_img_result)

        return result
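    # Example (hypothetical service object and paths):
    #   svc.predict_batch(['a.jpg', 'b.jpg'], top_k=3)
    #   -> [{'name': [...], 'prob': [...], 'sku': [...]}, ...]  (one dict per image)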

    def retrain(self, img_paths_list, class_name, sku):
        self.labelID += 1

        # load the new-class samples (image_enforce presumably yields augmented views)
        infer_imgs = []
        for p in img_paths_list:
            infer_imgs += [
                transforms.ToTensor()(im) for im in image_enforce(p)
            ]
        X = torch.stack(infer_imgs)

        # load model
        model = ProtoNet().cpu()
        model.load_state_dict(torch.load(self.model_path, map_location='cpu'))
        model.eval()

        # compute the new class prototype
        with torch.no_grad():
            model_output = model(X)  # [batch_size, 128]
        batch_prototype = model_output.mean(0, keepdim=True)  # [1, 128]

        # reject if the new prototype fails to map to a distinguishing embedding;
        # with threshold = 0.0 the check is effectively disabled, since
        # Euclidean distances are non-negative
        threshold = 0.0
        dists = euclidean_dist(
            batch_prototype.cpu(),
            self.prototypes.cpu())  # [1, num_classes]
        min_dist = torch.min(dists).item()
        if min_dist < threshold:
            index = torch.argmin(dists).item()
            sim_lblid = self.labels[index]
            info = {
                'msg': 'fail',
                'similar_object_name': self.sku2name[self.idx2sku[sim_lblid]],
                'similar_object_sku': self.idx2sku[sim_lblid]
            }
            return info

        # add new class info
        self.prototypes = torch.cat([self.prototypes, batch_prototype], 0)
        self.labels = np.concatenate((self.labels, [self.labelID]), axis=0)
        self.idx2sku[self.labelID] = sku
        self.sku2name[sku] = class_name

        info = {'msg': 'success'}
        return info
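Both methods above depend on a `euclidean_dist` helper that this snippet does not define. A minimal sketch of the usual prototypical-network form (an assumption, not the snippet's own code): pairwise squared Euclidean distances between an [N, D] and an [M, D] batch, returned as an [N, M] matrix.

import torch

def euclidean_dist(x, y):
    # x: [N, D], y: [M, D] -> [N, M] squared distances
    n, m = x.size(0), y.size(0)
    x = x.unsqueeze(1).expand(n, m, -1)  # [N, M, D]
    y = y.unsqueeze(0).expand(n, m, -1)  # [N, M, D]
    return torch.pow(x - y, 2).sum(2)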
        # end of one training episode: backprop, optimizer step, bookkeeping
        loss.backward()
        optim.step()

        train_loss.append(loss.item())
        train_acc.append(acc.item())
        postfix_dict = {"train_loss": loss.item(), "train_acc": acc.item()}
        trange.set_postfix(**postfix_dict)

    avg_loss = np.mean(train_loss[-episodes:])
    avg_acc = np.mean(train_acc[-episodes:])
    print('Avg Train Loss: {}, Avg Train Acc: {}'.format(avg_loss, avg_acc))
    scheduler.step()

    model.eval()
    val_trange = tqdm(val_dataloader)

    for batch in val_trange:
        with torch.no_grad():
            x, y = batch
            x, y = x.to(device), y.to(device)
            model_output = model(x)
            prototype, query_samples = get_proto_query(model_output.cpu(),
                                                       y.cpu(),
                                                       n_aug=0,
                                                       n_support=N_shot)
            loss, acc = cal_loss(query_samples,
                                 prototype,
                                 n_classes=val_N_way,
                                 n_aug=0)
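`get_proto_query` and `cal_loss` are likewise external helpers. A hedged sketch of what a splitter like `get_proto_query` could look like, assuming the first `n_support` embeddings of each class act as the support set (the `n_aug` argument is accepted but unused here, matching the `n_aug=0` calls above):

import torch

def get_proto_query(embeddings, labels, n_aug, n_support):
    # embeddings: [B, D]; labels: [B]
    classes = torch.unique(labels)
    support_idxs = [(labels == c).nonzero(as_tuple=True)[0][:n_support]
                    for c in classes]
    query_idxs = torch.cat([(labels == c).nonzero(as_tuple=True)[0][n_support:]
                            for c in classes])
    # one prototype per class: the mean of its support embeddings
    prototypes = torch.stack([embeddings[idx].mean(0) for idx in support_idxs])
    return prototypes, embeddings[query_idxs]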
Example #4
        optimizer.step()
        train_loss.append(loss.item())
        train_acc.append(acc.item())
    # ONE EPOCH ENDS

    # train_loss accumulates across epochs, so slicing the last opts.iterations
    # entries averages only the current epoch's episodes
    avg_loss = np.mean(train_loss[-opts.iterations:])
    avg_acc = np.mean(train_acc[-opts.iterations:])
    print_log(
        'Avg Train Loss: {:.5f}, Avg Train Acc: {:.5f}'.format(
            avg_loss, avg_acc), opts.log_file)

    if val_db is None:
        continue
    net.eval()
    with torch.no_grad():
        for batch in val_db:
            x, y = batch[0].to(opts.device), batch[1].to(opts.device)
            loss, acc = loss_fn(net(x),
                                target=y,
                                n_support=opts.num_support_val,
                                distance=opts.distance,
                                device=opts.device)
            val_loss.append(loss.item())
            val_acc.append(acc.item())
    avg_loss = np.mean(val_loss[-opts.iterations:])
    avg_acc = np.mean(val_acc[-opts.iterations:])
    postfix = ' (Best)' if avg_acc >= best_acc else ' (Best: {:.5f})'.format(
        best_acc)
    print_log(
        'Avg Val Loss: {:.5f}, Avg Val Acc: {:.5f}{}'.format(
            avg_loss, avg_acc, postfix), opts.log_file)
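The `loss_fn` in this last fragment is also undefined; below is a sketch of a standard prototypical loss with the same parameter names, assuming Euclidean distance (the `distance` and `device` arguments are kept only for signature compatibility):

import torch
import torch.nn.functional as F

def loss_fn(embeddings, target, n_support, distance='euclidean', device='cpu'):
    # split each class into support (first n_support) and query samples
    classes = torch.unique(target)
    n_classes = len(classes)
    support_idxs = [(target == c).nonzero(as_tuple=True)[0][:n_support]
                    for c in classes]
    query_idxs = torch.cat([(target == c).nonzero(as_tuple=True)[0][n_support:]
                            for c in classes])
    prototypes = torch.stack([embeddings[idx].mean(0) for idx in support_idxs])
    query = embeddings[query_idxs]
    n_query = query.size(0) // n_classes

    # classify queries by (negative) distance to each prototype
    dists = torch.cdist(query, prototypes)  # [n_classes * n_query, n_classes]
    log_p_y = F.log_softmax(-dists, dim=1).view(n_classes, n_query, -1)

    # queries are ordered class-by-class, so class i's targets are all i
    target_inds = (torch.arange(n_classes, device=log_p_y.device)
                   .view(n_classes, 1, 1).expand(n_classes, n_query, 1))
    loss = -log_p_y.gather(2, target_inds).squeeze(-1).mean()
    acc = log_p_y.argmax(2).eq(target_inds.squeeze(-1)).float().mean()
    return loss, acc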