def init_protonet(opt):
    '''
    Initialize the ProtoNet
    '''
    model = ProtoNet()
    model = model.cuda() if opt.cuda else model
    return model
Example #2
def init_protonet(opt, pretrained_file="", pretrained=False):
    '''
    Initialize the ProtoNet
    '''
    device = 'cuda:0' if torch.cuda.is_available() and opt.cuda else 'cpu'
    model = ProtoNet().to(device)
    if pretrained:
        model.load_state_dict(torch.load(pretrained_file))
        print("Loaded pre-trained model")
    return model
Example #3
def init_protonet(opt):
    '''
    Initialize the ProtoNet
    '''
    if opt.dataset == 'omniglot':
        model = ProtoNet()
    elif opt.dataset == 'mini_imagenet':
        model = ProtoNet(x_dim=3)
    else:
        raise ValueError('Unsupported dataset: {}'.format(opt.dataset))
    model = model.cuda() if opt.cuda else model
    return model
Example #4
def init_protonet(opt):
    '''
    Initialize the ProtoNet
    '''
    device = 'cuda:0' if torch.cuda.is_available() and opt.cuda else 'cpu'
    model = ProtoNet(x_dim=3).to(device)
    return model
Example #5
def init_protonet(opt, x_dim):
    '''
    Initialize the ProtoNet
    '''
    device = 'cuda:{}'.format(
        opt.gpu_id) if torch.cuda.is_available() and opt.cuda else 'cpu'
    model = ProtoNet(x_dim=x_dim,
                     nn_architecture=opt.nn_architecture).to(device)
    return model
Example #6
def init_protonet(opt):
    '''
    Initialize the ProtoNet
    '''
    device = 'cuda:0' if torch.cuda.is_available() and opt.cuda else 'cpu'

    if opt.dataset == 0:
        if opt.net == 1:
            model = ProtoNet().to(device)
        elif opt.net == 2:
            model = ProtoResNet().to(device)
    elif opt.dataset == 1:
        if opt.net == 1:
            model = ProtoNet(x_dim=3).to(device)
        elif opt.net == 2:
            model = ProtoResNet(x_dim=3).to(device)
    else:
        raise Exception("No such dataset!!")
    return model
Example #7
def init_protonet(opt):
    '''
    Initialize the ProtoNet
    '''
    device = 'cuda:0' if torch.cuda.is_available() and opt.cuda else 'cpu'

    if opt.net == 1:
        model = ProtoNet().to(device)
    elif opt.net == 2:
        model = ProtoResNet().to(device)
    return model
    def predict_batch(self, img_paths_list, top_k):
        # load inference samples
        infer_imgs = list()
        for path in img_paths_list:
            infer_imgs.append(torch.tensor(load_img(path)))  # list of tensor
        X = torch.stack(infer_imgs)

        # load model
        model = ProtoNet().cpu()
        model.load_state_dict(torch.load(self.model_path, map_location='cpu'))
        model.eval()

        # start inferring
        pred_label_list = list()
        pred_class_name = list()
        pred_class_sku = list()
        pred_class_prob = list()

        model_output = model(X)  # [batch_size,128]
        dists = euclidean_dist(
            model_output.to('cpu'),
            self.prototypes.to('cpu'))  # [batch_size,num_classes]
        dists = dists.data.cpu().numpy()
        sorted_dists = np.sort(dists, axis=1)
        sorted_idxs = np.argsort(dists, axis=1)
        # reject candidate classes whose distance exceeds the threshold
        threshold = 15.0
        mask = sorted_dists < threshold

        for i in range(len(infer_imgs)):
            pred_class_prob.append(sorted_dists[i][mask[i]][:top_k].tolist())
            pred_label_list.append(
                self.labels[sorted_idxs[i]][mask[i]][:top_k].tolist())
            pred_class_sku.append(
                [self.idx2sku[idx] for idx in pred_label_list[i]])
            pred_class_name.append(
                [self.sku2name[idx] for idx in pred_class_sku[i]])

        result = []  # list of dict for each image
        for i in range(len(infer_imgs)):
            cur_img_result = {
                'name': pred_class_name[i],
                'prob': pred_class_prob[i],
                'sku': pred_class_sku[i]
            }
            result.append(cur_img_result)

        return result
    def retrain(self, img_paths_list, class_name, sku):

        self.labelID += 1

        infer_imgs = []
        for p in img_paths_list:
            infer_imgs += [
                transforms.ToTensor()(im) for im in image_enforce(p)
            ]
        X = torch.stack(infer_imgs)

        # load model
        model = ProtoNet().cpu()
        model.load_state_dict(torch.load(self.model_path, map_location='cpu'))
        model.eval()

        # compute new prototype
        model_output = model(X)  # [batch_size,128]
        batch_prototype = model_output.mean(0)
        batch_prototype = batch_prototype.unsqueeze(0)

        # check whether the new class fails to map to a distinguishing embedding
        threshold = 0.0
        dists = euclidean_dist(
            batch_prototype.to('cpu'),
            self.prototypes.to('cpu'))  # [1,num_classes]
        min_dist = torch.min(dists).item()
        if min_dist < threshold:
            index = np.argmin(dists)
            sim_lblid = self.labels[index]
            info = {
                'msg': 'fail',
                'similar_object_name': self.sku2name[self.idx2sku[sim_lblid]],
                'similar_object_sku': self.idx2sku[sim_lblid]
            }
            return info

        # add new class info
        self.prototypes = torch.cat([self.prototypes, batch_prototype], 0)
        self.labels = np.concatenate((self.labels, [self.labelID]), axis=0)
        self.idx2sku[self.labelID] = sku
        self.sku2name[sku] = class_name

        info = {'msg': 'success'}
        return info
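Both predict_batch and retrain above call a euclidean_dist helper that is not defined in any of these examples. Below is a minimal sketch of the pairwise squared Euclidean distance such a helper is commonly implemented as; the exact definition used by the original code is an assumption.

import torch

def euclidean_dist(x, y):
    # Hypothetical sketch: x is [n, d] (query embeddings), y is [m, d] (prototypes);
    # returns an [n, m] matrix of squared Euclidean distances.
    n, m, d = x.size(0), y.size(0), x.size(1)
    assert d == y.size(1), 'embedding dimensions must match'
    x = x.unsqueeze(1).expand(n, m, d)
    y = y.unsqueeze(0).expand(n, m, d)
    return torch.pow(x - y, 2).sum(2)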
Example #10
from PIL import Image
import os
import pickle

import numpy as np
import torch


def get_embd(x, model):
    '''
    Compute the embedding of x with the model trained with the
    prototypical learning algorithm
    '''
    #    device = 'cuda:0' if torch.cuda.is_available() and opt.cuda else 'cpu'
    #     x, y = x.to(device), y.to(device)
    out = model(x)
    return out


model = ProtoNet()


def load_img(path):
    x = Image.open(path).convert('RGB')
    x = x.resize((28, 28))
    shape = 3, x.size[0], x.size[1]
    x = np.array(x, np.float32, copy=False)
    x = 1.0 - torch.from_numpy(x)
    x = x.transpose(0, 1).contiguous().view(shape)
    return x


img = load_img('/home/pallav_soni/dumm.jpeg')
img = torch.unsqueeze(img, 0)
#print(img.size())
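A minimal usage sketch for the snippet above (not part of the original example): pass the single-image batch through get_embd to obtain its embedding; note that no weights are loaded here, so the model is randomly initialized.

# Hypothetical usage: embed the loaded image with the (untrained) model.
embedding = get_embd(img, model)
print(embedding.size())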
Example #11
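        # Fragment of a forward pass: add unit-variance Gaussian noise to the input,
        # then pass it through three fully connected layers, each followed by a ReLU.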
        noise = torch.empty(x.shape).normal_(mean=0, std=1).to(self.device)
        x = x + noise

        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.relu(out)
        out = self.fc3(out)
        out = self.relu(out)
        return out


# In[30]:

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = ProtoNet()
model = model.to(device)

generator = Halliculator(featdim=1600, device=device)
generator = generator.to(device)

# In[31]:

# x, y = next(iter(train_dataloader))
# x = x.to(device)
# y = y.to(device)
# out = model(x)
# generator(out)
# ps, qs = get_proto_query(out.cpu(), y.cpu(), n_aug=1, n_support=1)

# In[32]:
Example #12
    parser.add_argument('-gpu_id', type=int, nargs='+', default=0)
    parser.add_argument('-distance',
                        type=str,
                        help='cosine or euclidean',
                        default='euclidean')
    return parser


# PARAMS
opts = get_basic_parser(get_parser()).parse_args()
opts.method = 'proto'
setup(opts)

# CREATE MODEL
net = ProtoNet().to(opts.device)

# RESUME (fixme with appropriate epoch and iter)
if os.path.exists(opts.model_file):
    print_log(
        'loading previous best checkpoint [{}] ...'.format(opts.model_file),
        opts.log_file)
    net.load_state_dict(torch.load(opts.model_file))

if opts.multi_gpu:
    print_log('Wrapping network into multi-gpu mode ...', opts.log_file)
    net = torch.nn.DataParallel(net)

# PREPARE DATA
train_db, val_db, test_db, _ = data_loader(opts)
Example #13
# In[5]:

# train_dataset = MiniDataset(csv_path, data_dir)
# val_dataset = MiniDataset(val_csv_path, val_data_dir)

# train_sampler = BatchSampler(train_dataset.labels, N_way, sample_per_class, episodes)
# val_sampler = BatchSampler(val_dataset.labels, val_N_way, sample_per_class, episodes)

# train_dataloader = DataLoader(train_dataset, batch_sampler=train_sampler)
# val_dataloader = DataLoader(val_dataset, batch_sampler=val_sampler)

# In[6]:

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = ProtoNet()
model = model.to(device)

# In[7]:

optim = torch.optim.Adam(params=model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optim,
                                            gamma=lr_gamma,
                                            step_size=lr_step)

# In[8]:

train_loss = []
train_acc = []
val_loss = []
val_acc = []
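The loop that fills these lists is not shown in this example. Below is a sketch of a typical episodic training loop, assuming the commented-out train_dataloader from the cell above and a prototypical_loss(output, target, n_support) helper returning a (loss, accuracy) pair; epochs, sample_per_class, and both of those names are assumptions, not taken from the original snippet.

# Hypothetical training loop; epochs, sample_per_class, train_dataloader and
# prototypical_loss are assumed to be defined elsewhere.
for epoch in range(epochs):
    model.train()
    for x, y in train_dataloader:
        x, y = x.to(device), y.to(device)
        optim.zero_grad()
        loss, acc = prototypical_loss(model(x), target=y, n_support=sample_per_class)
        loss.backward()
        optim.step()
        train_loss.append(loss.item())
        train_acc.append(acc.item())
    scheduler.step()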
Example #14
    N_query = args.N_query
    model_path = args.load
    test_csv = args.test_csv
    test_data_dir = args.test_data_dir
    testcase_csv = args.testcase_csv
    output_csv = args.output_csv

    test_dataset = MiniDataset(test_csv, test_data_dir)
    test_loader = DataLoader(test_dataset,
                             batch_size=N_way * (N_query + N_shot),
                             num_workers=3,
                             pin_memory=False,
                             worker_init_fn=worker_init_fn,
                             sampler=GeneratorSampler(testcase_csv))

    model = ProtoNet()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.load_state_dict(torch.load(model_path, map_location=device))

    prediction_results = predict(model, test_loader, N_way, N_shot)

    row_names = ["query{}".format(i) for i in range(1, 76)]
    row_names = ["episode_id"] + row_names

    with open(output_csv, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(row_names)

        for i, pred in enumerate(prediction_results):
            row = [i] + list(pred.numpy())
            writer.writerow(row)
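The DataLoader above also expects a worker_init_fn helper that is not shown. A minimal sketch of a typical worker-seeding function (an assumption, not taken from the original example):

# Hypothetical worker-seeding helper: makes each DataLoader worker deterministic.
import random
import numpy as np

def worker_init_fn(worker_id):
    np.random.seed(worker_id)
    random.seed(worker_id)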
Example #15
def create_model():
    protonet_model = ProtoNet()
    if use_gpu:
        protonet_model = protonet_model.cuda()
    return protonet_model
Example #16
from PIL import Image
import os
import pickle

import numpy as np
import torch


def get_embd(x, model):
    '''
    Compute the embedding of x with the model trained with the
    prototypical learning algorithm
    '''
    #    device = 'cuda:0' if torch.cuda.is_available() and opt.cuda else 'cpu'
    #     x, y = x.to(device), y.to(device)
    out = model(x)
    return out


model = ProtoNet()
model_path = '/home/pallav_soni/pro/output/best_model.pth'
model.load_state_dict(torch.load(model_path))


def load_img(path):
    x = Image.open(path).convert('RGB')
    x = x.resize((28, 28))
    shape = 3, x.size[0], x.size[1]
    x = np.array(x, np.float32, copy=False)
    x = 1.0 - torch.from_numpy(x)
    x = x.transpose(0, 1).contiguous().view(shape)
    x = torch.unsqueeze(x, 0)
    return x