Example #1
def test():
    submit_path = config.submit + config.model_name + os.sep + config.description + os.sep + str(
        config.fold) + os.sep  #save submitted csv results
    weight_path = config.weights + config.model_name + os.sep + config.description + os.sep + str(
        config.fold) + os.sep
    csv_map = OrderedDict({'cls': [], 'label': [], 'probability': []})

    test_loader = DataLoader(customDataset(config.test_data, train=False),
                             batch_size=config.batch_size * 2,
                             shuffle=False,
                             pin_memory=True)

    model = get_net()
    model = DataParallel(model.cuda(), device_ids=config.gpus)
    checkpoint = torch.load(weight_path + 'model_best.pth.tar')
    model.load_state_dict(checkpoint['state_dict'])

    model.eval()
    with torch.no_grad():
        for index, (data, file_paths) in enumerate(test_loader):
            labels = [int(path.split('/')[-2]) for path in file_paths]
            data = Variable(data).cuda()
            output = model(data)
            smax = nn.Softmax(1)
            smax_out = smax(output)
            _, cls = torch.max(smax_out, 1)

            csv_map['cls'].extend(cls.cpu().tolist())
            csv_map['label'].extend(labels)
            for output in smax_out:
                prob = ";".join([str(i) for i in output.data.tolist()])
                csv_map['probability'].append(prob)
    result = pd.DataFrame(csv_map)
    result.to_csv(submit_path + 'submit.csv', index=False, header=None)
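Because the checkpoint above is saved from (and loaded into) a DataParallel-wrapped model, its state-dict keys carry a 'module.' prefix. Below is a minimal sketch, assuming the same get_net() and checkpoint file from the example, of loading such a checkpoint into an unwrapped model by stripping that prefix:

from collections import OrderedDict

import torch

# Strip the 'module.' prefix added by DataParallel so the weights fit a plain model.
# 'model_best.pth.tar' and get_net() are placeholders taken from the example above.
checkpoint = torch.load('model_best.pth.tar', map_location='cpu')
state_dict = OrderedDict((k[len('module.'):] if k.startswith('module.') else k, v)
                         for k, v in checkpoint['state_dict'].items())
model = get_net()
model.load_state_dict(state_dict)
model.eval()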
Example #2
class Person_Attribute(object):
    def __init__(self, weights="resnest50.pth"):
        self.device = torch.device("cuda")
        self.net = resnest50().to(self.device)
        self.net = DataParallel(self.net)
        self.weights = weights
        self.net.load_state_dict(torch.load(self.weights))

        TRAIN_MEAN = [0.485, 0.499, 0.432]
        TRAIN_STD = [0.232, 0.227, 0.266]
        self.transforms = transforms.Compose([
            transforms.ToCVImage(),
            transforms.Resize((128, 256)),
            transforms.ToTensor(),
            transforms.Normalize(TRAIN_MEAN, TRAIN_STD)
        ])

    def recog(self, img_path):
        img = cv2.imread(img_path)
        img = self.transforms(img)
        img = img.unsqueeze(0)

        with torch.no_grad():
            self.net.eval()
            img_input = img.to(self.device)
            outputs = self.net(img_input)
            results = []
            for output in outputs:
                output = torch.softmax(output, 1)
                output = np.array(output[0].cpu())
                label = np.argmax(output)
                score = output[label]
                results.append((label, score))
        return results
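A minimal usage sketch for the Person_Attribute class above; the weights file and the image path are placeholders:

if __name__ == "__main__":
    # Hypothetical usage: run attribute recognition on a single image.
    extractor = Person_Attribute(weights="resnest50.pth")
    results = extractor.recog("person.jpg")
    for label, score in results:
        print(int(label), float(score))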
Example #3
    def __init__(self, model_name, batch_size, gpu_memory):
        super().__init__(batch_size, gpu_memory)
        if model_name in [
                'pt_vgg', 'pt_resnet', 'pt_inception', 'pt_densenet'
        ]:
            model = model_class_dict[model_name](pretrained=True)
            self.mean = np.reshape([0.485, 0.456, 0.406], [1, 3, 1, 1])
            self.std = np.reshape([0.229, 0.224, 0.225], [1, 3, 1, 1])
            model = DataParallel(model.cuda())
        else:
            model = model_class_dict[model_name]()
            if model_name in ['pt_post_avg_cifar10', 'pt_post_avg_imagenet']:
                # checkpoint = torch.load(model_path_dict[model_name])
                self.mean = np.reshape([0.485, 0.456, 0.406], [1, 3, 1, 1])
                self.std = np.reshape([0.229, 0.224, 0.225], [1, 3, 1, 1])
            else:
                model = DataParallel(model).cuda()
                checkpoint = torch.load(model_path_dict[model_name] + '.pth')
                self.mean = np.reshape([0.485, 0.456, 0.406], [1, 3, 1, 1])
                self.std = np.reshape([0.225, 0.225, 0.225], [1, 3, 1, 1])
                model.load_state_dict(checkpoint)
                model.float()
        self.mean, self.std = self.mean.astype(np.float32), self.std.astype(
            np.float32)

        model.eval()
        self.model = model
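The (1, 3, 1, 1) shape used for self.mean and self.std above lets the statistics broadcast over an NCHW batch. A small sketch of the normalization this implies, using a random placeholder batch:

import numpy as np

# Placeholder NCHW batch in [0, 1]; mean/std broadcast over batch, height and width.
x = np.random.rand(8, 3, 224, 224).astype(np.float32)
mean = np.reshape([0.485, 0.456, 0.406], [1, 3, 1, 1]).astype(np.float32)
std = np.reshape([0.229, 0.224, 0.225], [1, 3, 1, 1]).astype(np.float32)
x_normalized = (x - mean) / std
print(x_normalized.shape)  # (8, 3, 224, 224)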
Example #4
def infer(model, rank=0):
    model = model.cuda()
    model = DataParallel(model)
    model.load_state_dict(torch.load(model_state_dict))
    model.eval()
    if rank == 0:
        print('preparing dataset...')
    data_iterator = DataIterator(coco_dir,
                                 resize=resize,
                                 max_size=max_size,
                                 batch_size=batch_size,
                                 stride=stride,
                                 training=training,
                                 dist=dist)
    if rank == 0:
        print('finish loading dataset!')

    results = []
    with torch.no_grad():
        for i, (data, ids, ratios) in enumerate(data_iterator, start=1):
            scores, boxes, classes = model(data)
            results.append([scores, boxes, classes, ids, ratios])
            if rank == 0:
                size = len(data_iterator.ids)
                msg = '[{:{len}}/{}]'.format(min(i * batch_size, size),
                                             size,
                                             len=len(str(size)))
                print(msg, flush=True)

    results = [torch.cat(r, dim=0) for r in zip(*results)]
    results = [r.cpu() for r in results]
Example #5
def predict_softmax(args):
    model, _ = create_model(args)
    if torch.cuda.device_count() > 1:
        model = DataParallel(model)
    model = model.cuda()

    model.eval()
    test_loader = get_test_loader(batch_size=args.val_batch_size,
                                  dev_mode=args.dev_mode)

    preds, scores, founds = [], [], []
    with torch.no_grad():
        for i, (x, found) in enumerate(test_loader):
            x = x.cuda()
            output = model(x, None, True)
            output = F.softmax(output, dim=1)
            score, pred = output.max(1)

            preds.append(pred.cpu())
            scores.append(score.cpu())
            founds.append(found)
            print('{}/{}'.format(args.val_batch_size * (i + 1), test_loader.num),
                  end='\r')

    preds = torch.cat(preds, 0).numpy()
    scores = torch.cat(scores, 0).numpy()
    founds = torch.cat(founds, 0).numpy()

    classes, stoi = get_classes(num_classes=args.num_classes,
                                start_index=args.start_index,
                                other=args.other)
    print(preds.shape)

    pred_labels = [classes[i] for i in preds]
    create_submission(args, pred_labels, scores, founds, args.sub_file)
Example #6
    def test(self, samples_test, dir_test=settings.test, predict=None):
        if predict is None:
            predict = self.predict

        net = DataParallel(self.net).cuda()

        transforms = generator.TransformationsGenerator([])

        test_dataset = datasets.ImageDataset(samples_test, dir_test, transforms, test=True)
        test_dataloader = DataLoader(
            test_dataset,
            num_workers=10,
            batch_size=32
        )

        with tqdm(total=len(test_dataloader), leave=True, ascii=True) as pbar, torch.no_grad():
            net.eval()

            for images, ids in test_dataloader:
                masks_predictions = predict(net, images)

                pbar.set_description('Creating test predictions...')
                pbar.update()

                masks_predictions = masks_predictions.cpu().squeeze().numpy()

                for p, id in zip(masks_predictions, ids):
                    yield p, id
Example #7
class Feature_extract(object):
    def __init__(self):
        self.device = torch.device("cuda")
        self.model = resnet.resnet_face18(opt.use_se)
        self.model = DataParallel(self.model)
        self.model.load_state_dict(torch.load(opt.test_model_path))
        self.model.to(self.device)

        normalize = T.Normalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225])
        self.transforms = T.Compose([T.ToTensor(), normalize])

    def feature_extract(self, img_path):
        img = Image.open(img_path)
        img = img.resize((112, 112))
        img = self.transforms(img)
        img = img.unsqueeze(0)
        with torch.no_grad():
            self.model.eval()
            data_input = img.to(self.device)
            feature = self.model(data_input)
        feature = np.array(feature.cpu())[0, :].tolist()
        vector = np.mat(feature)
        denom = np.linalg.norm(vector)
        return (np.array(feature) / denom).tolist()
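Since feature_extract above returns an L2-normalized vector, comparing two faces reduces to a dot product. A minimal sketch; the image paths are placeholders:

import numpy as np

extractor = Feature_extract()
feat_a = np.array(extractor.feature_extract("face_a.jpg"))
feat_b = np.array(extractor.feature_extract("face_b.jpg"))
# Both vectors are unit length, so the dot product equals the cosine similarity.
print(float(np.dot(feat_a, feat_b)))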
Example #8
class DefaultPredictor:
    """
    Create a simple end-to-end predictor with the given config.
    The predictor takes a BGR image, resizes it to the specified resolution,
    runs the model and produces a dict of predictions.
    This predictor takes care of model loading and input preprocessing for you.
    If you'd like to do anything more fancy, please refer to its source code
    as an example of how to build and use the model manually.
    Attributes:
        metadata (Metadata): the metadata of the underlying dataset, obtained from
            cfg.DATASETS.TEST.
    Examples:
    .. code-block:: python
        pred = DefaultPredictor(cfg)
        inputs = cv2.imread("input.jpg")
        outputs = pred(inputs)
    """
    def __init__(self, cfg):
        self.cfg = cfg.clone()  # cfg can be modified by model
        model = build_model(self.cfg)
        self.model = DataParallel(model)
        self.model.cuda()
        self.model.eval()

        checkpointer = Checkpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)

        num_channels = len(cfg.MODEL.PIXEL_MEAN)
        self.mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).view(
            1, num_channels, 1, 1)
        self.std = torch.tensor(cfg.MODEL.PIXEL_STD).view(
            1, num_channels, 1, 1)

    def __call__(self, original_image):
        """
        Args:
            original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
        Returns:
            predictions (np.ndarray): the output of the model
        """
        with torch.no_grad():  # https://github.com/sphinx-doc/sphinx/issues/4258
            # Apply pre-processing to image.
            # the model expects RGB inputs
            original_image = original_image[:, :, ::-1]
            image = cv2.resize(original_image,
                               tuple(self.cfg.INPUT.SIZE_TEST[::-1]),
                               interpolation=cv2.INTER_CUBIC)
            image = T.ToTensor()(image)[None]
            image.sub_(self.mean).div_(self.std)

            inputs = {
                "images": image,
            }
            pred_feat = self.model(inputs)
            # Normalize feature to compute cosine distance
            pred_feat = F.normalize(pred_feat)
            pred_feat = pred_feat.cpu().data.numpy()
            return pred_feat
Example #9
    def create_model(self, depth, drop_ratio, net_mode, model_path):
        model = DataParallel(ResNet(depth, drop_ratio,
                                    net_mode)).to(self.device)
        load_state(model, None, None, model_path, True, False)

        model.eval()

        return model
Example #10
def extract_feature(model_path, backbone_net, face_scrub_path, megaface_path, batch_size=32, gpus='0', do_norm=False):

    if backbone_net == 'MobileFace':
        net = mobilefacenet.MobileFaceNet()
    elif backbone_net == 'Res50_IR':
        net = cbam.CBAMResNet_IR(50, feature_dim=args.feature_dim, mode='ir')
    elif backbone_net == 'SERes50_IR':
        net = cbam.CBAMResNet_IR(50, feature_dim=args.feature_dim, mode='se_ir')
    elif backbone_net == 'CBAMRes50_IR':
        net = cbam.CBAMResNet_IR(50, feature_dim=args.feature_dim, mode='cbam_ir')
    elif backbone_net == 'Res100_IR':
        net = cbam.CBAMResNet_IR(100, feature_dim=args.feature_dim, mode='ir')
    elif backbone_net == 'SERes100_IR':
        net = cbam.CBAMResNet_IR(100, feature_dim=args.feature_dim, mode='se_ir')
    elif backbone_net == 'CBAMRes100_IR':
        net = cbam.CBAMResNet_IR(100, feature_dim=args.feature_dim, mode='cbam_ir')
    else:
        raise ValueError(backbone_net + ' is not available!')

    multi_gpus = False
    if len(gpus.split(',')) > 1:
        multi_gpus = True
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net.load_state_dict(torch.load(model_path)['net_state_dict'])
    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)
    net.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
    ])
    megaface_dataset = MegaFace(face_scrub_path, megaface_path, transform=transform)
    megaface_loader = torch.utils.data.DataLoader(megaface_dataset, batch_size=batch_size,
                                             shuffle=False, num_workers=12, drop_last=False)

    for data in megaface_loader:
        img, img_path= data[0].to(device), data[1]
        with torch.no_grad():
            output = net(img).data.cpu().numpy()

        if do_norm is False:
            for i in range(len(img_path)):
                abs_path = img_path[i] + '.feat'
                write_mat(abs_path, output[i])
            print('extract 1 batch...without feature normalization')
        else:
            for i in range(len(img_path)):
                abs_path = img_path[i] + '.feat'
                feat = output[i]
                feat = feat / np.sqrt((np.dot(feat, feat)))
                write_mat(abs_path, feat)
            print('extract 1 batch...with feature normalization')
    print('all images have been processed!')
Example #11
class Person_Attribute(object):
    def __init__(self, weights="resnest50.pth"):
        self.device = torch.device("cuda")
        self.net = resnest50().to(self.device)
        self.net = DataParallel(self.net)
        self.weights = weights
        self.net.load_state_dict(torch.load(self.weights))

        TRAIN_MEAN = [0.485, 0.499, 0.432]
        TRAIN_STD = [0.232, 0.227, 0.266]
        self.size = (128, 256)
        self.transforms = transforms.Compose([
            transforms.ToCVImage(),
            transforms.Resize((128, 256)),
            transforms.ToTensor(),
            transforms.Normalize(TRAIN_MEAN, TRAIN_STD)
        ])

        self.mean = torch.tensor([0.485, 0.499, 0.432], dtype=torch.float32)
        self.std = torch.tensor([0.232, 0.227, 0.266], dtype=torch.float32)

        self.atts = [
            "gender", "age", "orientation", "hat", "glasses", "handBag",
            "shoulderBag", "backBag", "upClothing", "downClothing"
        ]

    def detect(self, img):
        #imgss = self.transforms(img)
        image = img.astype('uint8')
        image = cv2.resize(image, self.size, cv2.INTER_LINEAR)
        image = image.transpose(2, 0, 1)
        image = torch.from_numpy(image)
        image = image.float() / 255.0

        image = image.sub_(self.mean[:, None, None]).div_(self.std[:, None,
                                                                   None])

        image = image.unsqueeze(0)

        with torch.no_grad():
            self.net.eval()
            img_input = image.to(self.device)
            outputs = self.net(img_input)
            results = []
            for output in outputs:
                output = torch.softmax(output, 1)
                output = np.array(output[0].cpu())
                label = np.argmax(output)
                score = output[label]
                results.append((label, score))
            labels = [i[0] for i in results]
            dict_result = {}

            for att, label in zip(self.atts, labels):
                if label == -1:
                    continue
                dict_result.update({str(att): name_dict[att][label]})
        return dict_result
Example #12
class LoadEvalModel(object):
    def __init__(self, eval_backbone, resize_fn, world_size,
                 distributed_data_parallel, device):
        super(LoadEvalModel, self).__init__()
        self.eval_backbone = eval_backbone
        self.resize_fn = resize_fn
        self.save_output = misc.SaveOutput()

        if self.eval_backbone == "Inception_V3":
            self.res, mean, std = 299, [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
            self.model = InceptionV3(resize_input=False,
                                     normalize_input=False).to(device)
        elif self.eval_backbone == "SwAV":
            self.res, mean, std = 224, [0.485, 0.456,
                                        0.406], [0.229, 0.224, 0.225]
            self.model = torch.hub.load("facebookresearch/swav",
                                        "resnet50").to(device)
            hook_handles = []
            for name, layer in self.model.named_children():
                if name == "fc":
                    handle = layer.register_forward_pre_hook(self.save_output)
                    hook_handles.append(handle)
        else:
            raise NotImplementedError

        self.resizer = resize.build_resizer(mode=resize_fn, size=self.res)
        self.trsf = transforms.Compose([transforms.ToTensor()])
        self.mean = torch.Tensor(mean).view(1, 3, 1, 1).to("cuda")
        self.std = torch.Tensor(std).view(1, 3, 1, 1).to("cuda")

        if world_size > 1 and distributed_data_parallel:
            misc.make_model_require_grad(self.model)
            self.model = DDP(self.model,
                             device_ids=[device],
                             broadcast_buffers=True)
        elif world_size > 1 and distributed_data_parallel is False:
            self.model = DataParallel(self.model, output_device=device)
        else:
            pass

    def eval(self):
        self.model.eval()

    def get_outputs(self, x, quantize=False):
        if quantize:
            x = ops.quantize_images(x)
        else:
            x = x.detach().cpu().numpy().astype(np.uint8)
        x = ops.resize_images(x, self.resizer, self.trsf, self.mean, self.std)

        if self.eval_backbone == "Inception_V3":
            repres, logits = self.model(x)
        elif self.eval_backbone == "SwAV":
            logits = self.model(x)
            repres = self.save_output.outputs[0][0]
            self.save_output.clear()
        return repres, logits
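A minimal single-process usage sketch for LoadEvalModel; the resize mode string and the batch of generated samples are placeholder assumptions (any mode accepted by resize.build_resizer works):

import torch

device = torch.device("cuda")
evaluator = LoadEvalModel(eval_backbone="Inception_V3",
                          resize_fn="legacy",  # placeholder resize mode
                          world_size=1,
                          distributed_data_parallel=False,
                          device=device)
evaluator.eval()
fake_images = torch.randn(8, 3, 64, 64, device=device)  # placeholder generated samples
with torch.no_grad():
    repres, logits = evaluator.get_outputs(fake_images, quantize=True)
print(repres.shape, logits.shape)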
Example #13
def test_large_img(args):
    # Setup Model
    #model = torch.load(args.model_path,map_location=lambda storage,loc: storage)
    #model = torch.load(args.model_path)
    # load model from model files (model trained with DataParallel)
    model = get_model(args.model_path.split('/')[-2], 5)
    state_dict = torch.load(args.model_path).state_dict()
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove the 'module.' prefix
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)
    model = DataParallel(model.cuda(),
                         device_ids=[i for i in range(len(args.gpu))])

    model.cuda()
    model.eval()

    pred_labels_list = []
    for crop_scale in args.crop_scales:
        pred_labels_single = process_single_scale(args, model, crop_scale)
        pred_labels_list.append(pred_labels_single)
        color_mask = segmap(pred_labels_single)
        test_id = os.path.basename(args.img_path)[0]
        misc.imsave(
            os.path.join(args.tempdir,
                         "%s_temp_scale_%d.png" % (test_id, crop_scale)),
            color_mask)

    if (len(args.crop_scales) == 1):
        pred_labels = pred_labels_list[0]
    else:
        average_map = np.zeros_like(pred_labels_list[0])
        for i in range(average_map.shape[0]):
            for j in range(average_map.shape[1]):
                pre_list = []
                for index in range(len(args.crop_scales)):
                    pre_list.append(pred_labels_list[index][i][j])
                most_label = np.argmax(np.bincount(pre_list))
                average_map[i][j] = most_label
        pred_labels = average_map

    misc.imsave(args.out_path, np.asarray(pred_labels, dtype=np.uint8))
    color_mask = segmap(pred_labels)
    misc.imsave(args.vis_out_path, color_mask)

    if args.img_label_path is not None:
        gts = misc.imread(args.img_label_path)
        score, class_iou = scores(gts, pred_labels, n_class=n_classes)

        for k, v in score.items():
            print(k, v)

        for i in range(n_classes):
            print(i, class_iou[i])
Example #14
    def setARCFACE(self):
        model = resnet_face18(False)
        model = DataParallel(model).to(self.device)
        model.load_state_dict(
            torch.load(
                os.path.join(settings.BASE_DIR, 'static/') +
                'src/weights/resnet18_pretrain.pth'))
        # model.load_state_dict(torch.load(os.path.join(settings.BASE_DIR, 'static/') + 'src/weights/resnet18_KFace.pth'))
        model.eval()
        return model
Example #15
def test_model(test_dataset):
    num_test = len(test_dataset)
    test_useful_end_idx = get_useful_end_idx(sequence_length, num_test)
    test_idx = []
    for i in test_useful_end_idx:
        for j in range(sequence_length):
            test_idx.append(i - j * srate)
    test_idx.reverse()
    test_loader = DataLoader(
        test_dataset,
        batch_size=test_batch_size,
        sampler=SeqSampler(test_dataset, test_idx),
        # sampler=test_idx,
        num_workers=0,
        pin_memory=False)
    model = res34_tcn()
    model = DataParallel(model)
    model.load_state_dict(torch.load(model_name))
    # model = model.module
    # model = DataParallel(model)

    if use_gpu:
        model = model.cuda()
    # model = DataParallel(model)
    # model = model.module

    model.eval()

    all_preds_s = []

    num = 0
    with torch.no_grad():
        for data in test_loader:
            num = num + 1
            inputs, _, kdatas = data
            if use_gpu:
                inputs = Variable(inputs.cuda())
                kdatas = Variable(kdatas.cuda())
            else:
                inputs = Variable(inputs)
                kdatas = Variable(kdatas)

            outputs_s = model.forward(inputs, kdatas)

            #outputs_s = outputs_s[-1, (sequence_length - 1):: sequence_length]
            outputs_s = outputs_s[-1]
            outputs_s = F.softmax(outputs_s, dim=-1)

            _, preds_s = torch.max(outputs_s.data, -1)

            for j in range(preds_s.shape[0]):
                all_preds_s.append(preds_s[j].data.item())

    return all_preds_s
Example #16
class MSG_GAN:

    def __init__(self, depth=7, latent_size=512, gen_dilation=1,
                 dis_dilation=1, use_spectral_norm=True, device=th.device("cpu")):
        """ constructor for the class """
        from torch.nn import DataParallel

        self.gen = Generator(depth, latent_size, dilation=gen_dilation,
                             use_spectral_norm=use_spectral_norm).to(device)
        self.dis = Discriminator(depth, latent_size, dilation=dis_dilation,
                                 use_spectral_norm=use_spectral_norm).to(device)

        # wrap the Generator and the Discriminator in DataParallel when on GPU
        if device == th.device("cuda"):
            self.gen = DataParallel(self.gen)
            self.dis = DataParallel(self.dis)

        # state of the object
        self.latent_size = latent_size
        self.depth = depth
        self.device = device

        # by default the generator and discriminator are in eval mode
        self.gen.eval()
        self.dis.eval()

    def optimize_discriminator(self, dis_optim, noise, real_batch, loss_fn):

        # generate a batch of samples
        fake_samples = self.gen(noise)
        fake_samples = list(map(lambda x: x.detach(), fake_samples))

        loss = loss_fn.dis_loss(real_batch, fake_samples)

        # optimize discriminator
        dis_optim.zero_grad()
        loss.backward(retain_graph=True)
        dis_optim.step()

        return loss.item()

    def optimize_generator(self, gen_optim, noise, real_batch, loss_fn):

        # generate a batch of samples
        fake_samples = self.gen(noise)

        loss = loss_fn.gen_loss(real_batch, fake_samples)

        # optimize generator
        gen_optim.zero_grad()
        loss.backward(retain_graph=True)
        gen_optim.step()

        return loss.item()
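A minimal sampling sketch for the MSG_GAN wrapper above, assuming the generator accepts a (batch, latent_size) noise tensor and returns one image tensor per scale:

import torch as th

device = th.device("cuda" if th.cuda.is_available() else "cpu")
msg_gan = MSG_GAN(depth=7, latent_size=512, device=device)
with th.no_grad():
    noise = th.randn(4, msg_gan.latent_size, device=device)  # placeholder latent batch
    multi_scale_samples = msg_gan.gen(noise)  # one tensor per resolution
print([tuple(s.shape) for s in multi_scale_samples])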
Example #17
    def create_model(self, depth, drop_ratio, net_mode, model_path, head):
        model = DataParallel(ResNet(depth, drop_ratio,
                                    net_mode)).to(self.device)
        head = DataParallel(head()).to(self.device)

        load_state(model, head, None, model_path, True)

        model.eval()
        head.eval()

        return model, head
Example #18
    def _set_model(self, model_weights_path):
        """
        A function which instantiates the model and loads the weights
        :param model_weights_path: str, path to the model weights
        :return: None
        """
        model = resnet_face18(False)
        model = DataParallel(model)
        model.load_state_dict(torch.load(model_weights_path, map_location=self.torch_device))
        model.to(self.torch_device)
        model.eval()
        self.model = model
Example #19
def load_generator(
    path: Path, channels: int, img_size: int, n_classes: int, latent_dim: int
) -> nn.Module:
    model = DataParallel(
        Generator(
            n_channels=channels, depth=9, n_classes=n_classes, latent_size=latent_dim
        )
    )
    model.load_state_dict(torch.load(path))
    model.eval()

    return model
Example #20
def analyze_with_mean_inputs(model, input_shape, data_loader, device,
                             split_name, method, dim, model_type,
                             output_file_path):
    if output_file_path is None:
        output_file_path = './{}_with_mean_inputs_by_{}.eps'.format(
            model_type, '{}_{}-dim'.format(method, dim))

    file_util.make_parent_dirs(output_file_path)
    model = model.module if isinstance(model, DataParallel) else model
    input_batch = torch.rand(input_shape).unsqueeze(0).to(device)
    module_wrap_util.wrap_decomposable_modules(model,
                                               RepresentationWrapper,
                                               input_batch,
                                               method=method,
                                               dim=dim)
    if device.type == 'cuda':
        model = DataParallel(model)

    model.eval()
    accumulated_tensor_dict = dict()
    with torch.no_grad():
        print('Computing mean inputs ...')
        for batch_idx, (sample_batch, targets) in enumerate(data_loader):
            for x, y in zip(sample_batch, targets):
                class_label = y.item()
                if class_label not in accumulated_tensor_dict:
                    accumulated_tensor_dict[class_label] = [x, 1]
                else:
                    accumulated_tensor_dict[class_label][0] += x
                    accumulated_tensor_dict[class_label][1] += 1

        mean_input_list = list()
        for y, (x, num_samples) in accumulated_tensor_dict.items():
            mean_x = x / num_samples
            mean_input_list.append(mean_x)

        mean_batch = torch.stack(mean_input_list)
        print('Analyzing layer-wise discriminability ...')
        preds = model(mean_batch)

    transformed_output_list = list()
    name_list = list()
    extract_transformed_outputs(model, transformed_output_list, name_list)
    xs = list(range(len(name_list)))
    discriminabilities = assess_discriminabilities(transformed_output_list)
    plt.plot(xs, discriminabilities, label=method)
    plt.xticks(xs, name_list, rotation=90)
    plt.xlabel('Layer')
    plt.ylabel('Discriminability')
    plt.title(split_name)
    plt.legend()
    plt.savefig(output_file_path)
    plt.show()
Example #21
def test(args):
    model = Rockfish.load_from_checkpoint(checkpoint_path=args.checkpoint)
    model.freeze()

    test_ds = Fast5Data(args.test_path, args.recursive, args.reseg_path,
                    args.norm_method, args.motif, args.sample_size, args.window)

    if args.n_workers > 0:
        test_dl = DataLoader(test_ds, batch_size=args.batch_size,
                        num_workers=args.n_workers, pin_memory=True,
                        worker_init_fn=worker_init_fn,
                        prefetch_factor=args.prefetch_factor)
    else:
        test_dl = DataLoader(test_ds, batch_size=args.batch_size,
                        num_workers=args.n_workers, pin_memory=True,
                        worker_init_fn=worker_init_fn)

    n_gpus = torch.cuda.device_count()
    if n_gpus > 0:
        model = DataParallel(model, device_ids=list(range(n_gpus)))
        model.to(f'cuda:{model.device_ids[0]}')

    model.eval()

    output_queue = mp.Queue()
    consumers = []
    abs_out_path = str(args.out_path.absolute())
    for i in range(args.output_workers):
        worker_path = TMP_PATH.format(final=abs_out_path, id=i)
        process = Process(target=output_worker, args=(worker_path, output_queue))
        process.start()

        consumers.append(process)

    with torch.no_grad():
        for info, sig, k_mer in tqdm(test_dl):
            pred = model(sig, k_mer).squeeze(-1)
            pred = pred.cpu().numpy()

            output_queue.put((info, pred))

    for _ in range(len(consumers)):
        output_queue.put(None)
    for c in consumers:
        c.join()

    with args.out_path.open('w') as out:
        for i in range(len(consumers)):
            worker_path = TMP_PATH.format(final=abs_out_path, id=i)
            with open(worker_path, 'r') as tmp_f:
                out.write(tmp_f.read())
            os.remove(worker_path)
Example #22
def create_feature_model(args):
    args.predict = True
    cls_model, _ = create_model(args)
    model = FeatureNetV1(args.backbone, cls_model=cls_model)
    
    if torch.cuda.device_count() > 1:
        model_name = model.name
        model = DataParallel(model)
        model.name = model_name
    model = model.cuda()

    model.eval()
    return model
Example #23
def main():
    args = get_parser()
    with open(args.cfg_path) as f:
        cfg = json.load(f)
    model = MODELS[cfg['model']](num_nodes=cfg['grid_size'], use_crf=cfg['use_crf'])
    model = DataParallel(model, device_ids=None)
    checkpoint = torch.load(args.load_path)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.cuda()
    model.eval()
    x = torch.ones((2,4,3,224,224))
    y = model(x)
    print(y.size())
Example #24
def main():
    opt = Config(os.getcwd())
    if opt.backbone == 'resnet18':
        model = resnet_face18(opt.use_se)
    elif opt.backbone == 'resnet34':
        model = resnet34()
    elif opt.backbone == 'resnet50':
        model = resnet50()

    model = DataParallel(model)
    # load_model(model, opt.test_model_path)
    model.load_state_dict(
        torch.load(opt.test_model_path, map_location={'cuda:0': 'cpu'}))
    model.to(torch.device(device))
    model.eval()
    global args

    train_dataset = Dataset(opt.train_root,
                            opt.train_list,
                            phase='train',
                            input_shape=opt.input_shape)
    trainloader = data.DataLoader(train_dataset,
                                  batch_size=opt.train_batch_size,
                                  shuffle=True,
                                  num_workers=opt.num_workers)

    # centroid_map = create_centroid(model, trainloader)

    test_dataset = Dataset(opt.test_root,
                           opt.test_list,
                           phase='test',
                           input_shape=opt.input_shape)
    test_loader = data.DataLoader(
        test_dataset,
        batch_size=1000,
        # batch_size=opt.test_batch_size,
        shuffle=True,
        num_workers=opt.num_workers)

    for x, y in test_loader:

        latent_vecs = model(x)
        print(latent_vecs.shape, y.shape)
        target = y
        plot3d_tsne(
            latent_vecs,
            target,
        )
        show_umap(latent_vecs, target)
        t_sne(latent_vecs, target)
Example #25
def extract_feature(model_path, backbone_net, face_scrub_path, megaface_path, batch_size=1024, gpus='0', do_norm=False):
    # gpu init
    multi_gpus = False
    if len(gpus.split(',')) > 1:
        multi_gpus = True
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # backbone
    backbones = {'MobileFaceNet': MobileFacenet(),
                 'ResNet50_IR': CBAMResNet(50, feature_dim=args.feature_dim, mode='ir'),
                 'SEResNet50_IR': CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se'),
                 'ResNet100_IR': CBAMResNet(100, feature_dim=args.feature_dim, mode='ir'),
                 'SEResNet100_IR': CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se')}
    if backbone_net in backbones:
        net = backbones[backbone_net]
    else:
        raise ValueError(backbone_net + ' is not available!')

    # load parameter
    net.load_state_dict(torch.load(model_path))

    if multi_gpus == True:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)
    net.eval()

    # dataset and dataloader
    megaface_dataset = MegaFace(face_scrub_path, megaface_path)
    megaface_dataloader = DataLoader(megaface_dataset, batch_size=batch_size, shuffle=False, num_workers=12, drop_last=False)

    for data in megaface_dataloader:
        img, img_path= data[0].to(device), data[1]
        with torch.no_grad():
            output = net(img).data.cpu().numpy()

        if do_norm is False:
            for i in range(len(img_path)):
                abs_path = img_path[i] + '.feat'
                write_mat(abs_path, output[i])
            print('extract 1 batch...without feature normalization')
        else:
            for i in range(len(img_path)):
                abs_path = img_path[i] + '.feat'
                feat = output[i]
                feat = feat / np.sqrt((np.dot(feat, feat)))
                write_mat(abs_path, feat)
            print('extract 1 batch...with feature normalization')
    print('all images have been processed!')
Example #26
class LungDetection(object):
    def __init__(self, model_path):
        max_stride = 16
        margin = 32
        stride = 4
        sidelen = 144
        pad_value = 170
        self.split_comber = split_combine.SplitComb(sidelen, max_stride,
                                                    stride, margin, pad_value)

        # detection net
        config1, nod_net, loss, get_pbb = res18.get_model()
        checkpoint = torch.load(model_path)
        nod_net.load_state_dict(checkpoint)
        self.get_pbb = get_pbb
        self.nod_net = DataParallel(nod_net).cuda()
        self.nod_net.eval()
        del nod_net

    @func_set_timeout(40)
    def prediction(self, imgs, coord, nzhw, spacing, endbox, batch=1):
        splitlist = list(range(0, len(imgs), batch))
        if splitlist[-1] != len(imgs):
            splitlist.append(len(imgs))

        def _run(start, end):
            with torch.no_grad():
                input = Variable(imgs[start:end]).cuda()
                inputcoord = Variable(coord[start:end]).cuda()
                output = self.nod_net(input, inputcoord)
                result = output.data.cpu().numpy()
                del input, inputcoord, output
                return result

        outputlist = [
            _run(start, end)
            for start, end in zip(splitlist[:-1], splitlist[1:])
        ]
        output = np.concatenate(outputlist, 0)

        output = self.split_comber.combine(output, nzhw=nzhw)
        thresh = -3
        pbb, _ = self.get_pbb(output, thresh, ismask=True)
        torch.cuda.empty_cache()
        pbb = nms(pbb, 0.05)
        nodule_df = pbb_to_df(pbb, spacing, endbox)
        nodule_df = nodule_df[nodule_df.probability > 0.25]
        return nodule_df, pbb
Example #27
    def loadModel(self,
                  model_path,
                  model_weights_path,
                  gpu_ids=(),
                  is_eval=True):
        print("load model uses CPU...")
        model = torch.load(model_path,
                           map_location=lambda storage, loc: storage)
        print("load weights uses CPU...")
        weights = torch.load(model_weights_path,
                             map_location=lambda storage, loc: storage)

        if hasattr(model, "module"):
            print("deal with dataparallel and extract module...")
            model = model.module
            from collections import OrderedDict
            new_state_dict = OrderedDict()
            for k, v in weights.items():
                name = k[7:]  # remove `module.`
                new_state_dict[name] = v
            weights = new_state_dict
            # load params

        model.load_state_dict(weights)
        if torch.cuda.is_available() and (len(gpu_ids) == 1):
            print("convert to GPU %s" % str(gpu_ids))
            model = model.cuda()
        elif torch.cuda.is_available() and (len(gpu_ids) > 1):
            print("convert to GPUs %s" % str(gpu_ids))
            model = DataParallel(model, gpu_ids).cuda()
        if is_eval:
            return model.eval()
        else:
            return model
Example #28
def greedy(stage=1):
    p = None
    m = DataParallel(ModelStack(1)).to(o.device)
    if stage > 1:
        p = DataParallel(ModelStack(stage - 1)).to(o.device)
        load(p, "save/01-10g.tar")
        p.eval()
        p.stage = stage - 1
        # init stage using stage-1
        a = change_key(p.module.m[-1].state_dict(), lambda x: f"m.0.{x}")
        load(m, a)
    train(m, p)
    # concat and save
    a = change_key(m.module.m[0].state_dict(), lambda x: f"m.{stage-1}." + x)
    if p:
        a.update(p.module.state_dict())
    torch.save(a, "save/01-10g.tar")
Example #29
def load_model(backbone, device_ids, test_model_path, use_se):
    if backbone == 'resnet18_finger':
        model = resnet.resnet18_finger(use_se)
    elif backbone == 'resnet18':
        model = resnet.resnet18(pretrained=False)
    elif backbone == 'resnet34':
        model = resnet.resnet34(pretrained=False)
    elif backbone == 'resnet50':
        model = resnet.resnet50(pretrained=False)
    if opt.multi_gpus:
        model = DataParallel(model, device_ids=device_ids)
    model.load_state_dict(torch.load(test_model_path))
    #model.to(torch.device("cuda"))
    if torch.cuda.is_available():
        model = model.cuda()
    model.eval()
    return model
Example #30
def test(best_roc, fold, device, val_loader, test_loader, val_d, test_df,
         meta_features, oof):
    best_model_path = model_dir + [
        file for file in os.listdir(model_dir)
        if str(round(best_roc, 3)) in file and "Fold" + str(fold) in file
    ][0]

    preds = torch.zeros((len(test_df), 1), dtype=torch.float32, device=device)
    # add meta feature from the csv file
    model = DataParallel(
        EfficientNetwork(1, args.arch, meta_features).to(device))
    model.load_state_dict(
        torch.load(best_model_path))  # Loading best model of this fold
    model.eval()  # switch model to the evaluation mode
    with torch.no_grad():
        # Predicting on validation set once again to obtain data for OOF
        # print(f"-------------saving results to oof---------------")
        # val_preds = torch.zeros((len(val_d), 1), dtype=torch.float32, device=device)
        # for j, (x_val, y_val) in tqdm(enumerate(val_loader), total=len(val_loader)):
        #     y_val = y_val.to(device)
        #     if args.use_meta_features:
        #         l = x_val[0].shape[0]
        #     else:
        #         l = x_val.shape[0]
        #     z_val = model(x_val)
        #     val_pred = torch.sigmoid(z_val)
        #     val_preds[j * l:j * l + l] = val_pred
        # oof[val_idx] = val_preds.cpu().numpy()

        # Predicting on test set
        # tta_preds = torch.zeros((len(test_df), 1), dtype=torch.float32, device=device)
        for j in range(args.TTA):
            print(f"processing {j + 1}th TTA")
            for i, x_test in tqdm(enumerate(test_loader),
                                  total=len(test_loader)):
                if args.use_meta_features:
                    l = x_test[0].shape[0]
                else:
                    l = x_test.shape[0]
                z_test = model(x_test)
                z_test = torch.sigmoid(z_test)
                preds[i * test_loader.batch_size:i * test_loader.batch_size +
                      l] += z_test
    preds /= args.TTA
    return preds
Example #31
        total_loss = criterion(output, label)
        total_loss.backward()
        optimizer_ft.step()

        train_total_loss += total_loss.item() * batch_size
        total += batch_size

    train_total_loss = train_total_loss / total
    time_elapsed = time.time() - since
    loss_msg = '    total_loss: {:.4f} time: {:.0f}m {:.0f}s'\
        .format(train_total_loss, time_elapsed // 60, time_elapsed % 60)
    _print(loss_msg)

    # test model on lfw
    if epoch % TEST_FREQ == 0:
        net.eval()
        featureLs = None
        featureRs = None
        _print('Test Epoch: {} ...'.format(epoch))
        for data in testloader:
            for i in range(len(data)):
                data[i] = data[i].cuda()
            res = [net(d).data.cpu().numpy() for d in data]
            featureL = np.concatenate((res[0], res[1]), 1)
            featureR = np.concatenate((res[2], res[3]), 1)
            if featureLs is None:
                featureLs = featureL
            else:
                featureLs = np.concatenate((featureLs, featureL), 0)
            if featureRs is None:
                featureRs = featureR