Exemple #1
0
class Feature_extract(object):
    """Loads a face-recognition ResNet-18 and extracts L2-normalised feature vectors."""

    def __init__(self):
        # NOTE(review): assumes a CUDA device is present — torch.device("cuda")
        # will fail at .to() time on a CPU-only machine; confirm deployment target.
        self.device = torch.device("cuda")
        self.model = resnet.resnet_face18(opt.use_se)
        # Wrap before loading: the checkpoint was presumably saved from a
        # DataParallel model (keys prefixed with "module.") — confirm.
        self.model = DataParallel(self.model)
        self.model.load_state_dict(torch.load(opt.test_model_path))
        self.model.to(self.device)

        # ImageNet mean/std normalisation applied after ToTensor.
        normalize = T.Normalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225])
        self.transforms = T.Compose([T.ToTensor(), normalize])

    def feature_extract(self, img_path):
        """Return the L2-normalised feature vector of the image at `img_path`.

        :param img_path: path to an image file readable by PIL
        :return: list of floats (unit-norm embedding)
        """
        img = Image.open(img_path)
        img = img.resize((112, 112))  # network input size
        img = self.transforms(img)
        img = img.unsqueeze(0)  # add batch dimension
        with torch.no_grad():
            self.model.eval()
            data_input = img.to(self.device)
            feature = self.model(data_input)
        # First (and only) row of the batch output, as a 1-D numpy vector.
        feature = np.array(feature.cpu())[0, :]
        # The original went list -> np.mat -> norm; np.mat is deprecated and the
        # round-trip is unnecessary — norm of the 1-D array is identical.
        denom = np.linalg.norm(feature)
        return (feature / denom).tolist()
Exemple #2
0
def create_supervised_trainer_with_mask(model,
                                        optimizer,
                                        loss_fn,
                                        device=None):
    """Build an ignite Engine whose update step trains `model` on
    (img, target, masks) batches and returns (accuracy, loss_dict).

    :param model: network called as model(img, masks) -> (score, feat)
    :param optimizer: optimizer over model parameters
    :param loss_fn: callable (score, feat, target) -> (loss, loss_dict)
    :param device: optional device; enables DataParallel when >1 GPU is visible
    :return: ignite Engine wrapping the update step
    """
    if device:
        if torch.cuda.device_count() > 1:
            model = DataParallel(model)
        model.to(device)

    # Hoisted out of the per-batch step: GPU availability does not change
    # during training, so there is no need to re-query it every batch.
    use_gpu = torch.cuda.device_count() >= 1

    def _update(engine, batch):
        model.train()
        optimizer.zero_grad()
        img, target, masks = batch
        # NOTE(review): `masks` is never moved to `device`; confirm the model /
        # DataParallel scatter handles a CPU-resident mask as intended.
        if use_gpu:
            img = img.to(device)
            target = target.to(device)
        score, feat = model(img, masks)
        loss, loss_dict = loss_fn(score, feat, target)
        loss.backward()
        optimizer.step()
        # compute batch accuracy from the arg-max class scores
        acc = (score.max(1)[1] == target).float().mean()
        loss_dict['loss'] = loss.item()
        return acc.item(), loss_dict

    return Engine(_update)
Exemple #3
0
    def perturb(self, X_nat, y):
        """Return adversarial examples for (X_nat, y) via multi-model PGD.

        At each of ``self.k`` steps, the gradient of the (negated) loss is
        computed for every model in ``self.models`` and averaged; a signed
        step of size ``self.a`` is taken, the perturbation is projected back
        into the l_infinity ball of radius ``self.epsilon`` around ``X_nat``,
        and pixels are clamped to [0, 1].
        """
        if self.rand:
            # NOTE(review): .sample() with no size argument draws a single
            # scalar noise value added to every element — confirm a
            # per-element random start (sample(X_nat.shape)) was not intended.
            X = X_nat + torch.distributions.uniform.Uniform(
                -self.epsilon, self.epsilon).sample()
        else:
            X = X_nat.clone().detach()

        def normalized_eval(x, model):
            # Per-image ImageNet mean/std normalisation before the forward pass.
            normalized_x = torch.stack([F.normalize(x[i], [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) \
               for i in range(len(x))])
            return model(normalized_x)

        X_pgd = Variable(X.data, requires_grad=True)

        for i in range(self.k):
            print('local model attack iter:', i)
            grads_list = []
            loss_vals = []
            for model in self.models:
                # print('attacking model',  model.__class__.__name__)
                # zero gradient
                opt = optim.SGD([X_pgd], lr=1e-3)
                opt.zero_grad()
                model.cuda()  # set to gpu
                model = DataParallel(model)  # set parallel
                with torch.enable_grad():
                    logits = normalized_eval(X_pgd, model)
                    # NOTE(review): the loss is negated, so the signed ascent
                    # step below *descends* self.loss_fn w.r.t. `y` — consistent
                    # with a targeted attack where `y` is the target class;
                    # confirm against the caller.
                    loss = -self.loss_fn(logits, y)
                loss.backward()
                loss_vals.append(loss.item())  # for logging
                grads_list.append(
                    X_pgd.grad.data.clone())  # for computing gradient
                # print success mask
                # success_mask = (logits.argmax(1) == y).float()
                # print(i, success_mask)
                # set model to cpu
                model.to('cpu')  # to cpu to avoid gpu memory overflow
                torch.cuda.empty_cache()  # clear cache
            # gradient averaging across all local models
            grads = torch.stack(grads_list)
            grad = torch.sum(grads, dim=0) / len(self.models)
            # update: signed-gradient step of size self.a
            eta = self.a * grad.data.sign()
            X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
            eta = torch.clamp(X_pgd.data - X.data, -self.epsilon,
                              self.epsilon)  # eta [-epsilon, epsilon] range
            X_pgd = Variable(X.data + eta, requires_grad=True)
            X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0),
                             requires_grad=True)  # [0, 1] pixel range
            # logging
            print('loss: ', np.mean(loss_vals))
            torch.cuda.empty_cache()
            # break # TODO: delete

        return X_pgd.data.clone().detach()
Exemple #4
0
    def __init__(self, encoder, decoder,
                 data_loaders,
                 config):
        """Trainer setup: data loaders, optimizer/scheduler, device placement
        (optionally DataParallel across all GPUs), logging and save directory.

        :param encoder: encoder network
        :param decoder: decoder network
        :param data_loaders: dict with 'train_loader', 'valid_loader', 'test_loader'
        :param config: configuration object consumed by self.parse_config
        """
        self.parse_config(config)
        self.config = config

        # Data Loaders
        self.data_loader = data_loaders
        self.train_loader = data_loaders['train_loader']
        self.valid_loader = data_loaders['valid_loader']
        self.test_loader = data_loaders['test_loader']

        self.metrics = [F.mse_loss, nll, kl]

        # One optimizer over both networks' parameters.
        self.optimizer = torch.optim.Adam(lr=self.learning_rate,
                                          betas=self.learning_betas,
                                          params=list(encoder.parameters()) + list(decoder.parameters()))

        self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
                                                            step_size=self.scheduler_stepsize,
                                                            gamma=self.scheduler_gamma)

        # BUG FIX: the original `assert (cond, msg)` asserted a non-empty
        # tuple, which is always truthy — the check could never fire.
        assert torch.cuda.is_available() or self.gpu_id is None, \
            "Cuda not available, running on GPU not possible."

        if self.gpu_id == -1:
            # gpu_id == -1 means "use all visible GPUs" via DataParallel.
            self.device = torch.device('cuda:0')

            # Declare as parallel
            encoder = DataParallel(encoder)
            decoder = DataParallel(decoder)
        else:
            self.device = torch.device('cpu') if self.gpu_id is None else torch.device(f'cuda:{self.gpu_id}')

        # Move models to specific device in any way
        self.encoder = encoder.to(self.device)
        self.decoder = decoder.to(self.device)

        self.do_validation = True

        # Create unique foldername for current training run and save all there
        self.setup_save_dir(config)

        # Logging config
        setup_logging(self.log_path, self.logger_config_path)
        self.logger = logging.getLogger('trainer')
        self.writer = WriterTensorboardX(self.log_path, self.logger, True)

        # Used for early stopping and model saving
        self.best_validation_mse = inf

        # Parse Config and set model attributes
        self.rel_rec, self.rel_send = gen_fully_connected(self.n_atoms,
                                                          device=self.device)
Exemple #5
0
 def _set_model(self, model_weights_path):
     """Instantiate the face-recognition network, load pretrained weights and
     store it on ``self.model`` in eval mode.

     :param model_weights_path: str, path to the model weights
     :return: None
     """
     net = DataParallel(resnet_face18(False))
     state = torch.load(model_weights_path, map_location=self.torch_device)
     net.load_state_dict(state)
     net.to(self.torch_device)
     net.eval()
     self.model = net
Exemple #6
0
def test(args):
    """Run Rockfish inference on Fast5 reads and merge per-worker outputs.

    Predictions stream through a multiprocessing queue to
    ``args.output_workers`` writer processes; their temp files are then
    concatenated (in worker order) into ``args.out_path`` and removed.

    :param args: parsed CLI namespace (checkpoint, dataset paths, loader and
        worker settings, out_path as a pathlib.Path)
    """
    model = Rockfish.load_from_checkpoint(checkpoint_path=args.checkpoint)
    model.freeze()

    test_ds = Fast5Data(args.test_path, args.recursive, args.reseg_path,
                    args.norm_method, args.motif, args.sample_size, args.window)

    # Shared DataLoader arguments; prefetch_factor may only be passed when
    # num_workers > 0, so it is added conditionally instead of duplicating
    # the whole constructor call.
    dl_kwargs = dict(batch_size=args.batch_size,
                     num_workers=args.n_workers,
                     pin_memory=True,
                     worker_init_fn=worker_init_fn)
    if args.n_workers > 0:
        dl_kwargs['prefetch_factor'] = args.prefetch_factor
    test_dl = DataLoader(test_ds, **dl_kwargs)

    n_gpus = torch.cuda.device_count()
    if n_gpus > 0:
        model = DataParallel(model, device_ids=list(range(n_gpus)))
        model.to(f'cuda:{model.device_ids[0]}')

    model.eval()

    # Writer processes consume (info, predictions) tuples until a None sentinel.
    output_queue = mp.Queue()
    consumers = []
    abs_out_path = str(args.out_path.absolute())
    for i in range(args.output_workers):
        worker_path = TMP_PATH.format(final=abs_out_path, id=i)
        process = Process(target=output_worker, args=(worker_path, output_queue))
        process.start()

        consumers.append(process)

    with torch.no_grad():
        for info, sig, k_mer in tqdm(test_dl):
            pred = model(sig, k_mer).squeeze(-1)
            pred = pred.cpu().numpy()

            output_queue.put((info, pred))

    # One sentinel per consumer, then wait for all writers to drain and exit.
    for _ in range(len(consumers)):
        output_queue.put(None)
    for c in consumers:
        c.join()

    # Concatenate per-worker temp files into the final output, then clean up.
    with args.out_path.open('w') as out:
        for i in range(len(consumers)):
            worker_path = TMP_PATH.format(final=abs_out_path, id=i)
            with open(worker_path, 'r') as tmp_f:
                out.write(tmp_f.read())
            os.remove(worker_path)
def main():
    """Load the configured backbone and visualise test-set embeddings
    (3-D t-SNE, UMAP, 2-D t-SNE) batch by batch."""
    opt = Config(os.getcwd())
    if opt.backbone == 'resnet18':
        model = resnet_face18(opt.use_se)
    elif opt.backbone == 'resnet34':
        model = resnet34()
    elif opt.backbone == 'resnet50':
        model = resnet50()
    # NOTE(review): an unrecognised opt.backbone leaves `model` unbound and
    # the next line raises NameError — confirm Config restricts the value.

    model = DataParallel(model)
    model.load_state_dict(
        torch.load(opt.test_model_path, map_location={'cuda:0': 'cpu'}))
    # NOTE(review): `device` is not defined in this function — assumed to be
    # a module-level name; confirm.
    model.to(torch.device(device))
    model.eval()

    train_dataset = Dataset(opt.train_root,
                            opt.train_list,
                            phase='train',
                            input_shape=opt.input_shape)
    trainloader = data.DataLoader(train_dataset,
                                  batch_size=opt.train_batch_size,
                                  shuffle=True,
                                  num_workers=opt.num_workers)

    test_dataset = Dataset(opt.test_root,
                           opt.test_list,
                           phase='test',
                           input_shape=opt.input_shape)
    test_loader = data.DataLoader(test_dataset,
                                  batch_size=1000,
                                  shuffle=True,
                                  num_workers=opt.num_workers)

    for x, y in test_loader:
        # Inference only: disable autograd so activations are not retained
        # (the original built a graph for every 1000-sample batch).
        with torch.no_grad():
            latent_vecs = model(x)
        print(latent_vecs.shape, y.shape)
        target = y
        plot3d_tsne(latent_vecs, target)
        show_umap(latent_vecs, target)
        t_sne(latent_vecs, target)
Exemple #8
0
def send_to_cuda(device, model):
    """Place `model` on GPU(s).

    A tuple of device ids wraps the model in DataParallel and homes it on the
    first id; a single device just moves the model there.
    """
    if not isinstance(device, tuple):
        return model.cuda(device)

    parallel = DataParallel(model, device)
    return parallel.to(device[0])
def extract_feature(model_path, backbone_net, face_scrub_path, megaface_path, batch_size=32, gpus='0', do_norm=False):
    """Extract MegaFace/FaceScrub features with the chosen backbone and write
    one `.feat` file next to each image path.

    :param model_path: checkpoint path containing key 'net_state_dict'
    :param backbone_net: backbone identifier (see `builders` below)
    :param face_scrub_path: FaceScrub dataset root
    :param megaface_path: MegaFace dataset root
    :param batch_size: inference batch size
    :param gpus: comma-separated GPU id string; more than one id enables DataParallel
    :param do_norm: if True, L2-normalise each feature before writing
    """
    # Lazy factories keyed by backbone name — replaces the long if/elif chain.
    builders = {
        'MobileFace': lambda: mobilefacenet.MobileFaceNet(),
        'Res50_IR': lambda: cbam.CBAMResNet_IR(50, feature_dim=args.feature_dim, mode='ir'),
        'SERes50_IR': lambda: cbam.CBAMResNet_IR(50, feature_dim=args.feature_dim, mode='se_ir'),
        'CBAMRes50_IR': lambda: cbam.CBAMResNet_IR(50, feature_dim=args.feature_dim, mode='cbam_ir'),
        'Res100_IR': lambda: cbam.CBAMResNet_IR(100, feature_dim=args.feature_dim, mode='ir'),
        'SERes100_IR': lambda: cbam.CBAMResNet_IR(100, feature_dim=args.feature_dim, mode='se_ir'),
        'CBAMRes100_IR': lambda: cbam.CBAMResNet_IR(100, feature_dim=args.feature_dim, mode='cbam_ir'),
    }
    if backbone_net not in builders:
        # The original only printed and fell through, crashing later with an
        # UnboundLocalError on `net`; fail fast with a clear error instead.
        print(args.backbone, ' is not available!')
        raise ValueError('unknown backbone_net: %s' % backbone_net)
    net = builders[backbone_net]()

    multi_gpus = len(gpus.split(',')) > 1
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net.load_state_dict(torch.load(model_path)['net_state_dict'])
    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)
    net.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
    ])
    megaface_dataset = MegaFace(face_scrub_path, megaface_path, transform=transform)
    megaface_loader = torch.utils.data.DataLoader(megaface_dataset, batch_size=batch_size,
                                             shuffle=False, num_workers=12, drop_last=False)

    for data in megaface_loader:
        img, img_path = data[0].to(device), data[1]
        with torch.no_grad():
            output = net(img).data.cpu().numpy()

        if do_norm is False:
            for i in range(len(img_path)):
                abs_path = img_path[i] + '.feat'
                write_mat(abs_path, output[i])
            print('extract 1 batch...without feature normalization')
        else:
            for i in range(len(img_path)):
                abs_path = img_path[i] + '.feat'
                feat = output[i]
                feat = feat / np.sqrt(np.dot(feat, feat))  # L2 normalisation
                write_mat(abs_path, feat)
            print('extract 1 batch...with feature normalization')
    print('all images have been processed!')
Exemple #10
0
def loadModel(data_root, file_list, backbone_net, gpus='0', resume=None):
    """Load a CFP-FP evaluation setup: backbone + weights, device, dataset, loader.

    :param data_root: CFP-FP image root
    :param file_list: pair-list file
    :param backbone_net: backbone identifier (see `builders` below)
    :param gpus: comma-separated GPU id string; more than one id enables DataParallel
    :param resume: checkpoint path containing key 'net_state_dict'
    :return: (net in eval mode, device, cfp_dataset, cfp_loader)
    """
    # Lazy factories keyed by backbone name — replaces the long if/elif chain.
    builders = {
        'MobileFace': lambda: mobilefacenet.MobileFaceNet(),
        'Res50_IR': lambda: cbam.CBAMResNet_IR(50, feature_dim=args.feature_dim, mode='ir'),
        'SERes50_IR': lambda: cbam.CBAMResNet_IR(50, feature_dim=args.feature_dim, mode='se_ir'),
        'CBAMRes50_IR': lambda: cbam.CBAMResNet_IR(50, feature_dim=args.feature_dim, mode='cbam_ir'),
        'Res100_IR': lambda: cbam.CBAMResNet_IR(100, feature_dim=args.feature_dim, mode='ir'),
        'SERes100_IR': lambda: cbam.CBAMResNet_IR(100, feature_dim=args.feature_dim, mode='se_ir'),
        'CBAMRes100_IR': lambda: cbam.CBAMResNet_IR(100, feature_dim=args.feature_dim, mode='cbam_ir'),
    }
    if backbone_net not in builders:
        # The original only printed and fell through, crashing later with an
        # UnboundLocalError on `net`; fail fast with a clear error instead.
        print(args.backbone, ' is not available!')
        raise ValueError('unknown backbone_net: %s' % backbone_net)
    net = builders[backbone_net]()

    # gpu init
    multi_gpus = len(gpus.split(',')) > 1
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net.load_state_dict(torch.load(resume)['net_state_dict'])

    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)

    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5),
                             std=(0.5, 0.5,
                                  0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
    ])
    cfp_dataset = CFP_FP(data_root, file_list, transform=transform)
    cfp_loader = torch.utils.data.DataLoader(cfp_dataset,
                                             batch_size=128,
                                             shuffle=False,
                                             num_workers=4,
                                             drop_last=False)

    return net.eval(), device, cfp_dataset, cfp_loader
Exemple #11
0
def main():
    """Build the configured backbone, compute per-class centroids on the
    training set, then run `estimate` on the test set."""
    opt = Config(os.getcwd())
    if opt.backbone == 'resnet18':
        model = resnet_face18(opt.use_se)
    elif opt.backbone == 'resnet34':
        model = resnet34()
    elif opt.backbone == 'resnet50':
        model = resnet50()
    # NOTE(review): an unrecognised opt.backbone leaves `model` unbound and
    # the DataParallel call below raises NameError — confirm Config limits it.

    # Checkpoint presumably saved from a DataParallel model ("module." keys),
    # so wrap before loading; weights saved on cuda:0 are remapped to CPU.
    model = DataParallel(model)
    # load_model(model, opt.test_model_path)
    model.load_state_dict(
        torch.load(opt.test_model_path, map_location={'cuda:0': 'cpu'}))
    # NOTE(review): `device` is not defined in this function — assumed to be
    # a module-level name; confirm.
    model.to(torch.device(device))
    model.eval()
    global args  # no-op: `args` is never assigned in this function

    train_dataset = Dataset(opt.train_root,
                            opt.train_list,
                            phase='train',
                            input_shape=opt.input_shape)
    trainloader = data.DataLoader(train_dataset,
                                  batch_size=opt.train_batch_size,
                                  shuffle=True,
                                  num_workers=opt.num_workers)

    # Per-class centroid embeddings from the training set.
    centroid_map = create_centroid(model, trainloader)

    test_dataset = Dataset(opt.test_root,
                           opt.test_list,
                           phase='test',
                           input_shape=opt.input_shape)
    test_loader = data.DataLoader(
        test_dataset,
        batch_size=1,
        # batch_size=opt.test_batch_size,
        shuffle=True,
        num_workers=opt.num_workers)

    estimate(model, test_loader, centroid_map)
Exemple #12
0
    def __init__(self, config, model, grouper, loss, metric, n_train_steps):
        """Algorithm setup: logged metrics, optimizer/scheduler, optional
        DataParallel wrapping, then base-class initialisation.

        :param config: experiment config (device, data-parallel flag,
            grad-accumulation, scheduler settings, ...)
        :param model: network to train
        :param grouper: group-assignment object forwarded to the base class
        :param loss: loss metric; always logged
        :param metric: optional extra metric to log (may be None)
        :param n_train_steps: total number of training steps for the scheduler
        """
        # get metrics
        self.loss = loss
        logged_metrics = [
            self.loss,
        ]
        if metric is not None:
            self.metric = metric
            logged_metrics.append(self.metric)
        else:
            self.metric = None

        # initialize models, optimizers, and schedulers
        # A subclass may already have created an optimizer before calling this
        # __init__; only build one when none exists.
        if not hasattr(self, 'optimizer') or self.optimizer is None:
            self.optimizer = initialize_optimizer(config, model)
        self.max_grad_norm = config.max_grad_norm
        scheduler = initialize_scheduler(config, self.optimizer, n_train_steps)

        if config.use_data_parallel:
            model = DataParallel(model)
        model.to(config.device)

        self.batch_idx = 0
        self.gradient_accumulation_steps = config.gradient_accumulation_steps

        # initialize the module (base class handles logging/scheduler stepping)
        super().__init__(
            device=config.device,
            grouper=grouper,
            logged_metrics=logged_metrics,
            logged_fields=['objective', 'percent_src_examples'],
            schedulers=[
                scheduler,
            ],
            scheduler_metric_names=[
                config.scheduler_metric_name,
            ],
            no_group_logging=config.no_group_logging,
        )
        self.model = model
Exemple #13
0
def load_model(base_dir,
               run_name,
               experiment_mode="",
               device=None,
               force_multiple_gpu=False):
    """Restore a model, its optimizer and loss configuration from a checkpoint.

    :param base_dir: experiment root directory
    :param run_name: run identifier used to locate the checkpoint file
    :param experiment_mode: optional mode suffix for the checkpoint filename
    :param device: optional device for torch.load map_location and model placement
    :param force_multiple_gpu: wrap in DataParallel even when the saved hparams
        do not record it (for checkpoints saved before that hparam existed)
    :return: (model, model_name, optimizer, opt_name, loss_name, loss_params,
        chosen_diseases)
    """
    model_fname = get_model_fname(base_dir,
                                  run_name,
                                  experiment_mode=experiment_mode)

    checkpoint = torch.load(model_fname, map_location=device)
    hparams = checkpoint["hparams"]
    model_name = checkpoint.get("model_name", "v0")
    chosen_diseases = hparams["diseases"].split(",")
    train_resnet = hparams["train_resnet"]
    multiple_gpu = hparams.get("multiple_gpu", False)

    def extract_params(name):
        # Collect hparams prefixed "<name>_" into a dict with the prefix removed.
        params = {}
        prefix = name + "_"
        for key, value in hparams.items():
            if key.startswith(prefix):
                key = key[len(prefix):]
                params[key] = value
        return params

    opt_params = extract_params("opt")

    # Load model
    model = init_empty_model(model_name, chosen_diseases, train_resnet)

    # NOTE: this force param has to be used for cases when the hparam was not saved
    # DataParallel must wrap the model BEFORE load_state_dict so the "module."
    # key prefix in the saved state dict matches — presumably; confirm against
    # the saving code.
    if force_multiple_gpu or multiple_gpu:
        model = DataParallel(model)

    if device:
        model = model.to(device)

    # Load optimizer
    # Created before load_state_dict: loading copies into the same parameter
    # tensors, so the optimizer's references stay valid.
    opt_name = hparams["opt"]
    OptClass = optimizers.get_optimizer_class(opt_name)
    optimizer = OptClass(model.parameters(), **opt_params)

    model.load_state_dict(checkpoint["model_state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer_state_dict"])

    # Load loss
    loss_name = hparams["loss"]
    loss_params = extract_params("loss")

    # TODO: make a class to hold all of these values (and avoid changing a lot of code after any change here)
    return model, model_name, optimizer, opt_name, loss_name, loss_params, chosen_diseases
Exemple #14
0
    def __init__(self, module, device_ids=None, dtype='float32'):
        """Wrap `module` in DataParallel (unless it already is one) and cast
        it to `dtype`; the wrapper starts in training mode.

        :param module: network, plain or already DataParallel-wrapped
        :param device_ids: forwarded to DataParallel when wrapping is needed
        :param dtype: dtype name parsed by toys.parsers.parse_dtype
        """
        from toys.parsers import parse_dtype

        super().__init__()

        wrapped = module if isinstance(module, DataParallel) \
            else DataParallel(module, device_ids)

        self.device_ids = wrapped.device_ids
        self.dtype = parse_dtype(dtype)
        self.module = wrapped.to(self.dtype)
        self._train_mode = True

        self.train()
Exemple #15
0
def extract_feature(model_path, backbone_net, face_scrub_path, megaface_path, batch_size=1024, gpus='0', do_norm=False):
    """Extract MegaFace/FaceScrub features and write one `.feat` file per image.

    :param model_path: path to a saved state dict
    :param backbone_net: backbone identifier (see `backbones` below)
    :param face_scrub_path: FaceScrub dataset root
    :param megaface_path: MegaFace dataset root
    :param batch_size: inference batch size
    :param gpus: comma-separated GPU id string; more than one id enables DataParallel
    :param do_norm: if True, L2-normalise each feature before writing
    """
    # gpu init
    multi_gpus = len(gpus.split(',')) > 1
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # backbone factories — the original dict EAGERLY instantiated all five
    # networks just to select one; build only the requested backbone.
    backbones = {'MobileFaceNet': lambda: MobileFacenet(),
                 'ResNet50_IR': lambda: CBAMResNet(50, feature_dim=args.feature_dim, mode='ir'),
                 'SEResNet50_IR': lambda: CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se'),
                 'ResNet100_IR': lambda: CBAMResNet(100, feature_dim=args.feature_dim, mode='ir'),
                 'SEResNet100_IR': lambda: CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se')}
    if backbone_net not in backbones:
        # The original only printed and fell through, crashing later with an
        # UnboundLocalError on `net`; fail fast with a clear error instead.
        print(backbone_net + ' is not available!')
        raise ValueError('unknown backbone_net: %s' % backbone_net)
    net = backbones[backbone_net]()

    # load parameter
    net.load_state_dict(torch.load(model_path))

    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)
    net.eval()

    # dataset and dataloader
    megaface_dataset = MegaFace(face_scrub_path, megaface_path)
    megaface_dataloader = DataLoader(megaface_dataset, batch_size=batch_size, shuffle=False, num_workers=12, drop_last=False)

    for data in megaface_dataloader:
        img, img_path = data[0].to(device), data[1]
        with torch.no_grad():
            output = net(img).data.cpu().numpy()

        if do_norm is False:
            for i in range(len(img_path)):
                abs_path = img_path[i] + '.feat'
                write_mat(abs_path, output[i])
            print('extract 1 batch...without feature normalization')
        else:
            for i in range(len(img_path)):
                abs_path = img_path[i] + '.feat'
                feat = output[i]
                feat = feat / np.sqrt(np.dot(feat, feat))  # L2 normalisation
                write_mat(abs_path, feat)
            print('extract 1 batch...with feature normalization')
    print('all images have been processed!')
def loadModel(args, idx):
    """Load the idx-th backbone configured in `args`, restore its checkpoint
    and return the network in eval mode.

    :param args: namespace with list-valued `backbone_net`, `feature_dim`, `resume`
    :param idx: index of the configured model to load
    :return: the network in eval mode, on GPU when available
    """
    # Lazy factories keyed by backbone name — replaces the long if/elif chain.
    builders = {
        'MobileFace': lambda: mobilefacenet.MobileFaceNet(),
        'CBAM_50': lambda: cbam.CBAMResNet(50, feature_dim=args.feature_dim[idx], mode='ir'),
        'CBAM_50_SE': lambda: cbam.CBAMResNet(50, feature_dim=args.feature_dim[idx], mode='ir_se'),
        'CBAM_100': lambda: cbam.CBAMResNet(100, feature_dim=args.feature_dim[idx], mode='ir'),
        'CBAM_100_SE': lambda: cbam.CBAMResNet(100, feature_dim=args.feature_dim[idx], mode='ir_se'),
        'CBAM_152': lambda: cbam.CBAMResNet(152, feature_dim=args.feature_dim[idx], mode='ir'),
        'CBAM_152_SE': lambda: cbam.CBAMResNet(152, feature_dim=args.feature_dim[idx], mode='ir_se'),
        'Attention_56': lambda: attention.ResidualAttentionNet_56(feature_dim=args.feature_dim[idx]),
    }
    name = args.backbone_net[idx]
    if name not in builders:
        # The original used `assert 1 == 0`, which is stripped under
        # `python -O` and would leave net=None to crash later; raise the same
        # exception type explicitly so the failure always happens here.
        print(name, ' is not available!')
        raise AssertionError('unknown backbone: %s' % name)
    net = builders[name]()

    # gpu init
    multi_gpus = False
    # BUG FIX: the device setup below was commented out, leaving `device`
    # undefined and net.to(device) raising NameError; restore a local device.
    # if len(args.gpus.split(',')) > 1:
    #     multi_gpus = True
    # os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net.load_state_dict(torch.load(args.resume[idx])['net_state_dict'])

    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)

    return net.eval()
Exemple #17
0
def get_model(conf, num_class=10, data_parallel=True):
    """Build the model named by conf['type'] and place it on device(s).

    :param conf: dict with a 'type' key naming the architecture
    :param num_class: kept for interface compatibility (unused here)
    :param data_parallel: wrap in DataParallel on CUDA when True, otherwise
        move to the module-level `device`
    :raises NameError: if conf['type'] is not a known model
    """
    name = conf['type']

    builders = {'mixnet_m': mixnet_m, 'dsq_mixnet_m': dsq_mixnet_m}
    if name not in builders:
        raise NameError('no model named, %s' % name)
    model = builders[name]()

    if data_parallel:
        model = DataParallel(model.cuda())
    else:
        model = model.to(device)
    cudnn.benchmark = True
    return model
def loadModel(backbone_net, feature_dim, gpus, resume, root, dev_path, flip):
    """Load an LFW evaluation setup: backbone + weights, device, dataset, loader.

    :param backbone_net: backbone identifier (see `builders` below)
    :param feature_dim: embedding dimension for the CBAM/attention backbones
    :param gpus: comma-separated GPU id string; more than one id enables DataParallel
    :param resume: checkpoint path containing key 'net_state_dict'
    :param root: LFW image root
    :param dev_path: pair-list file
    :param flip: forwarded to the LFW dataset (flip augmentation flag)
    :return: (net in eval mode, device, lfw_dataset, lfw_loader)
    """
    # Lazy factories keyed by backbone name — replaces the long if/elif chain.
    builders = {
        'MobileFace': lambda: mobilefacenet.MobileFaceNet(),
        'CBAM_50': lambda: cbam.CBAMResNet(50, feature_dim=feature_dim, mode='ir'),
        'CBAM_50_SE': lambda: cbam.CBAMResNet(50, feature_dim=feature_dim, mode='ir_se'),
        'CBAM_100': lambda: cbam.CBAMResNet(100, feature_dim=feature_dim, mode='ir'),
        'CBAM_100_SE': lambda: cbam.CBAMResNet(100, feature_dim=feature_dim, mode='ir_se'),
        'CBAM_152': lambda: cbam.CBAMResNet(152, feature_dim=feature_dim, mode='ir'),
        'CBAM_152_SE': lambda: cbam.CBAMResNet(152, feature_dim=feature_dim, mode='ir_se'),
        'Attention_56': lambda: attention.ResidualAttentionNet_56(feature_dim=feature_dim),
    }
    if backbone_net not in builders:
        # The original used `assert 1 == 0`, which disappears under
        # `python -O` and would leave net=None; raise the same exception type
        # explicitly so the failure always happens here.
        print(backbone_net, ' is not available!')
        raise AssertionError('unknown backbone: %s' % backbone_net)
    net = builders[backbone_net]()

    # gpu init
    multi_gpus = len(gpus.split(',')) > 1
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net.load_state_dict(torch.load(resume)['net_state_dict'])

    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)

    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5),
                             std=(0.5, 0.5,
                                  0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
    ])
    lfw_dataset = LFW(root, dev_path, transform=transform, flip=flip)
    lfw_loader = DataLoader(lfw_dataset, batch_size=1, shuffle=False)

    return net.eval(), device, lfw_dataset, lfw_loader
Exemple #19
0
def loadModel(data_root,
              file_list,
              backbone_net,
              gpus='0',
              model_para_path=None):
    """Load an AgeDB-30 evaluation setup: backbone + weights, device, dataset, loader.

    :param data_root: AgeDB-30 image root
    :param file_list: pair-list file
    :param backbone_net: backbone identifier (see `backbones` below)
    :param gpus: NOTE(review): unused — GPU selection reads the module-level
        `args.gpus` / `args.use_multi_gpus` instead; confirm before relying on it
    :param model_para_path: path to a saved state dict
    :return: (net in eval mode, device, agedb_dataset, agedb_loader)
    """
    # gpu init
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, args.gpus))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # backbone factories — the original dict EAGERLY instantiated all five
    # networks just to select one; build only the requested backbone.
    backbones = {
        'MobileFaceNet':
        lambda: MobileFacenet(),
        'ResNet50_IR':
        lambda: CBAMResNet(50, feature_dim=args.feature_dim, mode='ir'),
        'SEResNet50_IR':
        lambda: CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se'),
        'ResNet100_IR':
        lambda: CBAMResNet(100, feature_dim=args.feature_dim, mode='ir'),
        'SEResNet100_IR':
        lambda: CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se')
    }
    if backbone_net not in backbones:
        # The original only printed and fell through, crashing later with an
        # UnboundLocalError on `net`; fail fast with a clear error instead.
        print(backbone_net + ' is not available!')
        raise ValueError('unknown backbone_net: %s' % backbone_net)
    net = backbones[backbone_net]()

    # load parameter
    net.load_state_dict(torch.load(model_para_path))

    if args.use_multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)

    # dataset and dataloader
    agedb_dataset = AgeDB30(data_root, file_list)
    agedb_loader = DataLoader(agedb_dataset,
                              batch_size=128,
                              shuffle=False,
                              num_workers=4,
                              drop_last=False)

    return net.eval(), device, agedb_dataset, agedb_loader
Exemple #20
0
 def __init__(self, model_name, dataset_name, batch_size=400):
     """Load a pretrained classifier for `dataset_name` plus the dataset's
     normalisation statistics, and store it in eval mode on `self.model`.

     :param model_name: key into model_class_dict; 'pt_*' names load torchvision
         pretrained weights, others load a local checkpoint
     :param dataset_name: 'imagenet', 'cifar-10', or anything else (third
         stat set)
     :param batch_size: stored for later batched evaluation
     """
     self.model_name = model_name
     self.batch_size = batch_size
     # Per-dataset channel mean/std, shaped (1, 3, 1, 1) for broadcasting
     # over NCHW batches.
     if dataset_name == 'imagenet':
         self.mean = np.reshape([0.485, 0.456, 0.406], [1, 3, 1, 1])
         self.std = np.reshape([0.229, 0.224, 0.225], [1, 3, 1, 1])
     elif dataset_name == 'cifar-10':
         self.mean = np.reshape([0.4914, 0.4822, 0.4465], [1, 3, 1, 1])
         self.std = np.reshape([0.2023, 0.1994, 0.2010], [1, 3, 1, 1])
     else:
         self.mean = np.reshape([0.491, 0.482, 0.446], [1, 3, 1, 1])
         self.std = np.reshape([0.202, 0.199, 0.201], [1, 3, 1, 1])

     if model_name in ['pt_vgg', 'pt_resnet', 'pt_inception', 'pt_densenet', 'pt_alexnet']:
         # Torchvision-style pretrained model, wrapped for multi-GPU use.
         model = model_class_dict[model_name](pretrained=True)
         model = DataParallel(model.cuda())
     else:
         if model_name == 'vgg':
             model = model_class_dict[model_name]('VGG16')
         else:
             model = model_class_dict[model_name]()

         if dataset_name == 'cifar-10':
             checkpoint = torch.load('../cifar-10/trainedModel/' + model_name + 'model.pth')
             checkpoint = checkpoint['net']
             # Strip the first 7 characters of each key — presumably the
             # "module." prefix left by a DataParallel save; confirm.
             new_checkpoint = OrderedDict()
             for k,v in checkpoint.items():
                 name = k[7:]
                 new_checkpoint[name] = v
             model.load_state_dict(new_checkpoint)
         else:
             checkpoint = torch.load('../kaggle-cifar10/trainedModel/' + model_name + '.pth.tar')
             model.load_state_dict(checkpoint['state_dict'])
         model = model.to('cuda')
         model = DataParallel(model)

         # model.float()
     cudnn.benchmark = True
     self.mean, self.std = self.mean.astype(np.float32), self.std.astype(np.float32)
     model.eval()
     self.model = model
Exemple #21
0
    def run_once(self,
                 opt,
                 run_engine_opt,
                 log_dir,
                 prev_log_dir=None,
                 fold_idx=0):
        """Run the defined `run_step` of each configured engine once (one phase).

        Args:
            opt: phase configuration dict with at least `batch_size`,
                `target_info`, `run_info` and `nr_epochs` entries.
            run_engine_opt: per-engine configuration mapping engine name to
                its `run_step`, `callbacks` and `nr_procs`.
            log_dir: output directory for tensorboard/json statistics.
            prev_log_dir: previous phase directory; used to resolve a
                checkpoint when a network's `pretrained` entry is -1.
            fold_idx: cross-validation fold index forwarded to the data
                generator.
        """
        check_manual_seed(self.seed)

        log_info = {}
        if self.logging:
            # start from a clean logging directory for this run
            rm_n_mkdir(log_dir)

            tfwriter = SummaryWriter(log_dir=log_dir)
            json_log_file = log_dir + "/stats.json"
            with open(json_log_file, "w") as json_file:
                json.dump({}, json_file)  # create empty file
            log_info = {
                "json_file": json_log_file,
                "tfwriter": tfwriter,
            }

        ####
        # one dataloader per engine (train / valid / ...)
        loader_dict = {}
        for runner_name, runner_opt in run_engine_opt.items():
            loader_dict[runner_name] = self._get_datagen(
                opt["batch_size"][runner_name],
                runner_name,
                opt["target_info"]["gen"],
                nr_procs=runner_opt["nr_procs"],
                fold_idx=fold_idx,
            )

        ####
        def get_last_chkpt_path(prev_phase_dir, net_name):
            # Resolve the checkpoint of the highest epoch recorded in the
            # previous phase's stats.json.
            stat_file_path = prev_phase_dir + "/stats.json"
            with open(stat_file_path) as stat_file:
                info = json.load(stat_file)
            epoch_list = [int(v) for v in info.keys()]
            last_chkpts_path = "%s/%s_epoch=%d.tar" % (
                prev_phase_dir,
                net_name,
                max(epoch_list),
            )
            return last_chkpts_path

        # TODO: adding way to load pretrained weight or resume the training
        # parsing the network and optimizer information
        net_run_info = {}
        net_info_opt = opt["run_info"]
        for net_name, net_info in net_info_opt.items():
            assert inspect.isclass(net_info["desc"]) or inspect.isfunction(
                net_info["desc"]
            ), "`desc` must be a Class or Function which instantiate NEW objects !!!"
            net_desc = net_info["desc"]()

            # TODO: customize print-out for each run ?
            # summary_string(net_desc, (3, 270, 270), device='cpu')

            pretrained_path = net_info["pretrained"]
            if pretrained_path is not None:
                if pretrained_path == -1:
                    # * depend on logging format so may be broken if logging format has been changed
                    pretrained_path = get_last_chkpt_path(
                        prev_log_dir, net_name)
                    net_state_dict = torch.load(pretrained_path)["desc"]
                else:
                    chkpt_ext = os.path.basename(pretrained_path).split(
                        ".")[-1]
                    if chkpt_ext == "npz":
                        net_state_dict = dict(np.load(pretrained_path))
                        net_state_dict = {
                            k: torch.from_numpy(v)
                            for k, v in net_state_dict.items()
                        }
                    elif chkpt_ext == "tar":  # ! assume same saving format we desire
                        net_state_dict = torch.load(pretrained_path)["desc"]
                    else:
                        # fail loudly instead of leaving `net_state_dict`
                        # unbound (previously a confusing NameError below)
                        raise ValueError(
                            "Unsupported checkpoint extension: %s" % chkpt_ext)

                colored_word = colored(net_name, color="red", attrs=["bold"])
                print("Model `%s` pretrained path: %s" %
                      (colored_word, pretrained_path))

                # load_state_dict returns (missing keys, unexpected keys)
                net_state_dict = convert_pytorch_checkpoint(net_state_dict)
                load_feedback = net_desc.load_state_dict(net_state_dict,
                                                         strict=False)
                # * uncomment for your convenience
                print("Missing Variables: \n", load_feedback[0])
                print("Detected Unknown Variables: \n", load_feedback[1])

            # * extremely slow to pass this on DGX with 1 GPU, why (?)
            net_desc = DataParallel(net_desc)
            net_desc = net_desc.to("cuda")
            # print(net_desc) # * dump network definition or not?
            optimizer, optimizer_args = net_info["optimizer"]
            optimizer = optimizer(net_desc.parameters(), **optimizer_args)
            # TODO: expand for external aug for scheduler
            scheduler = net_info["lr_scheduler"](optimizer)
            net_run_info[net_name] = {
                "desc": net_desc,
                "optimizer": optimizer,
                "lr_scheduler": scheduler,
                # TODO: standardize API for external hooks
                "extra_info": net_info["extra_info"],
            }

        # parsing the running engine configuration
        assert ("train" in run_engine_opt
                ), "No engine for training detected in description file"

        # initialize runner and attach callback afterward
        # * all engine shared the same network info declaration
        runner_dict = {}
        for runner_name, runner_opt in run_engine_opt.items():
            runner_dict[runner_name] = RunEngine(
                dataloader=loader_dict[runner_name],
                engine_name=runner_name,
                run_step=runner_opt["run_step"],
                run_info=net_run_info,
                log_info=log_info,
            )

        # wire callbacks; a callback may itself trigger another engine
        # (e.g. validation triggered from the training engine)
        for runner_name, runner in runner_dict.items():
            callback_info = run_engine_opt[runner_name]["callbacks"]
            for event, callback_list in callback_info.items():
                for callback in callback_list:
                    if callback.engine_trigger:
                        triggered_runner_name = callback.triggered_engine_name
                        callback.triggered_engine = runner_dict[
                            triggered_runner_name]
                    runner.add_event_handler(event, callback)

        # retrieve main runner
        main_runner = runner_dict["train"]
        main_runner.state.logging = self.logging
        main_runner.state.log_dir = log_dir
        # start the run loop
        main_runner.run(opt["nr_epochs"])

        print("\n")
        print("########################################################")
        print("########################################################")
        print("\n")
        return
Exemple #22
0
def main(verbose: int = 1,
         print_freq: int = 100,
         restore: Union[bool, str] = True,
         val_freq: int = 1,
         run_id: str = "model",
         dset_name: str = "memento_frames",
         model_name: str = "frames",
         freeze_until_it: int = 1000,
         additional_metrics: Optional[Mapping[str, Callable]] = None,
         debug_n: Optional[int] = None,
         batch_size: int = cfg.BATCH_SIZE,
         require_strict_model_load: bool = False,
         restore_optimizer=True,
         optim_string='adam',
         lr=0.01) -> None:
    """Train a memorability model on the given dataset.

    Args:
        verbose: if truthy, print per-iteration loss details.
        print_freq: unused here; kept for interface compatibility.
        restore: True to restore from the default last checkpoint, a path
            string to restore from that checkpoint, or False to start fresh.
        val_freq: run validation every `val_freq` epochs.
        run_id: subdirectory name for checkpoints and logs.
        dset_name: dataset identifier passed to `get_dataset`.
        model_name: model identifier passed to `get_model`.
        freeze_until_it: iteration at which the encoder is unfrozen.
        additional_metrics: mapping of metric name -> callable evaluated
            during validation; defaults to {'rc': rc}. (Default is built
            inside the function to avoid a shared mutable default argument.)
        debug_n: if set, restrict train/test sets to the first n samples.
        batch_size: training/eval batch size.
        require_strict_model_load: forwarded to the state-dict loaders.
        restore_optimizer: also restore the optimizer state when restoring.
        optim_string: 'adam' or 'sgd'.
        lr: initial learning rate.

    Raises:
        RuntimeError: for an unrecognized `optim_string`.
    """
    # Avoid the mutable-default-argument pitfall: build the default mapping
    # per call instead of sharing one dict across all invocations.
    if additional_metrics is None:
        additional_metrics = {'rc': rc}

    print("TRAINING MODEL {} ON DATASET {}".format(model_name, dset_name))

    ckpt_savedir = os.path.join(cfg.DATA_SAVEDIR, run_id, cfg.CKPT_DIR)
    print("Saving ckpts to {}".format(ckpt_savedir))
    logs_savepath = os.path.join(cfg.DATA_SAVEDIR, run_id, cfg.LOGDIR)
    print("Saving logs to {}".format(logs_savepath))
    utils.makedirs([ckpt_savedir, logs_savepath])
    last_ckpt_path = os.path.join(ckpt_savedir, "last_model.pth")

    device = utils.set_device()

    print('DEVICE', device)

    # model
    model = get_model(model_name, device)
    # print("model", model)
    model = DataParallel(model)

    # must call this before constructing the optimizer:
    # https://pytorch.org/docs/stable/optim.html
    model.to(device)

    # set up training
    # TODO better one?

    if optim_string == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    elif optim_string == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=lr,
                                    momentum=0.9,
                                    weight_decay=0.0001)
    else:
        raise RuntimeError(
            "Unrecognized optimizer string {}".format(optim_string))

    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=5,
                                                   gamma=0.1)
    # criterion = MemAlphaLoss(device=device)
    # criterion = MemMSELoss()
    # criterion = lambda x, y: MemMSELoss()(x, y) +
    # CaptionsLoss(device=device)(x, y)
    losses = {
        'mem_mse':
        MemMSELoss(device=device, weights=np.load("memento_weights.npy")),
        'captions':
        CaptionsLoss(device=device,
                     class_weights=cap_utils.get_vocab_weights())
    }

    initial_epoch = 0
    iteration = 0
    unfrozen = False

    if restore:
        ckpt_path = restore if isinstance(restore, str) else last_ckpt_path

        if os.path.exists(ckpt_path):

            print("Restoring weights from {}".format(ckpt_path))

            ckpt = torch.load(ckpt_path)
            utils.try_load_state_dict(model, ckpt['model_state_dict'],
                                      require_strict_model_load)

            if restore_optimizer:
                utils.try_load_optim_state(optimizer,
                                           ckpt['optimizer_state_dict'],
                                           require_strict_model_load)
            initial_epoch = ckpt['epoch']
            iteration = ckpt['it']
    else:
        ckpt_path = last_ckpt_path

    # dataset
    train_ds, val_ds, test_ds = get_dataset(dset_name)
    assert val_ds or test_ds

    if debug_n is not None:
        train_ds = Subset(train_ds, range(debug_n))
        test_ds = Subset(test_ds, range(debug_n))

    train_dl = DataLoader(train_ds,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=cfg.NUM_WORKERS)
    test_dl = DataLoader(test_ds,
                         batch_size=batch_size,
                         shuffle=False,
                         num_workers=cfg.NUM_WORKERS)

    # training loop
    start = time.time()

    try:
        for epoch in range(initial_epoch, cfg.NUM_EPOCHS):
            logger = SummaryWriter(logs_savepath)

            # effectively puts the model in train mode.
            # Opposite of model.eval()
            model.train()

            print("Epoch {}".format(epoch))

            for i, (x, y_) in tqdm(enumerate(train_dl),
                                   total=len(train_ds) / batch_size):

                y: ModelOutput[MemModelFields] = ModelOutput(y_)
                iteration += 1

                # unfreeze the encoder once we have passed the warmup window
                if not unfrozen and iteration > freeze_until_it:
                    print("Unfreezing encoder")
                    unfrozen = True

                    for param in model.parameters():
                        param.requires_grad = True

                logger.add_scalar('DataTime', time.time() - start, iteration)

                x = x.to(device)
                y = y.to_device(device)

                out = ModelOutput(model(x, y.get_data()))
                loss_vals = {name: l(out, y) for name, l in losses.items()}
                # print("loss_vals", loss_vals)
                loss = torch.stack(list(loss_vals.values()))

                if verbose:
                    print("stacked loss", loss)
                loss = loss.sum()
                # loss = criterion(out, y)

                # I think this zeros out previous gradients (in case people
                # want to accumulate gradients?)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # logging
                utils.log_loss(logger, loss, loss_vals, iteration)
                logger.add_scalar('ItTime', time.time() - start, iteration)
                start = time.time()

                # display metrics

            # do some validation

            if (epoch + 1) % val_freq == 0:
                print("Validating...")
                model.eval()  # puts model in validation mode
                val_iteration = iteration

                with torch.no_grad():

                    labels: Optional[ModelOutput[MemModelFields]] = None
                    preds: Optional[ModelOutput[MemModelFields]] = None
                    val_losses = []

                    for i, (x, y_) in tqdm(enumerate(test_dl),
                                           total=len(test_ds) / batch_size):
                        val_iteration += 1

                        y = ModelOutput(y_)
                        y_numpy = y.to_numpy()

                        labels = y_numpy if labels is None else labels.merge(
                            y_numpy)

                        x = x.to(device)
                        y = y.to_device(device)

                        out = ModelOutput(model(x, y.get_data()))
                        out_numpy = out.to_device('cpu').to_numpy()
                        preds = out_numpy if preds is None else preds.merge(
                            out_numpy)

                        loss_vals = {
                            name: l(out, y)
                            for name, l in losses.items()
                        }
                        loss = torch.stack(list(loss_vals.values())).sum()
                        utils.log_loss(logger,
                                       loss,
                                       loss_vals,
                                       val_iteration,
                                       phase='val')

                        val_losses.append(loss)

                    print("Calculating validation metric...")
                    # print("preds", {k: v.shape for k, v in preds.items()})
                    # assert False
                    metrics = {
                        fname: f(labels, preds, losses)
                        for fname, f in additional_metrics.items()
                    }
                    print("Validation metrics", metrics)

                    for k, v in metrics.items():
                        if isinstance(v, numbers.Number):
                            logger.add_scalar('Metric_{}'.format(k), v,
                                              iteration)

                    metrics['total_val_loss'] = sum(val_losses)

                    ckpt_path = os.path.join(
                        ckpt_savedir, utils.get_ckpt_path(epoch, metrics))
                    save_ckpt(ckpt_path, model, epoch, iteration, optimizer,
                              dset_name, model_name, metrics)

            # end of epoch
            lr_scheduler.step()

            save_ckpt(last_ckpt_path, model, epoch, iteration, optimizer,
                      dset_name, model_name)

    except KeyboardInterrupt:
        print('Got keyboard interrupt, saving model...')
        save_ckpt(last_ckpt_path, model, epoch, iteration, optimizer,
                  dset_name, model_name)
Exemple #23
0
def main(config_path, resumable=False):
    """Train, test and validate a UNet model described by a config file.

    Args:
        config_path: path to the run configuration file.
        resumable: if True and a snapshot exists, resume from it.

    After training (or on interrupt), the best model is reloaded, predictions
    are generated for the test set and validated against ERA5 and the AWS DB;
    accuracies and plots are written into the run directory.
    """
    config = load_config(config_path)
    device = torch.device("cuda:0")

    model = create_model(UNet, config)
    if torch.cuda.device_count() > 1:
        print("Using DataParallel")
        model = DataParallel(model)
    model = model.to(device)
    opt = torch.optim.Adam(
        model.parameters(),
        lr=config.learning_rate,
        weight_decay=config.l2_reg_weight,
    )
    sched = torch.optim.lr_scheduler.MultiStepLR(opt, config.lr_milestones,
                                                 config.lr_step_gamma)
    grad_scaler = torch.cuda.amp.GradScaler()

    metric_checker = MetricImprovementChecker(MaxMetricTracker(-np.inf),
                                              MET_MCC)
    root_dir = config.run_dir
    snap_handler = SnapshotHandler(root_dir, model, opt, sched, metric_checker)
    resume = resumable and snap_handler.can_resume()
    if resume:
        print("Resuming")
    print(f"Initializing run dir: {root_dir}")
    train_summary, test_summary = init_run_dir(root_dir,
                                               config_path,
                                               resume=resume)

    last_epoch = 0
    if resume:
        (
            last_epoch,
            model,
            opt,
            sched,
            metric_checker,
        ) = snap_handler.load_full_snapshot()

    land_mask = torch.tensor(np.load(config.land_mask_path))
    #
    # Training data
    #
    train_ds = build_full_dataset_from_config(config, land_mask, True)
    #
    # Test Data
    #
    test_ds, test_input_ds, test_era_ds = build_full_dataset_from_config(
        config, land_mask, False)

    train_dataloader = torch.utils.data.DataLoader(
        train_ds,
        batch_size=config.train_batch_size,
        shuffle=True,
        drop_last=config.drop_last,
    )
    test_dataloader = torch.utils.data.DataLoader(
        test_ds,
        batch_size=config.test_batch_size,
        shuffle=False,
        drop_last=False,
    )
    if not resume:
        snap_handler.take_model_snapshot()
    try:
        for epoch in range(last_epoch, config.epochs):
            train_summary.add_scalar("learning_rate",
                                     next(iter(opt.param_groups))["lr"], epoch)
            train(
                model,
                device,
                grad_scaler,
                train_dataloader,
                opt,
                land_mask,
                train_summary,
                epoch,
                config,
            )
            loss, cm = test(
                model,
                device,
                grad_scaler,
                test_dataloader,
                opt,
                land_mask,
                test_summary,
                epoch,
                config,
            )
            # snapshot whenever the tracked metric improves
            if metric_checker.check(cm):
                snap_handler.take_model_snapshot()
            log_metrics(test_summary, cm, epoch)
            sched.step()
            if epoch % 3 == 0 and epoch != 0:
                snap_handler.take_full_snapshot(epoch)
    except KeyboardInterrupt:
        print("Exiting training loop")
    except Exception as e:
        print(f"\n{e}")
        # bare `raise` re-raises the active exception and preserves the
        # original traceback (unlike `raise e`, which restarts it here)
        raise
    finally:
        train_summary.close()
        test_summary.close()
        # Free up data for GC
        train_ds = None
        train_dataloader = None

        # Validation
        val_dates = load_dates(config.test_date_map_path)
        if config.use_prior_day:
            val_dates = val_dates[1:]

        model = snap_handler.load_best_model()
        model.eval()
        # Create and save predictions for test data
        print("Generating predictions")
        test_loader = torch.utils.data.DataLoader(
            test_input_ds,
            batch_size=config.test_batch_size,
            shuffle=False,
            drop_last=False,
        )
        pred, raw_prob = get_predictions(test_loader, model, ~land_mask,
                                         LABEL_OTHER, device, config)
        predictions_path = os.path.join(root_dir, FNAME_PREDICTIONS)
        print(f"Saving predictions: '{predictions_path}'")
        np.save(predictions_path, pred)
        probabilities_path = os.path.join(root_dir, FNAME_PROBABILITIES)
        print(f"Saving probabilities: '{probabilities_path}'")
        np.save(probabilities_path, raw_prob)
        # Validate against ERA5
        print("Validating against ERA5")
        test_era_ds = dataset_to_array(test_era_ds).argmax(1).squeeze()
        era_acc = validate_against_era5(pred, test_era_ds, val_dates,
                                        land_mask)
        # Validate against AWS DB
        db = get_db_session(config.db_path)
        lon_grid = np.load(config.lon_grid_path)
        lat_grid = np.load(config.lat_grid_path)
        aws_acc = validate_against_aws(pred, db, val_dates, lon_grid, lat_grid,
                                       land_mask, config)
        db.close()
        # Write accuracies
        acc_file = os.path.join(root_dir, "acc.csv")
        write_accuracies_file(val_dates, era_acc, aws_acc, acc_file)
        print(f"Era Mean Acc: {era_acc.mean()}")
        print(f"AWS Mean Acc: {aws_acc.mean()}")
        add_plots_to_run_dir(root_dir, config.do_val_plots,
                             config.do_pred_plots)
Exemple #24
0
class img2poseModel:
    """Wrapper around a FasterDoFRCNN face-pose network.

    Builds a ResNet-FPN backbone, wraps the model for the selected execution
    mode (DistributedDataParallel, plain DataParallel, or a CPU wrapper), and
    optionally loads pretrained weights. `fpn_model_without_ddp` always refers
    to the unwrapped module so checkpoints can be loaded regardless of the
    parallel wrapper in use.
    """
    def __init__(
        self,
        depth,
        min_size,
        max_size,
        model_path=None,
        device=None,
        pose_mean=None,
        pose_stddev=None,
        distributed=False,
        gpu=0,
        threed_68_points=None,
        threed_5_points=None,
        rpn_pre_nms_top_n_test=6000,
        rpn_post_nms_top_n_test=1000,
        bbox_x_factor=1.1,
        bbox_y_factor=1.1,
        expand_forehead=0.3,
    ):
        """Construct the model.

        Args:
            depth: ResNet depth used to pick the backbone (f"resnet{depth}").
            min_size, max_size: image resize bounds passed to FasterDoFRCNN.
            model_path: optional checkpoint to load; model is set to eval mode
                after loading.
            device: explicit torch device; defaults to cuda:0 when available.
            pose_mean, pose_stddev: pose normalization statistics
                (converted to tensors when given).
            distributed: use DistributedDataParallel on `gpu`.
            gpu: device id for the distributed case.
            threed_68_points, threed_5_points: 3D reference landmarks
                (converted to tensors when given).
            rpn_pre_nms_top_n_test, rpn_post_nms_top_n_test: RPN proposal
                limits at test time.
            bbox_x_factor, bbox_y_factor, expand_forehead: box expansion
                factors forwarded to FasterDoFRCNN.
        """
        self.depth = depth
        self.min_size = min_size
        self.max_size = max_size
        self.model_path = model_path
        self.distributed = distributed
        self.gpu = gpu

        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device

        # create network backbone
        backbone = resnet_fpn_backbone(f"resnet{self.depth}", pretrained=False)

        if pose_mean is not None:
            pose_mean = torch.tensor(pose_mean)
            pose_stddev = torch.tensor(pose_stddev)

        if threed_68_points is not None:
            threed_68_points = torch.tensor(threed_68_points)

        if threed_5_points is not None:
            threed_5_points = torch.tensor(threed_5_points)

        # create the feature pyramid network
        self.fpn_model = FasterDoFRCNN(
            backbone,
            2,
            min_size=self.min_size,
            max_size=self.max_size,
            pose_mean=pose_mean,
            pose_stddev=pose_stddev,
            threed_68_points=threed_68_points,
            threed_5_points=threed_5_points,
            rpn_pre_nms_top_n_test=rpn_pre_nms_top_n_test,
            rpn_post_nms_top_n_test=rpn_post_nms_top_n_test,
            bbox_x_factor=bbox_x_factor,
            bbox_y_factor=bbox_y_factor,
            expand_forehead=expand_forehead,
        )

        # if using cpu, remove the parallel modules from the saved model
        # keep a handle to the raw module for checkpoint load/save
        self.fpn_model_without_ddp = self.fpn_model

        if self.distributed:
            # NOTE: model must be on the device before DDP wrapping
            self.fpn_model = self.fpn_model.to(self.device)
            self.fpn_model = DistributedDataParallel(
                self.fpn_model, device_ids=[self.gpu]
            )
            self.fpn_model_without_ddp = self.fpn_model.module

            print("Model will use distributed mode!")

        elif str(self.device) == "cpu":
            self.fpn_model = WrappedModel(self.fpn_model)
            self.fpn_model_without_ddp = self.fpn_model

            print("Model will run on CPU!")

        else:
            self.fpn_model = DataParallel(self.fpn_model)
            self.fpn_model = self.fpn_model.to(self.device)
            self.fpn_model_without_ddp = self.fpn_model

            print(f"Model will use {torch.cuda.device_count()} GPUs!")

        if self.model_path is not None:
            self.load_saved_model(self.model_path)
            self.evaluate()

    def load_saved_model(self, model_path):
        """Load weights from `model_path` into the unwrapped module."""
        load_model(
            self.fpn_model_without_ddp, model_path, cpu_mode=str(self.device) == "cpu"
        )

    def evaluate(self):
        """Switch the network to evaluation mode."""
        self.fpn_model.eval()

    def train(self):
        """Switch the network to training mode."""
        self.fpn_model.train()

    def run_model(self, imgs, targets=None):
        """Forward pass; returns losses in train mode, detections in eval."""
        outputs = self.fpn_model(imgs, targets)

        return outputs

    def forward(self, imgs, targets):
        """Training forward pass returning the loss dict."""
        losses = self.run_model(imgs, targets)

        return losses

    def predict(self, imgs):
        """Run inference on `imgs`; model must already be in eval mode."""
        assert self.fpn_model.training is False

        with torch.no_grad():
            predictions = self.run_model(imgs)

        return predictions
Exemple #25
0
        DEVICE = torch.device('cuda')
        device_ids = list(range(torch.cuda.device_count()))
        gpus = len(device_ids)
        print('GPU detected')
    else:
        DEVICE = torch.device("cpu")
        device_ids = -1
        print('No GPU. switching to CPU')

    model = 'textattack/albert-base-v2-MRPC'
    tokenizer = AutoTokenizer.from_pretrained(model)
    model = AutoModelForSequenceClassification.from_pretrained(model)
    if not device_ids == -1:
        print('Porting model to CUDA...')
        model = DP(model, device_ids=device_ids)
        model.to(f'cuda:{model.device_ids[0]}')
    model.eval()

    if args.dtypes is None:
        dtypes = ['mini'] if args.debug else ['validation', 'train']
        if not dataset.name == 'squad' and not args.debug:
            dtypes.append('test')
    else:
        dtypes = args.dtypes.split(',')
    print('Running KG builder for {} sets'.format(', '.join(dtypes)))

    results = []
    for dtype in dtypes:
        start_time = time()
        oie_fn = os.path.join(data_dir, 'oie_data', 'predictions_{}.json'.format(dtype))
        print('Loading open IE output for {} set...'.format(dtype))
Exemple #26
0
    fe_dict = get_feature_dict(identity_list, features)
    acc, th = test_performance(fe_dict, compair_list)
    print('lfw face verification accuracy: ', acc, 'threshold: ', th)
    return acc


if __name__ == '__main__':

    opt = Config()
    # Select the backbone architecture from the config.
    if opt.backbone == 'resnet18':
        model = resnet_face18(opt.use_se)
    elif opt.backbone == 'resnet34':
        model = resnet34()
    elif opt.backbone == 'resnet50':
        model = resnet50()
    else:
        # previously fell through silently and crashed later with NameError
        raise ValueError('Unsupported backbone: {}'.format(opt.backbone))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    torch.manual_seed(1)

    # wrap first: the checkpoint was saved from a DataParallel model, so its
    # keys carry the `module.` prefix
    model = DataParallel(model)
    # load_model(model, opt.test_model_path)
    model.load_state_dict(torch.load(opt.test_model_path))
    model.to(device)

    identity_list = get_lfw_list(opt.lfw_test_list)
    img_paths = [os.path.join(opt.lfw_root, each) for each in identity_list]

    model.eval()
    lfw_test(model, img_paths, identity_list, opt.lfw_test_list,
             opt.test_batch_size)
Exemple #27
0
def detect(save_img=False):
    """Run face detection (YOLO) + ArcFace recognition, with optional
    anti-spoofing, over images/videos/webcam streams.

    Runtime options are read from the global `opt` namespace. Detections are
    drawn on the frames and optionally displayed and/or saved to `opt.output`.

    Args:
        save_img: force saving annotated images/videos (also enabled
            automatically for non-webcam sources).
    """
    out, source, weights, view_img, save_txt, imgsz = \
        opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size  # load configuration

    # determine whether the source is a webcam/stream or image/video files
    webcam = source == '0' or source.startswith('rtsp') or source.startswith(
        'http') or source.endswith('.txt')

    # Initialize
    device = torch_utils.select_device(opt.device)
    # gallery of reference face images (renamed from `dir`, which shadowed
    # the builtin)
    gallery_dir = "pic"

    # create the output folder (wipe any previous results)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder
    # half precision only supported on CUDA; improves throughput
    half = device.type != 'cpu'

    # Load model

    model = torch.load(weights,
                       map_location=device)['model'].float()  # load to FP32

    model.to(device).eval()

    arcface_model = resnet_face18(False)

    arcface_model = DataParallel(arcface_model)
    # load_model(model, opt.test_model_path)
    arcface_model.load_state_dict(torch.load('weights/resnet18_110.pth'),
                                  strict=False)
    arcface_model.to(torch.device("cuda")).eval()

    pred_model = AntiSpoofPredict(0)

    if half:
        model.half()  # to FP16

    # precompute ArcFace features for every gallery image
    features = get_featuresdict(arcface_model, gallery_dir)

    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)
    else:
        # image and video loading
        save_img = True
        dataset = LoadImages(source, img_size=imgsz)
    view_img = True
    # Get names and colors: class names and one random colour per class
    names = model.names if hasattr(model, 'names') else model.modules.names
    colors = [[random.randint(0, 255) for _ in range(3)]
              for _ in range(len(names))]

    # Run inference
    t0 = time.time()

    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img
              ) if device.type != 'cpu' else None  # run once to warm up
    # data preprocessing
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # Inference
        t1 = torch_utils.time_synchronized()
        pred = model(img, augment=opt.augment)[0]

        # Apply NMS to filter overlapping boxes
        pred = non_max_suppression(pred,
                                   opt.conf_thres,
                                   opt.iou_thres,
                                   classes=opt.classes,
                                   agnostic=opt.agnostic_nms)
        t2 = torch_utils.time_synchronized()

        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
            else:
                p, s, im0 = path, '', im0s

            save_path = str(Path(out) / Path(p).name)
            s += '%gx%g ' % img.shape[2:]  # print string

            gn = torch.tensor(im0.shape)[[1, 0, 1,
                                          0]]  #  normalization gain whwh

            if det is not None and len(det):  # at least one box detected
                # Rescale boxes from img_size to the original image size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4],
                                          im0.shape).round()

                # Print results: per-class detection counts
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string

                # Write results
                for *xyxy, conf, cls in det:  # x1 y1 x2 y2 cls class
                    prediction = np.zeros((1, 3))
                    # crop the detected face
                    face_img = im0[int(xyxy[1]):int(xyxy[3]),
                                   int(xyxy[0]):int(xyxy[2])]
                    # resize for the anti-spoofing models
                    rf_img = cv2.resize(face_img, (80, 80))

                    # recognition preprocessing: grayscale, original +
                    # mirrored copies stacked as a 2-image batch
                    face_img = cv2.resize(face_img, (128, 128))

                    face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)

                    face_img = np.dstack((face_img, np.fliplr(face_img)))

                    face_img = face_img.transpose((2, 0, 1))
                    face_img = face_img[:, np.newaxis, :, :]

                    face_img = face_img.astype(np.float32, copy=False)
                    face_img -= 127.5
                    face_img /= 127.5

                    face_data = torch.from_numpy(face_img)
                    face_data = face_data.to(torch.device("cuda"))

                    _output = arcface_model(face_data)  # extract features
                    _output = _output.data.cpu().numpy()

                    fe_1 = _output[0]
                    fe_2 = _output[1]

                    # concatenate original + mirrored features
                    _feature = np.hstack((fe_1, fe_2))

                    # find the best-matching gallery identity by cosine
                    # similarity (renamed from `list`, which shadowed the
                    # builtin)
                    label = "none"
                    gallery_files = os.listdir(gallery_dir)
                    max_f = 0
                    t = 0
                    for each in gallery_files:
                        t = cosin_metric(features[each], _feature)
                        # print(each, t)
                        if t > max_f:
                            max_f = t
                            max_n = each
                        # print(max_n,max_f)
                        if (max_f > 0.44):
                            label = max_n[:-4]
                    if opt.open_rf:
                        # predict real or fake (anti-spoofing ensemble)
                        for model_name in os.listdir(
                                "weights/anti_spoof_models"):
                            # print(model_test.predict(img, os.path.join(model_dir, model_name)))

                            prediction += pred_model.predict(
                                rf_img,
                                os.path.join("weights/anti_spoof_models",
                                             model_name))
                        rf_label = np.argmax(prediction)
                        value = prediction[0][rf_label] / 2
                        print(rf_label, value)
                        if rf_label == 1 and value > 0.90:
                            label += "_real"
                        else:
                            label += "_fake"
                    plot_one_box(xyxy,
                                 im0,
                                 label=label,
                                 color=colors[int(cls)],
                                 line_thickness=3)

            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Stream results (live display)
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration

            # Save results (image with detections)
            if save_img:  # save image or video
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)
                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release(
                            )  # release previous video writer

                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(
                            save_path, cv2.VideoWriter_fourcc(*opt.fourcc),
                            fps, (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        print('Results saved to %s' % os.getcwd() + os.sep + out)
        if platform == 'darwin':  # MacOS
            os.system('open ' + save_path)

    print('Done. (%.3fs)' % (time.time() - t0))
class PoemImageEmbedTrainer:
    """Trainer for the joint poem/image embedding model.

    Owns the tokenizer, the train/test data loaders, a DataParallel-wrapped
    PoemImageEmbedModel, and the optimizer/scheduler used during training.
    Only the two linear projection heads are optimized; the sentiment
    feature extractor is initialized from a pretrained checkpoint.
    """

    def __init__(self, train_data, test_data, sentiment_model, batchsize, load_model, device):
        self.device = device
        self.train_data = train_data
        self.test_data = test_data
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

        # Augmented pipeline for training, deterministic resize for evaluation.
        self.train_transform = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
        self.test_transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor()
        ])

        img_dir = 'data/image'
        self.train_set = PoemImageEmbedDataset(
            self.train_data, img_dir, tokenizer=self.tokenizer,
            max_seq_len=100, transform=self.train_transform)
        self.test_set = PoemImageEmbedDataset(
            self.test_data, img_dir, tokenizer=self.tokenizer,
            max_seq_len=100, transform=self.test_transform)
        self.train_loader = DataLoader(self.train_set, batch_size=batchsize,
                                       shuffle=True, num_workers=4)
        self.test_loader = DataLoader(self.test_set, batch_size=batchsize,
                                      num_workers=4)

        self.model = DataParallel(PoemImageEmbedModel(device))
        # Seed the image branch's sentiment feature extractor with pretrained weights
        # before (optionally) restoring a full checkpoint.
        load_dataparallel(self.model.module.img_embedder.sentiment_feature, sentiment_model)
        if load_model:
            logger.info('load model from ' + load_model)
            self.model.load_state_dict(torch.load(load_model))
        self.model.to(device)

        # Only the projection heads receive gradient updates here.
        head_params = (list(self.model.module.poem_embedder.linear.parameters()) +
                       list(self.model.module.img_embedder.linear.parameters()))
        self.optimizer = optim.Adam(head_params, lr=1e-4)
        self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[2, 4, 6], gamma=0.33)

    def train_epoch(self, epoch, log_interval, save_interval, ckpt_file):
        """Run one training pass over `train_loader`.

        Logs a running loss every `log_interval` batches and checkpoints the
        model every `save_interval` batches.
        """
        self.model.train()
        running_ls = 0
        acc_ls = 0
        start = time.time()
        num_batches = len(self.train_loader)
        for step, batch in enumerate(self.train_loader):
            img1, ids1, mask1, img2, ids2, mask2 = [t.to(self.device) for t in batch]
            self.model.zero_grad()
            loss = self.model(img1, ids1, mask1, img2, ids2, mask2)
            # DataParallel can return a per-replica loss vector; backprop all entries.
            loss.backward(torch.ones_like(loss))
            batch_loss = loss.mean().item()
            running_ls += batch_loss
            acc_ls += batch_loss
            self.optimizer.step()

            done = step + 1
            if done % log_interval == 0:
                elapsed_time = time.time() - start
                iters_per_sec = done / elapsed_time
                remaining = (num_batches - done) / iters_per_sec
                remaining_time = time.strftime("%H:%M:%S", time.gmtime(remaining))

                print('[{:>2}, {:>4}/{}] running loss:{:.4} acc loss:{:.4} {:.3}iters/s {} left'.format(
                    epoch, done, num_batches, running_ls / log_interval, acc_ls / done,
                    iters_per_sec, remaining_time))
                running_ls = 0

            if done % save_interval == 0:
                self.save_model(ckpt_file)

    def save_model(self, file):
        """Serialize the (DataParallel-wrapped) model state dict to `file`."""
        torch.save(self.model.state_dict(), file)
Exemple #29
0
        acc = np.mean((y_test == y_true).astype(int))
        if acc > best_acc:
            best_acc = acc
            best_th = th

    return (best_acc, best_th)



if __name__ == '__main__':
    # Build the face-recognition backbone, wrap for multi-GPU, and restore weights.
    opt = Config()
    model = DataParallel(resnet_face18(opt.use_se))
    # load_model(model, opt.test_model_path)
    model.load_state_dict(torch.load(opt.test_model_path))
    model.to(torch.device("cuda"))

    a_path, b_path = get_test_dataset(opt.test_list)

    # identity_list = get_lfw_list(opt.lfw_test_list)
    # img_paths = [os.path.join(opt.lfw_root, each) for each in identity_list]

    # Run pairwise verification and dump one absolute score per line.
    model.eval()
    anses = test(model, a_path, b_path)
    with open("submission.txt", "w") as f:
        f.writelines(f'{abs(line)}\n' for line in anses)
    # lfw_test(model, img_paths, identity_list, opt.lfw_test_list, opt.test_batch_size)
Exemple #30
0
                             drop_last=False,
                             collate_fn=collator)

# Path to the checkpoint directory is taken from the third CLI argument.
checkpoint = sys.argv[3]
# Rebuild the GPT-2 LM-head model from an on-disk config + weights pair.
config = GPT2Config.from_pretrained(os.path.join(checkpoint, 'config.json'))
model = GPT2LMHeadModel.from_pretrained(os.path.join(checkpoint,
                                                     'pytorch_model.bin'),
                                        config=config)
model.eval()
# First listed device hosts the model's parameters.
model_device = device_ids[0]

# Shard inference across GPUs only when more than one device id was given;
# outputs are gathered on the last listed device.
if len(device_ids) > 1:
    model = DataParallel(model,
                         device_ids=device_ids,
                         output_device=device_ids[-1])
model.to(f'cuda:{model_device}')


def make_position_ids(attention_mask):
    """Build position ids from an attention mask.

    Each attended token gets its 0-based index among the attended tokens
    (cumulative sum of the mask minus one); padded positions are set to 0.
    """
    ids = attention_mask.long().cumsum(dim=-1) - 1
    return ids.masked_fill(attention_mask == 0, 0)


def slot_in_slots(slot, slots):
    """Return True if the slot's first word, or its first two words joined
    by a space, appears in `slots`. Blank/whitespace-only slots are False."""
    words = slot.split()
    if not words:
        return False
    return words[0] in slots or ' '.join(words[:2]) in slots