Code example #1
    def test_range_search(self):
        d = 4
        nt = 100
        nq = 10
        nb = 50

        (xt, xb, xq) = get_dataset(d, nb, nt, nq)

        index = faiss.IndexFlatL2(d)
        index.add(xb)

        Dref, Iref = index.search(xq, 5)

        thresh = 0.1  # *squared* distance
        lims, D, I = index.range_search(xq, thresh)

        for i in range(nq):
            Iline = I[lims[i]:lims[i + 1]]
            Dline = D[lims[i]:lims[i + 1]]
            for j, dis in zip(Iref[i], Dref[i]):
                if dis < thresh:
                    li, = np.where(Iline == j)
                    self.assertTrue(li.size == 1)
                    idx = li[0]
                    self.assertLessEqual(abs(Dline[idx] - dis), 1e-4)
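The faiss test examples in this listing unpack (xt, xb, xq) from a get_dataset() helper that is not shown in this excerpt. A minimal sketch of such a helper, assuming it only needs to produce deterministic random float32 training, database, and query vectors (the real helper in the test suite may differ):

import numpy as np

def get_dataset(d, nb, nt, nq, seed=1234):
    """Hypothetical stand-in: random (train, database, query) matrices."""
    rs = np.random.RandomState(seed)
    xt = rs.rand(nt, d).astype('float32')  # training vectors
    xb = rs.rand(nb, d).astype('float32')  # database vectors to index
    xq = rs.rand(nq, d).astype('float32')  # query vectors
    return xt, xb, xq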
Code example #2
    def test_4variants(self):
        d = 32
        nt = 2500
        nq = 400
        nb = 5000

        (xt, xb, xq) = get_dataset(d, nb, nt, nq)

        index_gt = faiss.IndexFlatL2(d)
        index_gt.add(xb)
        D_ref, I_ref = index_gt.search(xq, 10)

        nok = {}

        for qname in "QT_4bit QT_4bit_uniform QT_8bit QT_8bit_uniform QT_fp16".split(
        ):
            qtype = getattr(faiss.ScalarQuantizer, qname)
            index = faiss.IndexScalarQuantizer(d, qtype, faiss.METRIC_L2)
            index.train(xt)
            index.add(xb)
            D, I = index.search(xq, 10)
            nok[qname] = (I[:, 0] == I_ref[:, 0]).sum()

        print(nok, nq)

        self.assertGreaterEqual(nok['QT_8bit'], nq * 0.9)
        self.assertGreaterEqual(nok['QT_8bit'], nok['QT_4bit'])
        self.assertGreaterEqual(nok['QT_8bit'], nok['QT_8bit_uniform'])
        self.assertGreaterEqual(nok['QT_4bit'], nok['QT_4bit_uniform'])
        self.assertGreaterEqual(nok['QT_fp16'], nok['QT_8bit'])
Code example #3
    def test_IndexFlat(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200

        (xt, xb, xq) = get_dataset(d, nb, nt, nq)

        index = faiss.IndexFlatL2(d)
        index.add(xb)

        self.run_search_and_reconstruct(index, xb, xq, eps=0.0)
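This test, like examples #5, #6 and #8 below, relies on a run_search_and_reconstruct() helper that is also not shown. A rough, hypothetical sketch of that kind of check, assuming it only verifies that decoded neighbours are consistent with the reported distances up to eps (IVF-style indexes may additionally need a direct map before reconstruct() works):

import numpy as np

def check_search_and_reconstruct(index, xq, k=10, eps=1e-4):
    """Hypothetical consistency check between search() and reconstruct()."""
    D, I = index.search(xq, k)
    for qi in range(xq.shape[0]):
        for rank in range(k):
            idx = int(I[qi, rank])
            if idx < 0:  # padding entry when fewer than k results exist
                continue
            rec = index.reconstruct(idx)  # decode the stored vector
            d2 = float(((xq[qi] - rec) ** 2).sum())
            assert abs(d2 - D[qi, rank]) <= eps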
Code example #4
def main(label, dataset_dir, process):
    train, test = get_dataset()
    train_files, test_files = train[label], test[label]

    x_train = process_map(process, train_files[::-1], max_workers=10)
    x_test = process_map(process, test_files[::-1], max_workers=10)

    with open(os.path.join(dataset_dir, f'{label}_train.p'), 'wb') as fp:
        pickle.dump(x_train, fp)

    with open(os.path.join(dataset_dir, f'{label}_test.p'), 'wb') as fp:
        pickle.dump(x_test, fp)
Code example #5
    def test_IndexTransform(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200

        (xt, xb, xq) = get_dataset(d, nb, nt, nq)

        index = faiss.index_factory(d, "L2norm,PCA8,IVF32,PQ8np")
        faiss.ParameterSpace().set_index_parameter(index, "nprobe", 4)
        index.train(xt)
        index.add(xb)

        self.run_search_and_reconstruct(index, xb, xq)
Code example #6
    def test_MultiIndex(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200

        (xt, xb, xq) = get_dataset(d, nb, nt, nq)

        index = faiss.index_factory(d, "IMI2x5,PQ8np")
        faiss.ParameterSpace().set_index_parameter(index, "nprobe", 4)
        index.train(xt)
        index.add(xb)

        self.run_search_and_reconstruct(index, xb, xq, eps=1.0)
Code example #7
def main():
    """Train and test a perceptron implementation.

    This takes data at the location defined by DATA_DIRECTORY, splits it into
    training, validation, and test sets, and creates a perceptron to predict values.
    """
    data = get_dataset(DATA_DIRECTORY)
    training, validation, test = split_data(data)
    perceptron = Perceptron()
    # TODO: Use validation set to tune alpha
    x, y = training['x'], training['y']
    perceptron.train(x, y, alpha=DEFAULT_STEP_SIZE)
    accuracy = determine_accuracy(perceptron, test)
    print(f'The perceptron is {int(accuracy * 100)}% accurate. Woo.')
Code example #8
    def test_IndexIVFPQ(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200

        (xt, xb, xq) = get_dataset(d, nb, nt, nq)

        quantizer = faiss.IndexFlatL2(d)
        index = faiss.IndexIVFPQ(quantizer, d, 32, 8, 8)
        index.cp.min_points_per_centroid = 5  # quiet warning
        index.nprobe = 4
        index.train(xt)
        index.add(xb)

        self.run_search_and_reconstruct(index, xb, xq, eps=1.0)
Code example #9
def execute():
    preparedDataset = get_dataset("salary-data",
                                  dependent_variable_index=1,
                                  test_size=1 / 3)
    dataset = preparedDataset.get_dataset()

    features_train = preparedDataset.get_features_training_set()
    features_test = preparedDataset.get_features_test_set()

    dependents_train = preparedDataset.get_dependents_training_set()
    dependents_test = preparedDataset.get_dependents_test_set()

    regressor = LinearRegression()
    regressor.fit(features_train, dependents_train)

    dependents_predictions = regressor.predict(features_test)
    print(dataset)
    print(dependents_test)
    print(dependents_predictions)
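execute() assumes a get_dataset() wrapper that loads and splits a named dataset. A hypothetical sketch of the kind of object it might return, using pandas and scikit-learn's train_test_split; the file layout and constructor internals are assumptions, while the accessor names mirror the calls above:

import pandas as pd
from sklearn.model_selection import train_test_split


class PreparedDataset:
    """Hypothetical prepared-dataset wrapper matching the accessors used above."""

    def __init__(self, name, dependent_variable_index, test_size):
        self._df = pd.read_csv(f'{name}.csv')  # assumed on-disk layout
        y = self._df.iloc[:, dependent_variable_index].values
        X = self._df.drop(self._df.columns[dependent_variable_index], axis=1).values
        (self._X_train, self._X_test,
         self._y_train, self._y_test) = train_test_split(X, y, test_size=test_size)

    def get_dataset(self):
        return self._df

    def get_features_training_set(self):
        return self._X_train

    def get_features_test_set(self):
        return self._X_test

    def get_dependents_training_set(self):
        return self._y_train

    def get_dependents_test_set(self):
        return self._y_test


def get_dataset(name, dependent_variable_index, test_size):
    # hypothetical factory corresponding to the call in execute()
    return PreparedDataset(name, dependent_variable_index, test_size)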
Code example #10
    def test_search_k1(self):

        # verify codepath for k = 1 and k > 1

        d = 64
        nb = 0
        nt = 1500
        nq = 200

        (xt, xb, xq) = get_dataset(d, nb, nt, nq)

        miq = faiss.MultiIndexQuantizer(d, 2, 6)

        miq.train(xt)

        D1, I1 = miq.search(xq, 1)

        D5, I5 = miq.search(xq, 5)

        self.assertEqual(np.abs(I1[:, :1] - I5[:, :1]).max(), 0)
        self.assertEqual(np.abs(D1[:, :1] - D5[:, :1]).max(), 0)
Code example #11
def main():
    train, test = get_dataset()
    x_train, y_train = load_data(train)
    x_test, y_test = load_data(test)

    x_train, vocab = bow(x_train)
    x_test, _ = bow(x_test, vocab)

    mappings = {j: i for (i, j) in enumerate(set(y_train))}

    y_train = list(map(mappings.get, y_train))
    y_test = list(map(mappings.get, y_test))

    train_dataset = TensorDataset(torch.tensor(x_train), torch.tensor(y_train))
    test_dataset = TensorDataset(torch.tensor(x_test), torch.tensor(y_test))

    epochs = 100
    batch_size = len(train_dataset) // 4
    learning_rate = 1e-2
    device = 'cuda:1'

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=batch_size,
                                 shuffle=True)

    model = LogRegClassifier(train_dataset[0][0].shape[0],
                             len(mappings)).to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss().to(device)

    stats = {
        'epoch': [],
        'loss': [],
        'train_accuracy': [],
        'test_accuracy': []
    }
    for epoch in range(1, epochs + 1):
        epoch_loss = []

        model.train()
        correct, total = 0, 0
        for idx, (inpt, target) in enumerate(train_dataloader):
            inpt, target = inpt.float().to(device), target.to(device)
            model.zero_grad()
            pred = model(inpt)
            loss = criterion(pred, target)
            loss.backward()
            optimizer.step()
            epoch_loss.append(loss.item())

            correct += torch.sum(pred.argmax(1) == target).item()
            total += len(target)
        train_accuracy = correct / total

        model.eval()
        correct, total = 0, 0
        for idx, (inpt, target) in enumerate(test_dataloader):
            inpt, target = inpt.float().to(device), target.to(device)
            pred = model(inpt)

            correct += torch.sum(pred.argmax(1) == target).item()
            total += len(target)
        test_accuracy = correct / total

        stats['epoch'].append(epoch)
        stats['loss'].append(np.mean(epoch_loss))
        stats['train_accuracy'].append(np.mean(train_accuracy))
        stats['test_accuracy'].append(np.mean(test_accuracy))
        print(
            f"{stats['epoch'][-1]}\t{stats['loss'][-1]:.5f}\t{stats['train_accuracy'][-1]:.5f}\t{stats['test_accuracy'][-1]:.5f}"
        )

    torch.save(model.state_dict(), 'bow-trained.pth')
    torch.save(stats, 'bow-stats.pth')
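The LogRegClassifier, bow(), and load_data() helpers used above are not shown in this excerpt. A minimal sketch of what the model itself might look like, assuming it is a single linear layer producing class logits (softmax is folded into nn.CrossEntropyLoss):

import torch.nn as nn


class LogRegClassifier(nn.Module):
    """Hypothetical multinomial logistic regression over bag-of-words features."""

    def __init__(self, in_features, n_classes):
        super().__init__()
        self.linear = nn.Linear(in_features, n_classes)

    def forward(self, x):
        # return raw logits; nn.CrossEntropyLoss applies log-softmax internally
        return self.linear(x)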
Code example #12
File: train_resnest.py  Project: XWalways/Attention
def main_worker(gpu, ngpus_per_node, args):
    args.gpu = gpu
    args.rank = args.rank * ngpus_per_node + gpu
    print('rank: {} / {}'.format(args.rank, args.world_size))
    dist.init_process_group(backend=args.dist_backend,
                            init_method=args.dist_url,
                            world_size=args.world_size,
                            rank=args.rank)
    torch.cuda.set_device(args.gpu)
    # init the args
    global best_pred, acclist_train, acclist_val

    if args.gpu == 0:
        print(args)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    transform_train, transform_val = get_transform(
            args.dataset, args.base_size, args.crop_size, args.rand_aug)
    trainset = get_dataset(args.dataset, root=os.path.expanduser('~/.torch/data'),
                           transform=transform_train, train=True, download=True)
    valset = get_dataset(args.dataset, root=os.path.expanduser('~/.torch/data'),
                         transform=transform_val, train=False, download=True)
    train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
    train_loader = torch.utils.data.DataLoader(
        trainset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True,
        sampler=train_sampler)

    val_sampler = torch.utils.data.distributed.DistributedSampler(valset, shuffle=False)
    val_loader = torch.utils.data.DataLoader(
        valset, batch_size=args.test_batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True,
        sampler=val_sampler)

    model_kwargs = {}
    if args.pretrained:
        model_kwargs['pretrained'] = True

    if args.final_drop > 0.0:
        model_kwargs['final_drop'] = args.final_drop

    if args.dropblock_prob > 0.0:
        model_kwargs['dropblock_prob'] = args.dropblock_prob

    if args.last_gamma:
        model_kwargs['last_gamma'] = True

    if args.rectify:
        model_kwargs['rectified_conv'] = True
        model_kwargs['rectify_avg'] = args.rectify_avg
    model = models.__dict__[args.model](**model_kwargs)

    if args.dropblock_prob > 0.0:
        from functools import partial
        from models import reset_dropblock
        nr_iters = (args.epochs - args.warmup_epochs) * len(train_loader)
        apply_drop_prob = partial(reset_dropblock, args.warmup_epochs*len(train_loader),
                                  nr_iters, 0.0, args.dropblock_prob)
        model.apply(apply_drop_prob)
    if args.gpu == 0:
        print(model)

    if args.mixup > 0:
        train_loader = MixUpWrapper(args.mixup, 1000, train_loader, args.gpu)
        criterion = NLLMultiLabelSmooth(args.label_smoothing)
    elif args.label_smoothing > 0.0:
        criterion = LabelSmoothing(args.label_smoothing)
    else:
        criterion = nn.CrossEntropyLoss()

    model.cuda(args.gpu)
    criterion.cuda(args.gpu)
    model = DistributedDataParallel(model, device_ids=[args.gpu])
    
    if args.no_bn_wd:
        parameters = model.named_parameters()
        param_dict = {}
        for k, v in parameters:
            param_dict[k] = v
        bn_params = [v for n, v in param_dict.items() if ('bn' in n or 'bias' in n)]
        rest_params = [v for n, v in param_dict.items() if not ('bn' in n or 'bias' in n)]
        if args.gpu == 0:
            print(" Weight decay NOT applied to BN parameters ")
            print(f'len(parameters): {len(list(model.parameters()))} = {len(bn_params)} + {len(rest_params)}')
        optimizer = torch.optim.SGD([{'params': bn_params, 'weight_decay': 0 },
                                     {'params': rest_params, 'weight_decay': args.weight_decay}],
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    # check point
    if args.resume is not None:
        if os.path.isfile(args.resume):
            if args.gpu == 0:
                print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch'] + 1 if args.start_epoch == 0 else args.start_epoch
            best_pred = checkpoint['best_pred']
            acclist_train = checkpoint['acclist_train']
            acclist_val = checkpoint['acclist_val']
            model.module.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            if args.gpu == 0:
                print("=> loaded checkpoint '{}' (epoch {})"
                .format(args.resume, checkpoint['epoch']))
        else:
            raise RuntimeError ("=> no resume checkpoint found at '{}'".\
                format(args.resume))
    scheduler = LR_Scheduler(args.lr_scheduler,
                             base_lr=args.lr,
                             num_epochs=args.epochs,
                             iters_per_epoch=len(train_loader),
                             warmup_epochs=args.warmup_epochs)

    def train(epoch):
        train_sampler.set_epoch(epoch)
        model.train()
        losses = AverageMeter()
        top1 = AverageMeter()
        global best_pred, acclist_train
        for batch_idx, (data, target) in enumerate(train_loader):
            scheduler(optimizer, batch_idx, epoch, best_pred)
            if not args.mixup:
                data, target = data.cuda(args.gpu), target.cuda(args.gpu)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            if not args.mixup:
                acc1 = accuracy(output, target, topk=(1,))
                top1.update(acc1[0], data.size(0))

            losses.update(loss.item(), data.size(0))
            if batch_idx % 100 == 0 and args.gpu == 0:
                if args.mixup:
                    print('Batch: %d| Loss: %.3f'%(batch_idx, losses.avg))
                else:
                    print('Batch: %d| Loss: %.3f | Top1: %.3f'%(batch_idx, losses.avg, top1.avg))

        acclist_train += [top1.avg]

    def validate(epoch):
        model.eval()
        top1 = AverageMeter()
        top5 = AverageMeter()
        global best_pred, acclist_train, acclist_val
        is_best = False
        for batch_idx, (data, target) in enumerate(val_loader):
            data, target = data.cuda(args.gpu), target.cuda(args.gpu)
            with torch.no_grad():
                output = model(data)
                acc1, acc5 = accuracy(output, target, topk=(1, 5))
                top1.update(acc1[0], data.size(0))
                top5.update(acc5[0], data.size(0))

        # sum all
        sum1, cnt1, sum5, cnt5 = torch_dist_sum(args.gpu, top1.sum, top1.count, top5.sum, top5.count)

        if args.eval:
            if args.gpu == 0:
                top1_acc = sum(sum1) / sum(cnt1)
                top5_acc = sum(sum5) / sum(cnt5)
                print('Validation: Top1: %.3f | Top5: %.3f'%(top1_acc, top5_acc))
            return

        if args.gpu == 0:
            top1_acc = sum(sum1) / sum(cnt1)
            top5_acc = sum(sum5) / sum(cnt5)
            print('Validation: Top1: %.3f | Top5: %.3f'%(top1_acc, top5_acc))

            # save checkpoint
            acclist_val += [top1_acc]
            if top1_acc > best_pred:
                best_pred = top1_acc 
                is_best = True
            save_checkpoint({
                'epoch': epoch,
                'state_dict': model.module.state_dict(),
                'optimizer': optimizer.state_dict(),
                'best_pred': best_pred,
                'acclist_train':acclist_train,
                'acclist_val':acclist_val,
                }, args=args, is_best=is_best)

    if args.export:
        if args.gpu == 0:
            torch.save(model.module.state_dict(), args.export + '.pth')
        return

    if args.eval:
        validate(args.start_epoch)
        return

    for epoch in range(args.start_epoch, args.epochs):
        tic = time.time()
        train(epoch)
        if epoch % 10 == 0:# or epoch == args.epochs-1:
            validate(epoch)
        elapsed = time.time() - tic
        if args.gpu == 0:
            print(f'Epoch: {epoch}, Time cost: {elapsed}')

    if args.gpu == 0:
        save_checkpoint({
            'epoch': args.epochs-1,
            'state_dict': model.module.state_dict(),
            'optimizer': optimizer.state_dict(),
            'best_pred': best_pred,
            'acclist_train':acclist_train,
            'acclist_val':acclist_val,
            }, args=args, is_best=False)
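main_worker() receives the GPU index as its first argument, which matches how torch.multiprocessing.spawn invokes a worker function. A sketch of how such a worker is typically launched, one process per GPU (parse_args() and the world-size arithmetic are assumptions standing in for the project's own launcher, which is not shown here):

import torch
import torch.multiprocessing as mp

if __name__ == '__main__':
    args = parse_args()  # hypothetical: the project's argparse options (rank, dist_url, ...)
    ngpus_per_node = torch.cuda.device_count()
    # total number of distributed processes across all nodes
    args.world_size = ngpus_per_node * args.world_size
    # spawn one process per GPU; each receives its GPU index as the first argument
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))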
Code example #13
def main(argv):
  (opts, args) = parser.parse_args(argv)
  if 'estimate' in opts.mode:
    mode_idx = int(opts.mode[-1])

  global colorPlatte, bones, Evaluation

  if 'nyu' in opts.config:
    colorPlatte = utils.util.nyuColorIdx
    bones = utils.util.nyuBones
    Evaluation = NYUHandposeEvaluation
  elif 'icvl' in opts.config:
    colorPlatte = utils.util.icvlColorIdx
    bones = utils.util.icvlBones
    Evaluation = ICVLHandposeEvaluation



  # Load experiment setting
  assert isinstance(opts, object)
  config = NetConfig(opts.config)

  batch_size = config.hyperparameters['batch_size'] if 'estimate' in opts.mode else 1
  test_batch_size = batch_size * 32
  max_iterations = config.hyperparameters['max_iterations']
  frac = opts.frac

  dataset_a = get_dataset(config.datasets['train_a'])
  dataset_b = get_dataset(config.datasets['train_b'])
  dataset_test = get_dataset(config.datasets['test_b'])


  train_loader_a = get_data_loader(dataset_a, batch_size, shuffle=True)
  train_loader_b = get_data_loader(dataset_b, batch_size, shuffle=True)
  test_loader_real = get_data_loader(dataset_test, test_batch_size, shuffle=False)

  cmd = "trainer=%s(config.hyperparameters)" % config.hyperparameters['trainer']
  local_dict = locals()
  exec(cmd,globals(),local_dict)
  trainer = local_dict['trainer']

  di_a = dataset_a.di
  di_b = dataset_b.di

  # Check if resume training
  iterations = 0
  if opts.resume == 1:
    iterations = trainer.resume(config.snapshot_prefix, idx=-1, load_opt=True)
    for i in range(iterations//1000):
      trainer.dis_sch.step()
      trainer.gen_sch.step()
  trainer.cuda(opts.gpu)


  print('using %.2f percent of the labeled real data' % frac)
  try:
    if 'estimate' in opts.mode and (mode_idx == 3 or mode_idx == 4):
      trainer.load_vae(config.snapshot_prefix, 2+frac)
    else:
      trainer.load_vae(config.snapshot_prefix, frac)
  except:
    print('Failed to load the parameters of vae')

  if 'estimate' in opts.mode:
    if opts.idx != 0:
      trainer.resume(config.snapshot_prefix, idx=opts.idx, est=mode_idx==5)
    if frac > 0. and frac < 1.:
      dataset_b.set_nmax(frac)
    #trainer.dis.freeze_layers()

  ###############################################################################################
  # Setup logger and repare image outputs
  train_writer = tensorboardX.FileWriter("%s/%s" % (opts.log,os.path.splitext(os.path.basename(opts.config))[0]))
  image_directory, snapshot_directory = prepare_snapshot_and_image_folder(config.snapshot_prefix, iterations, config.image_save_iterations)

  best_err, best_acc = 100., 0.
  start_time = time.time()
  for ep in range(0, MAX_EPOCHS):
    for it, ((images_a, labels_a, com_a, M_a, cube_a, _), (images_b, labels_b, com_b, M_b, cube_b, _)) \
            in enumerate(izip(train_loader_a, train_loader_b)):
      if images_a.size(0) != batch_size or images_b.size(0) != batch_size:
        continue
      images_a = Variable(images_a.cuda(opts.gpu))
      images_b = Variable(images_b.cuda(opts.gpu))
      labels_a = Variable(labels_a.cuda(opts.gpu))
      labels_b = Variable(labels_b.cuda(opts.gpu))
      com_a = Variable(com_a.cuda(opts.gpu))
      com_b = Variable(com_b.cuda(opts.gpu))

      trainer.dis.train()
      if opts.mode == 'pretrain':
        if (iterations+1) % 1000 == 0:
          trainer.dis_sch.step()
          trainer.gen_sch.step()
          print('lr %.8f' % trainer.dis_sch.get_lr()[0])

        trainer.dis_update(images_a, labels_a, images_b, labels_b, com_a, com_b, config.hyperparameters)
        image_outputs = trainer.gen_update(images_a, labels_a, images_b, labels_b, config.hyperparameters)
        assembled_images = trainer.assemble_outputs(images_a, images_b, image_outputs)
      else:
        if (iterations+1) % 100 == 0:
          trainer.dis_sch.step()
        image_outputs = trainer.post_update(images_a, labels_a, images_b, labels_b, com_a, com_b, mode_idx, config.hyperparameters)
        assembled_images = trainer.assemble_outputs(images_a, images_b, image_outputs)

      # Dump training stats in log file
      if (iterations+1) % config.display == 0:
        elapsed_time = time.time() - start_time
        write_loss(iterations, max_iterations, trainer, train_writer, elapsed_time)
        start_time = time.time()

      if (iterations + 1) % config.image_display_iterations == 0:
          img_filename = '%s/gen.jpg' % (image_directory)
          torchvision.utils.save_image(assembled_images.data / 2 + 0.5, img_filename, nrow=1)

      if (iterations+1) % config.image_save_iterations == 0:

        if opts.mode == 'pretrain':  # and (iterations+1) % (2*config.image_save_iterations) != 0:
          img_filename = '%s/gen_%08d.jpg' % (image_directory, iterations + 1)
          torchvision.utils.save_image(assembled_images.data / 2 + 0.5, img_filename, nrow=1)
          write_html(snapshot_directory + "/index.html", iterations + 1,
                     config.image_save_iterations, image_directory)
        else:
          trainer.dis.eval()
          score, maxerr = 0, 0
          num_samples = 0
          maxJntError = []
          meanJntError = 0
          img2sav = None
          gt3D = []
          joints = []
          joints_imgcord = []
          codec = cv2.VideoWriter_fourcc(*'XVID')
          vid = cv2.VideoWriter(os.path.join(image_directory, 'gen.avi'), codec, 25, (128*2, 128))
          for tit, (test_images_b, test_labels_b, com_b, trans_b, cube_b, fn) in enumerate(test_loader_real):
            test_images_b = Variable(test_images_b.cuda(opts.gpu))
            test_labels_b = Variable(test_labels_b.cuda(opts.gpu))
            if mode_idx == 0:
              pred_pose, pred_post, _ = trainer.dis.regress_a(test_images_b)
            else:
              pred_pose, pred_post, _ = trainer.dis.regress_b(test_images_b)

            if True:
              pred_pose = trainer.vae.decode(pred_post)

            n = test_labels_b.size(0)

            gt_pose = test_labels_b.data.cpu().numpy().reshape((n, -1, 3))
            pr_pose = pred_pose.data.cpu().numpy().reshape((n, -1, 3))

            if tit < 20:
              for i in range(0, n, 4):
                real_img = visPair(di_b, test_images_b[i].data.cpu().numpy(), gt_pose[i].reshape((-1)),
                                   trans_b[i].numpy(), com_b[i].numpy(), cube_b[i].numpy(), 50.0)
                est_img = visPair(di_b, test_images_b[i].data.cpu().numpy(), pr_pose[i].reshape((-1)),
                                  trans_b[i].numpy(), com_b[i].numpy(), cube_b[i].numpy(), 50.0)

                vid.write(np.hstack((real_img, est_img)).astype('uint8'))

            both_img = np.vstack((real_img, est_img))

            if True and tit < 8:
              if img2sav is None:
                img2sav = both_img
              else:
                img2sav = np.hstack((img2sav, both_img))

            if 'nyu' in opts.config:
              restrictedJointsEval = np.array([0, 3, 6, 9, 12, 15, 18, 21, 24, 25, 27, 30, 31, 32])
              gt_pose = gt_pose[:, restrictedJointsEval]
              pr_pose = pr_pose[:, restrictedJointsEval]

            for i in range(n):
              gt3D.append(gt_pose[i]*(cube_b.numpy()[0]/2.) + com_b[i].numpy())
              joints.append(pr_pose[i]*(cube_b.numpy()[0]/2.) + com_b[i].numpy())
              joints_imgcord.append(di_b.joints3DToImg(pr_pose[i]*(cube_b.numpy()[0]/2.) + com_b[i].numpy()))

            score += meanJntError
            num_samples += test_images_b.size(0)

          cv2.imwrite(image_directory + '/_test.jpg', img2sav.astype('uint8'))
          vid.release()

          hpe = Evaluation(np.array(gt3D), np.array(joints))
          mean_err = hpe.getMeanError()
          over_40 = 100. * hpe.getNumFramesWithinMaxDist(40) / len(gt3D)
          best_err = np.minimum(best_err, mean_err)
          best_acc = np.maximum(best_acc, over_40)
          print("------------ Mean err: {:.4f} ({:.4f}) mm, Max over 40mm: {:.2f} ({:.2f}) %".format(mean_err, best_err, over_40, best_acc))


      # Save network weights
      if (iterations+1) % config.snapshot_save_iterations == 0:
        if opts.mode == 'pretrain':
          trainer.save(config.snapshot_prefix, iterations)
        elif 'estimate' in opts.mode:
          trainer.save(config.snapshot_prefix+'_est', iterations)

      iterations += 1
      if iterations >= max_iterations:
        return
Code example #14
File: pose_train.py  Project: xyhak47/LSPS
def main(argv):
    (opts, args) = parser.parse_args(argv)

    global colorPlatte, bones, Evaluation

    if 'nyu' in opts.config:
        colorPlatte = utils.util.nyuColorIdx
        bones = utils.util.nyuBones
        Evaluation = NYUHandposeEvaluation
    elif 'icvl' in opts.config:
        colorPlatte = utils.util.icvlColorIdx
        bones = utils.util.icvlBones
        Evaluation = ICVLHandposeEvaluation

    # Load experiment setting
    assert isinstance(opts, object)
    config = NetConfig(opts.config)

    batch_size = config.hyperparameters['batch_size_pose']
    max_iterations = 200000  #config.hyperparameters['max_iterations']
    frac = opts.frac

    dataset_a = get_dataset(config.datasets['train_a'])
    dataset_b = get_dataset(config.datasets['train_b'])
    dataset_test = get_dataset(config.datasets['test_b'])

    train_loader_a = get_data_loader(dataset_a, batch_size, shuffle=True)
    train_loader_b = get_data_loader(dataset_b, batch_size, shuffle=True)
    test_loader_real = get_data_loader(dataset_test, 1, shuffle=True)

    cmd = "trainer=%s(config.hyperparameters)" % config.hyperparameters[
        'trainer']
    local_dict = locals()
    exec(cmd, globals(), local_dict)
    trainer = local_dict['trainer']

    iterations = 0
    trainer.cuda(opts.gpu)

    dataset_a.pose_only = True
    dataset_b.pose_only = True

    if frac > 0. and frac < 1.:
        dataset_b.set_nmax(frac)

    di_a = dataset_a.di
    di_b = dataset_b.di

    dataset_a.sample_poses()
    dataset_b.sample_poses()

    ###################################################################
    # Setup logger and repare image outputs
    train_writer = tensorboardX.FileWriter(
        "%s/%s" %
        (opts.log, os.path.splitext(os.path.basename(opts.config))[0]))
    image_directory, snapshot_directory = prepare_snapshot_and_image_folder(
        config.snapshot_prefix, iterations, config.image_save_iterations)

    print('using %.2f percent of the labeled real data' % frac)
    start_time = time.time()
    for ep in range(0, MAX_EPOCHS):
        for it, ((labels_a),
                 (labels_b)) in enumerate(izip(train_loader_a,
                                               train_loader_b)):
            if labels_a.size(0) != batch_size or labels_b.size(
                    0) != batch_size:
                continue
            labels_a = Variable(labels_a.cuda(opts.gpu))
            labels_b = Variable(labels_b.cuda(opts.gpu))
            labels = labels_a

            if frac > 0.:
                labels = torch.cat((labels_a, labels_b), 0)

            if (iterations + 1) % 1000 == 0:
                trainer.vae_sch.step()

            recon_pose = trainer.vae_update(labels, config.hyperparameters)

            # Dump training stats in log file
            if (iterations + 1) % config.display == 0:
                elapsed_time = time.time() - start_time
                write_loss(iterations, max_iterations, trainer, train_writer,
                           elapsed_time)
                start_time = time.time()

            if (iterations + 1) % (10 * config.image_save_iterations) == 0:
                if True:
                    score, maxerr = 0, 0
                    num_samples = 0
                    maxJntError = []
                    img2sav = None
                    gt3D = []
                    joints = []
                    for tit, (test_images_b, test_labels_b, com_b, trans_b,
                              cube_b, _) in enumerate(test_loader_real):
                        test_images_b = Variable(test_images_b.cuda(opts.gpu))
                        test_labels_b = Variable(test_labels_b.cuda(opts.gpu))

                        pred_pose = trainer.vae.decode(
                            trainer.vae.encode(test_labels_b)[1])

                        gt3D.append(
                            test_labels_b.data.cpu().numpy().reshape((-1, 3)) *
                            (cube_b.numpy()[0] / 2.) + com_b.numpy())

                        joints.append(
                            pred_pose.data.cpu().numpy().reshape((-1, 3)) *
                            (cube_b.numpy()[0] / 2.) + com_b.numpy())

                        if True and tit < 8:
                            real_img = visPair(di_b,
                                               test_images_b.data.cpu().numpy(),
                                               test_labels_b.data.cpu().numpy(),
                                               trans_b.numpy(), com_b.numpy(),
                                               cube_b.numpy(), 50.0)
                            est_img = visPair(di_b,
                                              test_images_b.data.cpu().numpy(),
                                              pred_pose.data.cpu().numpy(),
                                              trans_b.numpy(), com_b.numpy(),
                                              cube_b.numpy(), 50.0)

                            if img2sav is None:
                                img2sav = np.vstack((real_img, est_img))
                            else:
                                img2sav = np.hstack(
                                    (img2sav, np.vstack((real_img, est_img))))

                        num_samples += test_images_b.size(0)

                    cv2.imwrite(image_directory + '/_test.jpg',
                                img2sav.astype('uint8'))
                    #maxerr = Evaluation.plotError(maxJntError, image_directory + '/maxJntError.txt')

                    hpe = Evaluation(np.array(gt3D), np.array(joints))
                    print("Mean error: {}mm, max error: {}mm".format(
                        hpe.getMeanError(), hpe.getMaxError()))

            # Save network weights
            if (iterations + 1) % (4 * config.snapshot_save_iterations) == 0:
                trainer.save_vae(config.snapshot_prefix, iterations, 2 + frac)

            iterations += 1
            if iterations >= max_iterations:
                return
Code example #15
def main(label, dataset_dir, process):
    train, test = get_dataset()
    train_files, test_files = train[label], test[label]

    x_train = process_map(process, train_files[::-1], max_workers=10)
    x_test = process_map(process, test_files[::-1], max_workers=10)

    with open(os.path.join(dataset_dir, f'{label}_train.p'), 'wb') as fp:
        pickle.dump(x_train, fp)

    with open(os.path.join(dataset_dir, f'{label}_test.p'), 'wb') as fp:
        pickle.dump(x_test, fp)


if __name__ == '__main__':
    train, test = get_dataset()
    labels = set(train.keys())

    unmasked_dir = os.path.join('preprocessed_datasets', 'unmasked')
    os.makedirs(unmasked_dir, exist_ok=True)
    #
    #ner_tagger = CoreNLPParser(url='http://localhost:9000', tagtype='ner')
    #ner_masked_dir = os.path.join('preprocessed_datasets', 'ner')
    #os.makedirs(ner_masked_dir, exist_ok=True)
    #
    #pos_tagger = CoreNLPParser(url='http://localhost:9000', tagtype='pos')
    #
    #posuh_masked_dir = os.path.join('preprocessed_datasets', 'posuh')
    #os.makedirs(posuh_masked_dir, exist_ok=True)
    #
    #posppn_masked_dir = os.path.join('preprocessed_datasets', 'posppn')