Example #1
def generate(**kwargs):
    """
    Randomly generate anime avatars and keep the better ones according to netd's scores.
    """
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)
    
    device = t.device('cuda') if opt.gpu else t.device('cpu')

    netg, netd = NetG(opt).eval(), NetD(opt).eval()
    noises = t.empty(opt.gen_search_num, opt.nz, 1, 1).normal_(opt.gen_mean, opt.gen_std)
    noises = noises.to(device)

    map_location = lambda storage, loc: storage
    netd.load_state_dict(t.load(opt.netd_path, map_location=map_location))
    netg.load_state_dict(t.load(opt.netg_path, map_location=map_location))
    netd.to(device)
    netg.to(device)


    # Generate images and compute their scores under the discriminator
    fake_img = netg(noises)
    scores = netd(fake_img).detach()

    # Pick the best few
    indices = scores.topk(opt.gen_num)[1]
    result = []
    for ii in indices:
        result.append(fake_img.data[ii])
    # Save the selected images
    tv.utils.save_image(t.stack(result), opt.gen_img, normalize=True, range=(-1, 1))
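A note on the map_location idiom used above: `lambda storage, loc: storage` simply keeps every tensor on CPU at load time, so the later `.to(device)` calls control placement. A minimal sketch of the more explicit modern spelling (the checkpoint path here is a hypothetical stand-in):

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load the checkpoint straight onto the target device in one step,
# equivalent to the lambda form followed by .to(device).
state_dict = torch.load('checkpoint.pth', map_location=device)  # hypothetical path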
Example #2
 def test_arrayify(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double, torch.int, torch.long):
         t = torch.tensor([[1, 2], [3, 4]], device=device).type(dtype)
         t_np = _arrayify(t)
         self.assertIsInstance(t_np, np.ndarray)
         self.assertTrue(t_np.dtype == np.float64)
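The test assumes `_arrayify` always upcasts to float64 regardless of the input dtype. A minimal sketch of such a helper, written from what the assertions imply rather than from the library source:

import numpy as np
import torch

def _arrayify(X):
    # Detach, move to CPU and upcast to double so downstream numpy/scipy
    # code always sees a contiguous float64 array, matching the test above.
    return X.cpu().detach().contiguous().double().clone().numpy()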
Example #3
    def __init__(
        self,
        cfg,
        confidence_threshold=0.7,
        show_mask_heatmaps=False,
        masks_per_dim=2,
        min_image_size=224,
    ):
        self.cfg = cfg.clone()
        self.model = build_detection_model(cfg)
        self.model.eval()
        self.device = torch.device(cfg.MODEL.DEVICE)
        self.model.to(self.device)
        self.min_image_size = min_image_size

        checkpointer = DetectronCheckpointer(cfg, self.model)
        _ = checkpointer.load(cfg.MODEL.WEIGHT)

        self.transforms = self.build_transform()

        mask_threshold = -1 if show_mask_heatmaps else 0.5
        self.masker = Masker(threshold=mask_threshold, padding=1)

        # used to make colors for each class
        self.palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])

        self.cpu_device = torch.device("cpu")
        self.confidence_threshold = confidence_threshold
        self.show_mask_heatmaps = show_mask_heatmaps
        self.masks_per_dim = masks_per_dim
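Example #3 builds its device from a config string, and Example #7 below passes a bare integer; `torch.device` accepts a type string, a "type:index" string, or an int meaning a CUDA device index. Constructing the object does not itself require CUDA, as this quick illustration shows:

import torch

assert torch.device('cpu').type == 'cpu'
assert torch.device('cuda:1').index == 1
# A bare int is shorthand for a CUDA device with that index.
assert torch.device(1) == torch.device('cuda:1')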
Example #4
    def load_model(self):
        if len(glob.glob(os.path.join(args.save_dir, args.corpus) + '-selector-*.pth')) == 0:
            return

        if args.load_iter is None:
            f_list = glob.glob(os.path.join(args.save_dir, args.corpus) + '-selector-*.pth')
            iter_list = [int(i.split('-')[-1].split('.')[0]) for i in f_list]
            start_iter = sorted(iter_list)[-1]
        else:
            start_iter = args.load_iter

        name = args.corpus + '-selector-{}.pth'.format(start_iter)
        model_file_path = os.path.join(args.save_dir, name)
        print("loading model", model_file_path)

        if opt.device == torch.device('cuda'):
            state = torch.load(model_file_path)
        else:
            state = torch.load(model_file_path, map_location=opt.device)

        self._epoch = state['epoch']
        self._iter = state['iter']
        self.running_avg_loss = state['current_loss']
        self.min_loss = state['min_loss']

        self.model.sentence_selector.load_state_dict(state['selector_state_dict'])

        if not args.is_coverage:
            self.optimizer.load_state_dict(state['optimizer'])
            if opt.device == torch.device('cuda'):
                for param_state in list(self.optimizer.state.values()):
                    for k, v in list(param_state.items()):
                        if torch.is_tensor(v):
                            param_state[k] = v.cuda()
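The final loop is needed because an optimizer's `load_state_dict` restores its buffers (e.g. Adam's moment estimates) on whatever device they were saved from; `model.to(...)` does not move them. A device-agnostic sketch of the same fix (the helper name is mine):

import torch

def optimizer_state_to(optimizer, device):
    # Relocate every tensor held in the optimizer's per-parameter state;
    # mirrors the .cuda() loop above but works for any target device.
    for param_state in optimizer.state.values():
        for k, v in param_state.items():
            if torch.is_tensor(v):
                param_state[k] = v.to(device)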
Example #5
 def test_constrained_expected_improvement_batch(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         mean = torch.tensor(
             [[-0.5, 0.0, 5.0, 0.0], [0.0, 0.0, 5.0, 0.0], [0.5, 0.0, 5.0, 0.0]],
             device=device,
             dtype=dtype,
         ).unsqueeze(dim=-2)
         variance = torch.ones(3, 4, device=device, dtype=dtype).unsqueeze(dim=-2)
         N = torch.distributions.Normal(loc=0.0, scale=1.0)
         a = N.icdf(torch.tensor(0.75))  # get a so that P(-a <= N <= a) = 0.5
         mm = MockModel(MockPosterior(mean=mean, variance=variance))
         module = ConstrainedExpectedImprovement(
             model=mm,
             best_f=0.0,
             objective_index=0,
             constraints={1: [None, 0], 2: [5.0, None], 3: [-a, a]},
         )
         X = torch.empty(3, 1, 1, device=device, dtype=dtype)  # dummy
         ei = module(X)
         ei_expected_unconstrained = torch.tensor(
             [0.19780, 0.39894, 0.69780], device=device, dtype=dtype
         )
         ei_expected = ei_expected_unconstrained * 0.5 * 0.5 * 0.5
         self.assertTrue(torch.allclose(ei, ei_expected, atol=1e-4))
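The `0.5 * 0.5 * 0.5` factor follows from the constraint setup: under the mock posterior each of the three constraints is satisfied with probability 0.5, and constrained EI multiplies the unconstrained value by those probabilities. A quick standalone check of the `a` construction:

import torch

# a = Phi^{-1}(0.75), so P(-a <= N(0,1) <= a) = 0.75 - 0.25 = 0.5,
# i.e. the two-sided constraint [-a, a] is met half the time.
N = torch.distributions.Normal(loc=0.0, scale=1.0)
a = N.icdf(torch.tensor(0.75))
prob = N.cdf(a) - N.cdf(-a)
assert torch.isclose(prob, torch.tensor(0.5), atol=1e-6)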
Example #6
    def test_probability_of_improvement(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([0.0], device=device, dtype=dtype).view(1, 1)
            variance = torch.ones(1, 1, device=device, dtype=dtype)
            mm = MockModel(MockPosterior(mean=mean, variance=variance))

            module = ProbabilityOfImprovement(model=mm, best_f=1.96)
            X = torch.zeros(1, 1, device=device, dtype=dtype)
            pi = module(X)
            pi_expected = torch.tensor(0.0250, device=device, dtype=dtype)
            self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4))

            module = ProbabilityOfImprovement(model=mm, best_f=1.96, maximize=False)
            X = torch.zeros(1, 1, device=device, dtype=dtype)
            pi = module(X)
            pi_expected = torch.tensor(0.9750, device=device, dtype=dtype)
            self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4))

            # check for proper error if multi-output model
            mean2 = torch.rand(1, 2, device=device, dtype=dtype)
            variance2 = torch.ones_like(mean2)
            mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2))
            module2 = ProbabilityOfImprovement(model=mm2, best_f=0.0)
            with self.assertRaises(UnsupportedError):
                module2(X)
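The expected values follow from the closed form PI(x) = Phi((mu(x) - best_f) / sigma(x)); with mu = 0, sigma = 1 and best_f = 1.96 this is Phi(-1.96), about 0.0250, and its complement when minimizing:

import torch

N = torch.distributions.Normal(0.0, 1.0)
# Maximization: probability the posterior exceeds best_f = 1.96.
pi_max = N.cdf(torch.tensor((0.0 - 1.96) / 1.0))
assert torch.isclose(pi_max, torch.tensor(0.0250), atol=1e-4)
# Minimization flips the sign, giving the 0.9750 expected above.
assert torch.isclose(1 - pi_max, torch.tensor(0.9750), atol=1e-4)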
Example #7
 def test_factory(self):
     default_size = torch.Size([1, 3])
     size = torch.Size([3, 3])
     for include_size in [True, False]:
         for use_tensor_idx in [True, False]:
             for use_tensor_val in [True, False]:
                 for use_cuda in ([False] if not torch.cuda.is_available() else [True, False]):
                     # have to include size with cuda sparse tensors
                     include_size = include_size or use_cuda
                     dtype = torch.float64
                     long_dtype = torch.int64
                     device = torch.device('cpu') if not use_cuda else torch.device(torch.cuda.device_count() - 1)
                     indices = torch.tensor(([0], [2]), dtype=long_dtype) if use_tensor_idx else ([0], [2])
                     values = torch.tensor([1.], dtype=dtype) if use_tensor_val else 1.
                     if include_size:
                         sparse_tensor = torch.sparse_coo_tensor(indices, values, size, dtype=dtype,
                                                                 device=device, requires_grad=True)
                     else:
                         sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=dtype,
                                                                 device=device, requires_grad=True)
                     self.assertEqual(indices, sparse_tensor._indices())
                     self.assertEqual(values, sparse_tensor._values())
                     self.assertEqual(size if include_size else default_size, sparse_tensor.size())
                     self.assertEqual(dtype, sparse_tensor.dtype)
                     if use_cuda:
                         self.assertEqual(device, sparse_tensor._values().device)
                     self.assertEqual(True, sparse_tensor.requires_grad)
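For readers unfamiliar with the COO factory being exercised: indices form a 2 x nnz matrix of coordinates, values an nnz-length vector, and the size is inferred when omitted. A helper-free miniature, whose inferred-size branch explains the `default_size = (1, 3)` expectation above:

import torch

i = [[0], [2]]  # one nonzero entry at row 0, column 2
v = [1.0]
s = torch.sparse_coo_tensor(i, v, torch.Size([3, 3]), dtype=torch.float64)
assert s.to_dense()[0, 2] == 1.0
# Without an explicit size, the smallest bounding shape is inferred.
assert torch.sparse_coo_tensor(i, v).size() == torch.Size([1, 3])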
Example #8
 def test_joint_optimize(
     self,
     mock_get_best_candidates,
     mock_gen_candidates,
     mock_gen_batch_initial_conditions,
     cuda=False,
 ):
     q = 3
     num_restarts = 2
     raw_samples = 10
     options = {}
     mock_acq_function = MockAcquisitionFunction()
     tkwargs = {"device": torch.device("cuda") if cuda else torch.device("cpu")}
     for dtype in (torch.float, torch.double):
         tkwargs["dtype"] = dtype
         mock_gen_batch_initial_conditions.return_value = torch.zeros(
             num_restarts, q, 3, **tkwargs
         )
         mock_gen_candidates.return_value = torch.cat(
             [i * torch.ones(1, q, 3, **tkwargs) for i in range(num_restarts)], dim=0
         )
         mock_get_best_candidates.return_value = torch.ones(1, q, 3, **tkwargs)
         expected_candidates = mock_get_best_candidates.return_value
         bounds = torch.stack(
             [torch.zeros(3, **tkwargs), 4 * torch.ones(3, **tkwargs)]
         )
         candidates = joint_optimize(
             acq_function=mock_acq_function,
             bounds=bounds,
             q=q,
             num_restarts=num_restarts,
             raw_samples=raw_samples,
             options=options,
         )
         self.assertTrue(torch.equal(candidates, expected_candidates))
Example #9
    def test_FixedNoiseMultiTaskGP_single_output(self, cuda=False):
        for double in (False, True):
            tkwargs = {
                "device": torch.device("cuda") if cuda else torch.device("cpu"),
                "dtype": torch.double if double else torch.float,
            }
            model = _get_fixed_noise_model_single_output(**tkwargs)
            self.assertIsInstance(model, FixedNoiseMultiTaskGP)
            self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            matern_kernel = model.covar_module.base_kernel
            self.assertIsInstance(matern_kernel, MaternKernel)
            self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.task_covar_module, IndexKernel)
            self.assertEqual(model._rank, 2)
            self.assertEqual(
                model.task_covar_module.covar_factor.shape[-1], model._rank
            )

            # test model fitting
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            mll = fit_gpytorch_model(mll, options={"maxiter": 1})

            # test posterior
            test_x = torch.rand(2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)

            # test posterior (batch eval)
            test_x = torch.rand(3, 2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)
Example #10
 def test_manual_bounds(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         # get a test module
         train_x = torch.tensor([[1.0, 2.0, 3.0]], device=device, dtype=dtype)
         train_y = torch.tensor([4.0], device=device, dtype=dtype)
         likelihood = GaussianLikelihood()
         model = ExactGP(train_x, train_y, likelihood)
         model.covar_module = RBFKernel(ard_num_dims=3)
         model.mean_module = ConstantMean()
         model.to(device=device, dtype=dtype)
         mll = ExactMarginalLogLikelihood(likelihood, model)
         # test the basic case
         x, pdict, bounds = module_to_array(
             module=mll, bounds={"model.covar_module.raw_lengthscale": (0.1, None)}
         )
         self.assertTrue(np.array_equal(x, np.zeros(5)))
         expected_sizes = {
             "likelihood.noise_covar.raw_noise": torch.Size([1]),
             "model.covar_module.raw_lengthscale": torch.Size([1, 3]),
             "model.mean_module.constant": torch.Size([1]),
         }
         self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
         for pname, val in pdict.items():
             self.assertEqual(val.dtype, dtype)
             self.assertEqual(val.shape, expected_sizes[pname])
             self.assertEqual(val.device.type, device.type)
         lower_exp = np.full_like(x, 0.1)
         for p in ("likelihood.noise_covar.raw_noise", "model.mean_module.constant"):
             lower_exp[_get_index(pdict, p)] = -np.inf
         self.assertTrue(np.equal(bounds[0], lower_exp).all())
         self.assertTrue(np.equal(bounds[1], np.full_like(x, np.inf)).all())
Example #11
 def set_opt(self):
      model_classes = {  # the full class paths are long, so map short names via dicts to save the user typing
         'lstm': torch.nn.LSTM,
         'cnn': torch.nn.Conv2d,
     }
     input_colses = {
         'lstm': ['text_raw_indices'],
         'cnn': ['text_raw_indices', 'aspect_indices'],
     }
     initializers = {
         'xavier_uniform_': torch.nn.init.xavier_uniform_,
          'xavier_normal_': torch.nn.init.xavier_normal_,
         'orthogonal_': torch.nn.init.orthogonal_,
     }
     optimizers = {
         'adadelta': torch.optim.Adadelta,  # default lr=1.0
         'adagrad': torch.optim.Adagrad,    # default lr=0.01
         'adam': torch.optim.Adam,          # default lr=0.001
         'adamax': torch.optim.Adamax,      # default lr=0.002
         'asgd': torch.optim.ASGD,          # default lr=0.01
         'rmsprop': torch.optim.RMSprop,    # default lr=0.01
         'sgd': torch.optim.SGD,
     }
     self.model_class = model_classes[self.model_name]
     self.inputs_cols = input_colses[self.model_name]
     self.initializer = initializers[self.initializer]
     self.optimizer = optimizers[self.optimizer]
     self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \
         if self.device is None else torch.device(self.device)
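The device fallback on the last two lines is a common pattern; pulled out on its own it might look like the following sketch (the helper name is mine):

import torch

def resolve_device(device=None):
    # Honor an explicit user choice, otherwise prefer CUDA when available.
    if device is not None:
        return torch.device(device)
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')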
Example #12
    def test_FixedNoiseGP(self, cuda=False):
        for batch_shape in (torch.Size([]), torch.Size([2])):
            for num_outputs in (1, 2):
                for double in (False, True):
                    tkwargs = {
                        "device": torch.device("cuda") if cuda else torch.device("cpu"),
                        "dtype": torch.double if double else torch.float,
                    }
                    model = self._get_model(
                        batch_shape=batch_shape,
                        num_outputs=num_outputs,
                        n=10,
                        **tkwargs
                    )
                    self.assertIsInstance(model, FixedNoiseGP)
                    self.assertIsInstance(
                        model.likelihood, FixedNoiseGaussianLikelihood
                    )
                    self.assertIsInstance(model.mean_module, ConstantMean)
                    self.assertIsInstance(model.covar_module, ScaleKernel)
                    matern_kernel = model.covar_module.base_kernel
                    self.assertIsInstance(matern_kernel, MaternKernel)
                    self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)

                    # test model fitting
                    mll = ExactMarginalLogLikelihood(model.likelihood, model)
                    mll = fit_gpytorch_model(mll, options={"maxiter": 1})

                    # Test forward
                    test_x = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
                    posterior = model(test_x)
                    self.assertIsInstance(posterior, MultivariateNormal)

                    # TODO: Pass observation noise into posterior
                    # posterior_obs = model.posterior(test_x, observation_noise=True)
                    # self.assertTrue(
                    #     torch.allclose(
                    #         posterior_f.variance + 0.01,
                    #         posterior_obs.variance
                    #     )
                    # )

                    # test posterior
                    # test non batch evaluation
                    X = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
                    posterior = model.posterior(X)
                    self.assertIsInstance(posterior, GPyTorchPosterior)
                    self.assertEqual(
                        posterior.mean.shape, batch_shape + torch.Size([3, num_outputs])
                    )
                    # test batch evaluation
                    X = torch.rand(
                        torch.Size([2]) + batch_shape + torch.Size([3, 1]), **tkwargs
                    )
                    posterior = model.posterior(X)
                    self.assertIsInstance(posterior, GPyTorchPosterior)
                    self.assertEqual(
                        posterior.mean.shape,
                        torch.Size([2]) + batch_shape + torch.Size([3, num_outputs]),
                    )
Example #13
 def _setUp(self, double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi)).squeeze(-1)
     train_yvar = torch.tensor(0.1 ** 2, device=device)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     self.train_yvar = train_yvar
     self.bounds = torch.tensor([[0.0], [1.0]], device=device, dtype=dtype)
     model_st = SingleTaskGP(self.train_x, self.train_y)
     self.model_st = model_st.to(device=device, dtype=dtype)
     self.mll_st = ExactMarginalLogLikelihood(
         self.model_st.likelihood, self.model_st
     )
     self.mll_st = fit_gpytorch_model(self.mll_st, options={"maxiter": 5})
     model_fn = FixedNoiseGP(
         self.train_x, self.train_y, self.train_yvar.expand_as(self.train_y)
     )
     self.model_fn = model_fn.to(device=device, dtype=dtype)
     self.mll_fn = ExactMarginalLogLikelihood(
         self.model_fn.likelihood, self.model_fn
     )
     self.mll_fn = fit_gpytorch_model(self.mll_fn, options={"maxiter": 5})
Example #14
def generate(**kwargs):
    opt = Config()
    for k, v in kwargs.items():
        setattr(opt, k, v)
    device = t.device('cuda') if opt.use_gpu else t.device('cpu')

    # Data preprocessing
    data = t.load(opt.caption_data_path, map_location=lambda s, l: s)
    word2ix, ix2word = data['word2ix'], data['ix2word']

    normalize = tv.transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
    transforms = tv.transforms.Compose([
        tv.transforms.Resize(opt.scale_size),
        tv.transforms.CenterCrop(opt.img_size),
        tv.transforms.ToTensor(),
        normalize
    ])
    img = Image.open(opt.test_img)
    img = transforms(img).unsqueeze(0)

    # Extract image features with resnet50
    resnet50 = tv.models.resnet50(pretrained=True).eval()
    del resnet50.fc
    resnet50.fc = lambda x: x
    resnet50.to(device)
    img = img.to(device)
    img_feats = resnet50(img).detach()

    # Caption model
    model = CaptionModel(opt, word2ix, ix2word)
    model = model.load(opt.model_ckpt).eval()
    model.to(device)

    results = model.generate(img_feats.data[0])
    print('\r\n'.join(results))
Example #15
 def test_add_output_dim(self, cuda=False):
     for double in (False, True):
         tkwargs = {
             "device": torch.device("cuda") if cuda else torch.device("cpu"),
             "dtype": torch.double if double else torch.float,
         }
         original_batch_shape = torch.Size([2])
         # check exception is raised
         X = torch.rand(2, 1, **tkwargs)
         with self.assertRaises(ValueError):
             add_output_dim(X=X, original_batch_shape=original_batch_shape)
         # test no new batch dims
         X = torch.rand(2, 2, 1, **tkwargs)
         X_out, output_dim_idx = add_output_dim(
             X=X, original_batch_shape=original_batch_shape
         )
         self.assertTrue(torch.equal(X_out, X.unsqueeze(0)))
         self.assertEqual(output_dim_idx, 0)
         # test new batch dims
         X = torch.rand(3, 2, 2, 1, **tkwargs)
         X_out, output_dim_idx = add_output_dim(
             X=X, original_batch_shape=original_batch_shape
         )
         self.assertTrue(torch.equal(X_out, X.unsqueeze(1)))
         self.assertEqual(output_dim_idx, 1)
Example #16
 def test_gen_batch_initial_conditions_simple_warning(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         bounds = torch.tensor([[0, 0], [1, 1]], device=device, dtype=dtype)
         with warnings.catch_warnings(record=True) as ws:
             with mock.patch(
                 "botorch.optim.optimize.draw_sobol_samples",
                 return_value=torch.zeros(10, 1, 2, device=device, dtype=dtype),
             ):
                 batch_initial_conditions = gen_batch_initial_conditions(
                     acq_function=MockAcquisitionFunction(),
                     bounds=bounds,
                     q=1,
                     num_restarts=2,
                     raw_samples=10,
                 )
                 self.assertEqual(len(ws), 1)
                 self.assertTrue(
                     issubclass(ws[-1].category, BadInitialCandidatesWarning)
                 )
                 self.assertTrue(
                     torch.equal(
                         batch_initial_conditions,
                         torch.zeros(2, 1, 2, device=device, dtype=dtype),
                     )
                 )
Example #17
def stylize(**kwargs):
    opt = Config()

    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)
    device = t.device('cuda') if opt.use_gpu else t.device('cpu')

    # Image preprocessing
    content_image = tv.datasets.folder.default_loader(opt.content_path)
    content_transform = tv.transforms.Compose([
        tv.transforms.ToTensor(),
        tv.transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device).detach()

    # Model
    style_model = TransformerNet().eval()
    style_model.load_state_dict(t.load(opt.model_path, map_location=lambda _s, _: _s))
    style_model.to(device)

    # Run the style transfer and save the result
    output = style_model(content_image)
    output_data = output.cpu().data[0]
    tv.utils.save_image((output_data / 255).clamp(min=0, max=1), opt.result_path)
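The final division undoes the `x.mul(255)` applied at load time: the network works in a roughly 0-255 range, while `save_image` without `normalize` expects values in [0, 1], so the output is rescaled and clamped. In isolation:

import torch

output_data = torch.rand(3, 8, 8) * 255        # stand-in for the network output
img = (output_data / 255).clamp(min=0, max=1)  # back into the [0, 1] range
assert 0 <= img.min() and img.max() <= 1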
Example #18
    def test_degenerate_GPyTorchPosterior(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            # singular covariance matrix
            degenerate_covar = torch.tensor(
                [[1, 1, 0], [1, 1, 0], [0, 0, 2]], dtype=dtype, device=device
            )
            mean = torch.rand(3, dtype=dtype, device=device)
            mvn = MultivariateNormal(mean, lazify(degenerate_covar))
            posterior = GPyTorchPosterior(mvn=mvn)
            # basics
            self.assertEqual(posterior.device.type, device.type)
            self.assertTrue(posterior.dtype == dtype)
            self.assertEqual(posterior.event_shape, torch.Size([3, 1]))
            self.assertTrue(torch.equal(posterior.mean, mean.unsqueeze(-1)))
            variance_exp = degenerate_covar.diag().unsqueeze(-1)
            self.assertTrue(torch.equal(posterior.variance, variance_exp))

            # rsample
            with warnings.catch_warnings(record=True) as w:
                # we check that the p.d. warning is emitted - this only
                # happens once per posterior, so we need to check only once
                samples = posterior.rsample(sample_shape=torch.Size([4]))
                self.assertEqual(len(w), 1)
                self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
                self.assertTrue("not p.d." in str(w[-1].message))
            self.assertEqual(samples.shape, torch.Size([4, 3, 1]))
            samples2 = posterior.rsample(sample_shape=torch.Size([4, 2]))
            self.assertEqual(samples2.shape, torch.Size([4, 2, 3, 1]))
            # rsample w/ base samples
            base_samples = torch.randn(4, 3, 1, device=device, dtype=dtype)
            samples_b1 = posterior.rsample(
                sample_shape=torch.Size([4]), base_samples=base_samples
            )
            samples_b2 = posterior.rsample(
                sample_shape=torch.Size([4]), base_samples=base_samples
            )
            self.assertTrue(torch.allclose(samples_b1, samples_b2))
            base_samples2 = torch.randn(4, 2, 3, 1, device=device, dtype=dtype)
            samples2_b1 = posterior.rsample(
                sample_shape=torch.Size([4, 2]), base_samples=base_samples2
            )
            samples2_b2 = posterior.rsample(
                sample_shape=torch.Size([4, 2]), base_samples=base_samples2
            )
            self.assertTrue(torch.allclose(samples2_b1, samples2_b2))
            # collapse_batch_dims
            b_mean = torch.rand(2, 3, dtype=dtype, device=device)
            b_degenerate_covar = degenerate_covar.expand(2, *degenerate_covar.shape)
            b_mvn = MultivariateNormal(b_mean, lazify(b_degenerate_covar))
            b_posterior = GPyTorchPosterior(mvn=b_mvn)
            b_base_samples = torch.randn(4, 2, 3, 1, device=device, dtype=dtype)
            with warnings.catch_warnings(record=True) as w:
                b_samples = b_posterior.rsample(
                    sample_shape=torch.Size([4]), base_samples=b_base_samples
                )
                self.assertEqual(len(w), 1)
                self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
                self.assertTrue("not p.d." in str(w[-1].message))
            self.assertEqual(b_samples.shape, torch.Size([4, 2, 3, 1]))
Example #19
 def test_MultivariateNormalQMCEngineDegenerate(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         # X, Y iid standard Normal and Z = X + Y, random vector (X, Y, Z)
         mean = torch.zeros(3, device=device, dtype=dtype)
         cov = torch.tensor(
             [[1, 0, 1], [0, 1, 1], [1, 1, 2]], device=device, dtype=dtype
         )
         engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
         samples = engine.draw(n=2000)
         self.assertEqual(samples.dtype, dtype)
         self.assertEqual(samples.device.type, device.type)
         self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
         self.assertTrue(torch.abs(torch.std(samples[:, 0]) - 1) < 1e-2)
         self.assertTrue(torch.abs(torch.std(samples[:, 1]) - 1) < 1e-2)
         self.assertTrue(torch.abs(torch.std(samples[:, 2]) - math.sqrt(2)) < 1e-2)
         for i in (0, 1, 2):
             _, pval = shapiro(samples[:, i].cpu().numpy())
             self.assertGreater(pval, 0.9)
         cov = np.cov(samples.cpu().numpy().transpose())
         self.assertLess(np.abs(cov[0, 1]), 1e-2)
         self.assertLess(np.abs(cov[0, 2] - 1), 1e-2)
         # check to see if X + Y = Z almost exactly
         self.assertTrue(
             torch.all(
                 torch.abs(samples[:, 0] + samples[:, 1] - samples[:, 2]) < 1e-5
             )
         )
Example #20
    def test_upper_confidence_bound(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([[0.0]], device=device, dtype=dtype)
            variance = torch.tensor([[1.0]], device=device, dtype=dtype)
            mm = MockModel(MockPosterior(mean=mean, variance=variance))

            module = UpperConfidenceBound(model=mm, beta=1.0)
            X = torch.zeros(1, 1, device=device, dtype=dtype)
            ucb = module(X)
            ucb_expected = torch.tensor([1.0], device=device, dtype=dtype)
            self.assertTrue(torch.allclose(ucb, ucb_expected, atol=1e-4))

            module = UpperConfidenceBound(model=mm, beta=1.0, maximize=False)
            X = torch.zeros(1, 1, device=device, dtype=dtype)
            ucb = module(X)
            ucb_expected = torch.tensor([-1.0], device=device, dtype=dtype)
            self.assertTrue(torch.allclose(ucb, ucb_expected, atol=1e-4))

            # check for proper error if multi-output model
            mean2 = torch.rand(1, 2, device=device, dtype=dtype)
            variance2 = torch.rand(1, 2, device=device, dtype=dtype)
            mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2))
            module2 = UpperConfidenceBound(model=mm2, beta=1.0)
            with self.assertRaises(UnsupportedError):
                module2(X)
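As with probability of improvement, the expectations come from a closed form, UCB(x) = mu(x) + sqrt(beta) * sigma(x); with mu = 0, variance 1 and beta = 1 that is 1.0, negated when minimizing:

import math

mu, variance, beta = 0.0, 1.0, 1.0
ucb = mu + math.sqrt(beta) * math.sqrt(variance)
assert ucb == 1.0  # and -1.0 with maximize=False, as asserted above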
Example #21
    def data_from_config(cls, config, train=False, model=None):
        if model is None:
            model = cls.model_from_config(config, load_checkpoint=False)
        if train:
            data = config.data
            dataset = data.train.dataset(**vars(data.train.config))
            train_set, valid_set = cls.random_split(
                dataset, data.valid.split,
                train_transform=data.train.transform(model),
                train_target_transform=data.train.target_transform(model),
                valid_transform=data.valid.transform(model),
                valid_target_transform=data.valid.target_transform(model))

            cuda = torch.device(config.device).type == 'cuda'
            train_loader = cls.data_loader(train_set, data.train.batch_size,
                                           train=True, cuda=cuda,
                                           workers=data.train.num_loaders)
            valid_loader = cls.data_loader(valid_set, data.valid.batch_size,
                                           train=False, cuda=cuda,
                                           workers=data.valid.num_loaders)
            return train_loader, valid_loader
        else:
            data = config.data.test
            test_set = TransformedDataset.of(
                data.dataset(**vars(data.config)),
                transform=data.transform(model),
                target_transform=data.target_transform(model))

            cuda = torch.device(config.device).type == 'cuda'
            loader = cls.data_loader(test_set, data.batch_size,
                                     train=False, cuda=cuda,
                                     workers=data.num_loaders)
            return loader
Example #22
    def __next__(self):
        if self._first_batch is not None:
            batch = self._first_batch
            self._first_batch = None
            return batch
        if self._counter > self._size:
            raise StopIteration
        # Gather outputs
        outputs = []
        for p in self._pipes:
            outputs.append(p.run())
        for i in range(self._num_gpus):
            dev_id = self._pipes[i].device_id
            out_data = []
            out_labels = []
            # segregate outputs into data/label entries
            for j, out in enumerate(outputs[i]):
                if self.output_map[j] == "data":
                    out_data.append(out)
                elif self.output_map[j] == "label":
                    out_labels.append(out)

            # Change DALI TensorLists into Tensors
            data = [x.as_tensor() for x in out_data]
            data_shape = [x.shape() for x in data]
            # Change label shape from [batch_size, 1] to [batch_size]
            labels = [x.as_tensor() for x in out_labels]
            for l in labels:
                l.squeeze()

            label_shape = [x.shape() for x in labels]
            # If we did not yet allocate memory for that batch, do it now
            if self._data_batches[i][self._current_data_batch] is None:
                
                data_torch_type = to_torch_type[np.dtype(data[0].dtype())]
                label_torch_type = to_torch_type[np.dtype(labels[0].dtype())]
                
                torch_gpu_device = torch.device('cuda', dev_id)
                torch_cpu_device = torch.device('cpu')
                
                pyt_data = [torch.zeros(shape, dtype=data_torch_type, device=torch_gpu_device) for shape in data_shape]
                pyt_labels = [torch.zeros(shape, dtype=label_torch_type, device=torch_cpu_device) for shape in label_shape]

                self._data_batches[i][self._current_data_batch] = (pyt_data, pyt_labels)
            else:
                pyt_data, pyt_labels = self._data_batches[i][self._current_data_batch]

            # Copy data from DALI Tensors to torch tensors
            for j, d_arr in enumerate(data):
                feed_ndarray(d_arr, pyt_data[j])
            for j, l_arr in enumerate(labels):
                feed_ndarray(l_arr, pyt_labels[j])

        
        copy_db_index = self._current_data_batch
        # Change index for double buffering
        self._current_data_batch = (self._current_data_batch + 1) % 2
        self._counter += self._num_gpus * self.batch_size
        return [db[copy_db_index] for db in self._data_batches]
Example #23
    def test_sequential_optimize(self, mock_joint_optimize, cuda=False):
        q = 3
        num_restarts = 2
        raw_samples = 10
        options = {}
        tkwargs = {"device": torch.device("cuda") if cuda else torch.device("cpu")}
        for dtype in (torch.float, torch.double):
            mock_acq_function = MockAcquisitionFunction()
            tkwargs["dtype"] = dtype
            joint_optimize_return_values = [
                torch.tensor([[[1.1, 2.1, 3.1]]], **tkwargs) for _ in range(q)
            ]
            mock_joint_optimize.side_effect = joint_optimize_return_values
            expected_candidates = torch.cat(
                joint_optimize_return_values, dim=-2
            ).round()
            bounds = torch.stack(
                [torch.zeros(3, **tkwargs), 4 * torch.ones(3, **tkwargs)]
            )
            inequality_constraints = [
                (torch.tensor([3]), torch.tensor([4]), torch.tensor(5))
            ]
            candidates = sequential_optimize(
                acq_function=mock_acq_function,
                bounds=bounds,
                q=q,
                num_restarts=num_restarts,
                raw_samples=raw_samples,
                options=options,
                inequality_constraints=inequality_constraints,
                post_processing_func=rounding_func,
            )
            self.assertTrue(torch.equal(candidates, expected_candidates))

            expected_call_kwargs = {
                "acq_function": mock_acq_function,
                "bounds": bounds,
                "q": 1,
                "num_restarts": num_restarts,
                "raw_samples": raw_samples,
                "options": options,
                "inequality_constraints": inequality_constraints,
                "equality_constraints": None,
                "fixed_features": None,
            }
            call_args_list = mock_joint_optimize.call_args_list[-q:]
            for i in range(q):
                self.assertEqual(call_args_list[i][1], expected_call_kwargs)

            # test that error is raised for acquisition functions without X_baseline
            mock_acq_function = MockAcquisitionFunction(has_X_baseline_attr=False)
            with self.assertRaises(UnsupportedError):
                sequential_optimize(
                    acq_function=mock_acq_function,
                    bounds=bounds,
                    q=q,
                    num_restarts=num_restarts,
                    raw_samples=raw_samples,
                )
Example #24
 def test_MultivariateNormalQMCEngineSymmetric(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         # try with non-symmetric cov and expect an error
         mean = torch.zeros(2, device=device, dtype=dtype)
         cov = torch.tensor([[1, 0], [2, 1]], device=device, dtype=dtype)
         with self.assertRaises(ValueError):
             MultivariateNormalQMCEngine(mean=mean, cov=cov)
Example #25
 def test_single_eval_neg_eggholder(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         X = torch.zeros(2, device=device, dtype=dtype)
         res = neg_eggholder(X)
         self.assertEqual(res.dtype, dtype)
         self.assertEqual(res.device.type, device.type)
         self.assertEqual(res.shape, torch.Size())
Example #26
 def test_batch_eval_neg_michalewicz(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         X = torch.zeros(2, 10, device=device, dtype=dtype)
         res = neg_michalewicz(X)
         self.assertEqual(res.dtype, dtype)
         self.assertEqual(res.device.type, device.type)
         self.assertEqual(res.shape, torch.Size([2]))
Example #27
    def test_make_scipy_linear_constraints(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        shapeX = torch.Size([2, 1, 4])
        res = make_scipy_linear_constraints(
            shapeX=shapeX, inequality_constraints=None, equality_constraints=None
        )
        self.assertEqual(res, [])
        indices = torch.tensor([0, 1], dtype=torch.long, device=device)
        coefficients = torch.tensor([1.5, -1.0], device=device)
        cs = make_scipy_linear_constraints(
            shapeX=shapeX,
            inequality_constraints=[(indices, coefficients, 1.0)],
            equality_constraints=[(indices, coefficients, 1.0)],
        )
        self.assertEqual(len(cs), 4)
        self.assertTrue({c["type"] for c in cs} == {"ineq", "eq"})
        cs = make_scipy_linear_constraints(
            shapeX=shapeX, inequality_constraints=[(indices, coefficients, 1.0)]
        )
        self.assertEqual(len(cs), 2)
        self.assertTrue(all(c["type"] == "ineq" for c in cs))
        cs = make_scipy_linear_constraints(
            shapeX=shapeX, equality_constraints=[(indices, coefficients, 1.0)]
        )
        self.assertEqual(len(cs), 2)
        self.assertTrue(all(c["type"] == "eq" for c in cs))

        # test that len(shapeX) < 3 raises an error
        with self.assertRaises(UnsupportedError):
            make_scipy_linear_constraints(
                shapeX=torch.Size([2, 1]),
                inequality_constraints=[(indices, coefficients, 1.0)],
                equality_constraints=[(indices, coefficients, 1.0)],
            )
        # test that 2-dim indices raises a NotImplementedError
        indices = indices.unsqueeze(0)
        with self.assertRaises(NotImplementedError):
            make_scipy_linear_constraints(
                shapeX=shapeX,
                inequality_constraints=[(indices, coefficients, 1.0)],
                equality_constraints=[(indices, coefficients, 1.0)],
            )
        # test that >2-dim indices raises an UnsupportedError
        indices = indices.unsqueeze(0)
        with self.assertRaises(UnsupportedError):
            make_scipy_linear_constraints(
                shapeX=shapeX,
                inequality_constraints=[(indices, coefficients, 1.0)],
                equality_constraints=[(indices, coefficients, 1.0)],
            )
        # test that out of bounds index raises an error
        indices = torch.tensor([0, 4], dtype=torch.long, device=device)
        with self.assertRaises(RuntimeError):
            make_scipy_linear_constraints(
                shapeX=shapeX,
                inequality_constraints=[(indices, coefficients, 1.0)],
                equality_constraints=[(indices, coefficients, 1.0)],
            )
Example #28
 def _getModel(self, double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     train_y = torch.sin(train_x.view(-1) * (2 * math.pi)) + noise
     model = SingleTaskGP(train_x, train_y)
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=device, dtype=dtype)
Example #29
 def test_batch_eval_neg_styblinski_tang(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         X = torch.zeros(2, 3, device=device, dtype=dtype)
         res = neg_styblinski_tang(X)
         self.assertEqual(res.dtype, dtype)
         self.assertEqual(res.device.type, device.type)
         self.assertEqual(res.shape, torch.Size([2]))
         self.assertTrue(torch.all(res == 0))
Example #30
 def test_neg_michalewicz_global_maximum(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         X = torch.tensor(
             GLOBAL_MAXIMIZER, device=device, dtype=dtype, requires_grad=True
         )
         res = neg_michalewicz(X)
         res.backward()
         self.assertAlmostEqual(res.item(), GLOBAL_MAXIMUM, places=4)
         self.assertLess(X.grad.abs().max().item(), 1e-3)
Example #31
def main(args):
    if args.apex and amp is None:
        raise RuntimeError(
            "Failed to import apex. Please install apex from https://www.github.com/nvidia/apex "
            "to enable mixed-precision training.")

    if args.output_dir:
        utils.mkdir(args.output_dir)

    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    torch.backends.cudnn.benchmark = True

    train_dir = os.path.join(args.data_path, 'train')
    val_dir = os.path.join(args.data_path, 'val')
    dataset, dataset_test, train_sampler, test_sampler = load_data(
        train_dir, val_dir, args.cache_dataset, args.distributed)
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=args.batch_size,
                                              sampler=train_sampler,
                                              num_workers=args.workers,
                                              pin_memory=True)

    data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                                   batch_size=args.batch_size,
                                                   sampler=test_sampler,
                                                   num_workers=args.workers,
                                                   pin_memory=True)

    print("Creating model")
    model = torchvision.models.__dict__[args.model](pretrained=args.pretrained)
    model.to(device)
    if args.distributed and args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    criterion = nn.CrossEntropyLoss()

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.apex:
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level=args.apex_opt_level)

    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=args.lr_step_size,
                                                   gamma=args.lr_gamma)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module

    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    if args.test_only:
        evaluate(model, criterion, data_loader_test, device=device)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, criterion, optimizer, data_loader, device,
                        epoch, args.print_freq, args.apex)
        lr_scheduler.step()
        evaluate(model, criterion, data_loader_test, device=device)
        if args.output_dir:
            checkpoint = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'epoch': epoch,
                'args': args
            }
            utils.save_on_master(
                checkpoint,
                os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))
            utils.save_on_master(
                checkpoint, os.path.join(args.output_dir, 'checkpoint.pth'))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
Example #32
def main():
    # Maybe delete this ?
    group = 'lung'

    parser = argparse.ArgumentParser(description='classifier')
    parser.add_argument('--sample_file', type=str, default='lung.emx.txt', help="the name of the GEM organized by samples (columns) by genes (rows)")
    parser.add_argument('--label_file', type=str, default='sample_condition.txt', help="name of the label file: two columns that maps the sample to the label")
    parser.add_argument('--output_name', type=str, default='tissue-run-1', help="name of the output directory to store the output files")
    #parser.add_argument('--overwrite_output', type=bool, default=False, help="overwrite the output directory file if it already exists")
    parser.add_argument('--batch_size', type=int, default=16, help="size of batches to split data")
    parser.add_argument('--max_epoch', type=int, default=100, help="number of passes through a dataset")
    parser.add_argument('--learning_rate', type=float, default=0.001, help="controls the rate at which the weights of the model update")
    parser.add_argument('--test_split', type=float, default=0.3, help="fraction of data held out for testing (e.g. 0.3 for 30%); the remaining data is used for training")
    parser.add_argument('--continuous_discrete', type=str, default='continuous', help="type of data in the sample file; typically RNA is continuous and DNA is discrete")
    parser.add_argument('--plot_results', type=bool, default=True, help="plots the sample distribution, training/test accuracy/loss, and confusion matrix")
    parser.add_argument('--use_gpu', type=bool, default=False, help="true to use a gpu, false to use the cpu - if the node does not have a gpu then it will use the cpu")
    args = parser.parse_args()

    #If data is discrete, data should only range between 0-3
    #if args.continuous_discrete == "discrete":
        #args.input_num_classes = 4

    # Initialize file paths and create output folder
    LABEL_FILE = os.path.join(INPUT_DIR, args.label_file)
    SAMPLE_FILE = os.path.join(INPUT_DIR, args.sample_file)
    OUTPUT_DIR_FINAL = os.path.join(OUTPUT_DIR, args.output_name + "-" + str(datetime.today().strftime('%Y-%m-%d-%H:%M')))
    if not os.path.exists(OUTPUT_DIR_FINAL):
        os.makedirs(OUTPUT_DIR_FINAL)

    # Create log file to keep track of model parameters
    logging.basicConfig(filename=os.path.join(OUTPUT_DIR_FINAL,'classifier.log'),
                        filemode='w',
                        format='%(message)s',
                        level=logging.INFO)
    logger = logging.getLogger(__name__)
    logger.info('Classifier log file for ' + args.sample_file + ' - Started on ' + str(datetime.today().strftime('%Y-%m-%d-%H:%M')) + '\n')
    logger.info('Batch size: %d', args.batch_size)
    logger.info('Number of epochs: %d', args.max_epoch)
    logger.info('Learning Rate: %f', args.learning_rate)
    logger.info('Sample filename: ' + args.sample_file)
    logger.info('Output directory: ' + args.output_name)

    if args.continuous_discrete != 'continuous' and args.continuous_discrete != 'discrete':
        logger.error("ERROR: check that the continuous_discrete argument is spelled correctly.")
        logger.error("       only continuous or discrete data can be processed.")
        sys.exit("\nCommand line argument error. Please check the log file.\n")

    # Initialize GPU usage if desired
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda and args.use_gpu else "cpu")
    train_kwargs = {'batch_size': args.batch_size}
    test_kwargs = {'batch_size': args.batch_size}
    if use_cuda:
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)

    # Load matrix, labels/weights, and number of samples
    column_names = ("sample", "label")
    matrix_df = pd.read_csv(SAMPLE_FILE, sep='\t', index_col=[0])
    labels_df = pd.read_csv(LABEL_FILE, names=column_names, delim_whitespace=True, header=None)


    # Error checking for same number of samples in both files and samples are unique
    samples_unique = set(labels_df.iloc[:,0])
    assert len(labels_df) == len(matrix_df.columns)
    assert len(labels_df) == len(samples_unique)

    
    labels, class_weights = preprocessing.labels_and_weights(labels_df)
    args.output_num_classes = len(labels)
    is_binary = False
    if len(labels) == 2:
        is_binary = True
        args.output_num_classes = 1

    # Define model paramters
    batch_size = args.batch_size
    max_epoch = args.max_epoch
    learning_rate = args.learning_rate #5e-4
    num_features = len(matrix_df.index)

    # Setup model
    model = utils.Net(input_seq_length=num_features,
                  output_num_classes=args.output_num_classes).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)

    if is_binary:
        loss_fn = torch.nn.BCEWithLogitsLoss()
    else:
        loss_fn = torch.nn.CrossEntropyLoss()#(weight=class_weights)

    logger.info('Number of samples: %d\n', len(labels_df))
    logger.info('Labels: ')
    for i in range(len(labels)):
        logger.info('       %d - %s', i, labels[i])
    
    # Replace missing data with the global minimum of the dataset
    val_min, val_max = np.nanmin(matrix_df), np.nanmax(matrix_df)
    matrix_df.fillna(val_min, inplace=True)

    # Transposing matrix to align with label file
    matrix_transposed_df = matrix_df.T

    # Create density and tsne plot
    graphs = Plotter(OUTPUT_DIR_FINAL)
    graphs.density(matrix_df)
    graphs.tsne(matrix_transposed_df, labels_df, labels, title=args.sample_file)

    train_data, test_data = preprocessing.split_data(matrix_transposed_df, labels_df, args.test_split, args.output_num_classes)

    # Convert tuple of df's to tuple of np's
    # Allows the dataset class to access w/ data[][] instead of data[].iloc[]
    train_data_np = (train_data[0].values, train_data[1].values)
    test_data_np = (test_data[0].values, test_data[1].values)

    train_dataset = dataset.Dataset(train_data_np)
    test_dataset = dataset.Dataset(test_data_np)
    train_generator = data.DataLoader(train_dataset, **train_kwargs, drop_last=False)
    test_generator = data.DataLoader(test_dataset, **test_kwargs, drop_last=False)
    # drop_last=True would drop the last batch if the sample size is not divisible by the batch size

    logger.info('\nTraining size: %d \nTesting size: %d\n', len(train_dataset), len(test_dataset))

    # Create variables to store accuracy and loss
    loss_meter = utils.AverageMeter()
    loss_meter.reset()
    summary_file = pd.DataFrame([], columns=['Epoch', 'Training Loss', 'Accuracy', 'Accurate Count', 'Total Items'])
    train_stats = pd.DataFrame([], columns=['accuracy', 'loss'])
    test_stats = pd.DataFrame([], columns=['accuracy', 'loss'])

    # Train and test the model
    for epoch in range(args.max_epoch):
        train_stats = train(model, device, is_binary, train_generator, optimizer, loss_fn, batch_size, loss_meter, train_stats)
        test_stats = test(model, device, is_binary, test_generator, loss_fn, epoch, batch_size, loss_meter, test_stats, train_stats, logger)
        scheduler.step()

    # Training finished - Below is used for testing the network, plots and saving results
    if args.plot_results:
        y_predict_list = []
        y_target_list = []
        y_predict_list, y_target_list = forward(model, device, is_binary, test_generator, y_predict_list, y_target_list)

        graphs.accuracy(train_stats, test_stats, graphs_title=args.sample_file)
        graphs.confusion(y_predict_list, y_target_list, labels, cm_title=args.sample_file)
        logger.info("\n\nf1 score: %0.2f" % (f1_score(y_target_list, y_predict_list, average="weighted")))

    #summary_file.to_csv(RESULTS_FILE, sep='\t', index=False)
    logger.info('\nFinal Accuracy: %2.3f', test_stats.iloc[epoch]['accuracy'])
    logger.info('\nFinished at  ' + str(datetime.today().strftime('%Y-%m-%d-%H:%M')))
Example #33
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " +
        ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: "
        + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " +
        ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help=
        "Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train",
                        action="store_true",
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action="store_true",
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training",
        action="store_true",
        help="Rul evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case",
        action="store_true",
        help="Set this flag if you are using an uncased model.",
    )

    parser.add_argument(
        "--per_gpu_train_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay",
                        default=0.0,
                        type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon",
                        default=1e-8,
                        type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm",
                        default=1.0,
                        type=float,
                        help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs",
        default=3.0,
        type=float,
        help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help=
        "If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps",
                        default=0,
                        type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument("--logging_steps",
                        type=int,
                        default=500,
                        help="Log every X updates steps.")
    parser.add_argument("--save_steps",
                        type=int,
                        default=500,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help=
        "Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda",
                        action="store_true",
                        help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir",
        action="store_true",
        help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache",
        action="store_true",
        help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed",
                        type=int,
                        default=42,
                        help="random seed for initialization")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help=
        "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=
        "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument("--server_ip",
                        type=str,
                        default="",
                        help="For distant debugging.")
    parser.add_argument("--server_port",
                        type=str,
                        default="",
                        help="For distant debugging.")
    args = parser.parse_args()

    if (os.path.exists(args.output_dir) and os.listdir(args.output_dir)
            and args.do_train and not args.overwrite_output_dir):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome."
            .format(args.output_dir))

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port),
                            redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )

    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier(
        )  # Make sure only the first process in distributed training will download model & vocab

    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name
        if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )

    if args.local_rank == 0:
        torch.distributed.barrier(
        )  # Make sure only the first process in distributed training will download model & vocab

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args,
                                                args.task_name,
                                                tokenizer,
                                                evaluate=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step,
                    tr_loss)

    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1
                          or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)

        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (model.module if hasattr(model, "module") else model
                         )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))

        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)

    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(
            args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(
                    glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME,
                              recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(
                logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split(
                "-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split(
                "/")[-1] if checkpoint.find("checkpoint") != -1 else ""

            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            result = dict(
                (k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)

    return results
Exemplo n.º 34
0
 def device(self):
     return torch.device(f'cuda:{self.gpu}' if torch.cuda.is_available() and self.gpu is not None else 'cpu')
Exemplo n.º 35
0
# https://blog.csdn.net/weixin_44769214/article/details/108188126

import torch
import models
import os
from PIL import Image
from torchvision import transforms
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from train import MyDataset
'''
Convert a .pth checkpoint saved with PyTorch 1.6 into one that PyTorch 1.2 can load
'''
# device = torch.device('cuda')
# net = models.Classification().to(device)
# net.load_state_dict(torch.load('Lenet.pth'))
# torch.save(net.state_dict(), 'Lenet_compatible.pth', _use_new_zipfile_serialization=False)
'''
Convert a PyTorch 1.6 GPU .pth into a CPU .pth that PyTorch 1.2 can load'''
device = torch.device('cpu')
net = models.Classification().to(device)
net.load_state_dict(torch.load('Lenet_compatible.pth', map_location='cpu'))
torch.save(net.state_dict(),
           'Lenet_compatible_cpu_1.pth',
           _use_new_zipfile_serialization=False)
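
The key detail above is `_use_new_zipfile_serialization=False`, which makes `torch.save` emit the legacy (pre-1.6) pickle format that older PyTorch releases can read. A minimal round-trip sketch of the same idea, assuming a tiny stand-in module (the `Demo` class and file name are illustrative, not part of the snippet above):

import torch
import torch.nn as nn

class Demo(nn.Module):
    # hypothetical two-layer model, used only to demonstrate the conversion
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)

net = Demo()
# Write the legacy (non-zipfile) format so PyTorch < 1.6 can read it.
torch.save(net.state_dict(), 'demo_legacy.pth',
           _use_new_zipfile_serialization=False)
# map_location='cpu' keeps GPU-saved tensors loadable on CPU-only machines.
net.load_state_dict(torch.load('demo_legacy.pth', map_location='cpu'))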
Exemplo n.º 36
0
def train(args: Dict):
    """ Train the NMT Model.
    @param args (Dict): args from cmd line
    """
    train_data_src = read_corpus(args['--train-src'], source='src')
    train_data_tgt = read_corpus(args['--train-tgt'], source='tgt')

    dev_data_src = read_corpus(args['--dev-src'], source='src')
    dev_data_tgt = read_corpus(args['--dev-tgt'], source='tgt')

    train_data = list(zip(train_data_src, train_data_tgt))
    dev_data = list(zip(dev_data_src, dev_data_tgt))

    train_batch_size = int(args['--batch-size'])

    clip_grad = float(args['--clip-grad'])
    valid_niter = int(args['--valid-niter'])
    log_every = int(args['--log-every'])
    model_save_path = args['--save-to']

    vocab = Vocab.load(args['--vocab'])

    model = NMT(embed_size=int(args['--embed-size']),
                hidden_size=int(args['--hidden-size']),
                dropout_rate=float(args['--dropout']),
                vocab=vocab,
                no_char_decoder=args['--no-char-decoder'])
    model.train()

    uniform_init = float(args['--uniform-init'])
    if np.abs(uniform_init) > 0.:
        print('uniformly initialize parameters [-%f, +%f]' %
              (uniform_init, uniform_init),
              file=sys.stderr)
        for p in model.parameters():
            p.data.uniform_(-uniform_init, uniform_init)

    vocab_mask = torch.ones(len(vocab.tgt))
    vocab_mask[vocab.tgt['<pad>']] = 0

    device = torch.device("cuda:0" if args['--cuda'] else "cpu")
    print('use device: %s' % device, file=sys.stderr)

    model = model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=float(args['--lr']))

    num_trial = 0
    train_iter = patience = cum_loss = report_loss = cum_tgt_words = report_tgt_words = 0
    cum_examples = report_examples = epoch = valid_num = 0
    hist_valid_scores = []
    train_time = begin_time = time.time()
    print('begin Maximum Likelihood training')

    while True:
        epoch += 1

        for src_sents, tgt_sents in batch_iter(train_data,
                                               batch_size=train_batch_size,
                                               shuffle=True):
            train_iter += 1

            optimizer.zero_grad()

            batch_size = len(src_sents)

            example_losses = -model(src_sents, tgt_sents)  # (batch_size,)
            batch_loss = example_losses.sum()
            loss = batch_loss / batch_size

            loss.backward()

            # clip gradient
            grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                       clip_grad)

            optimizer.step()

            batch_losses_val = batch_loss.item()
            report_loss += batch_losses_val
            cum_loss += batch_losses_val

            tgt_words_num_to_predict = sum(
                len(s[1:]) for s in tgt_sents)  # omitting leading `<s>`
            report_tgt_words += tgt_words_num_to_predict
            cum_tgt_words += tgt_words_num_to_predict
            report_examples += batch_size
            cum_examples += batch_size

            if train_iter % log_every == 0:
                print('epoch %d, iter %d, avg. loss %.2f, avg. ppl %.2f ' \
                      'cum. examples %d, speed %.2f words/sec, time elapsed %.2f sec' % (epoch, train_iter,
                                                                                         report_loss / report_examples,
                                                                                         math.exp(report_loss / report_tgt_words),
                                                                                         cum_examples,
                                                                                         report_tgt_words / (time.time() - train_time),
                                                                                         time.time() - begin_time), file=sys.stderr)

                train_time = time.time()
                report_loss = report_tgt_words = report_examples = 0.

            # perform validation
            if train_iter % valid_niter == 0:
                print(
                    'epoch %d, iter %d, cum. loss %.2f, cum. ppl %.2f cum. examples %d'
                    % (epoch, train_iter, cum_loss / cum_examples,
                       np.exp(cum_loss / cum_tgt_words), cum_examples),
                    file=sys.stderr)

                cum_loss = cum_examples = cum_tgt_words = 0.
                valid_num += 1

                print('begin validation ...', file=sys.stderr)

                # compute dev. ppl and bleu
                dev_ppl = evaluate_ppl(
                    model, dev_data,
                    batch_size=128)  # dev batch size can be a bit larger
                valid_metric = -dev_ppl

                print('validation: iter %d, dev. ppl %f' %
                      (train_iter, dev_ppl),
                      file=sys.stderr)

                is_better = len(hist_valid_scores
                                ) == 0 or valid_metric > max(hist_valid_scores)
                hist_valid_scores.append(valid_metric)

                if is_better:
                    patience = 0
                    print('save currently the best model to [%s]' %
                          model_save_path,
                          file=sys.stderr)
                    model.save(model_save_path)

                    # also save the optimizers' state
                    torch.save(optimizer.state_dict(),
                               model_save_path + '.optim')
                elif patience < int(args['--patience']):
                    patience += 1
                    print('hit patience %d' % patience, file=sys.stderr)

                    if patience == int(args['--patience']):
                        num_trial += 1
                        print('hit #%d trial' % num_trial, file=sys.stderr)
                        if num_trial == int(args['--max-num-trial']):
                            print('early stop!', file=sys.stderr)
                            exit(0)

                        # decay lr, and restore from previously best checkpoint
                        lr = optimizer.param_groups[0]['lr'] * float(
                            args['--lr-decay'])
                        print(
                            'load previously best model and decay learning rate to %f'
                            % lr,
                            file=sys.stderr)

                        # load model
                        params = torch.load(
                            model_save_path,
                            map_location=lambda storage, loc: storage)
                        model.load_state_dict(params['state_dict'])
                        model = model.to(device)

                        print('restore parameters of the optimizers',
                              file=sys.stderr)
                        optimizer.load_state_dict(
                            torch.load(model_save_path + '.optim'))

                        # set new lr
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr

                        # reset patience
                        patience = 0

            if epoch == int(args['--max-epoch']):
                print('reached maximum number of epochs!', file=sys.stderr)
                exit(0)
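
The `args` mapping above uses docopt-style keys, with numeric values passed as strings and cast inside the function. A sketch of a minimal invocation (all paths and hyperparameters below are placeholders, not taken from the snippet):

args = {
    '--train-src': 'data/train.src', '--train-tgt': 'data/train.tgt',
    '--dev-src': 'data/dev.src', '--dev-tgt': 'data/dev.tgt',
    '--vocab': 'vocab.json', '--save-to': 'model.bin',
    '--batch-size': '32', '--clip-grad': '5.0', '--valid-niter': '2000',
    '--log-every': '10', '--embed-size': '256', '--hidden-size': '256',
    '--dropout': '0.3', '--no-char-decoder': False, '--uniform-init': '0.1',
    '--cuda': False, '--lr': '0.001', '--patience': '5',
    '--max-num-trial': '5', '--lr-decay': '0.5', '--max-epoch': '30',
}
train(args)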
Exemplo n.º 37
0
from pathlib import Path
import requests

import pickle
import gzip
import numpy as np

import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader

import torchvision
from torchvision import transforms

DEV = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")


class MnistCNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1)

    def forward(self, xb):
        xb = xb.view(-1, 1, 28, 28)
        xb = F.relu(self.conv1(xb))
        xb = F.relu(self.conv2(xb))
        xb = F.relu(self.conv3(xb))
        xb = F.avg_pool2d(xb, 4)
        # assumed completion: flatten (N, 10, 1, 1) into per-class scores (N, 10)
        return xb.view(-1, xb.size(1))
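
A quick shape check for the network above (a sketch; the sizes follow from the stride-2 convolutions, 28 -> 14 -> 7 -> 4, after which the 4x4 average pool leaves one value per channel):

model = MnistCNN().to(DEV)
xb = torch.randn(2, 784, device=DEV)  # two flattened 28x28 images
print(model(xb).shape)                # expected: torch.Size([2, 10])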
Exemplo n.º 38
0
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    print('Setting up data...')
    Dataset = get_dataset(opt.dataset, opt.task)
    f = open(opt.data_cfg)
    data_config = json.load(f)
    trainset_paths = data_config['train']
    dataset_root = data_config['root']
    f.close()
    transforms = T.Compose([T.ToTensor()])
    dataset = Dataset(opt,
                      dataset_root,
                      trainset_paths, (1088, 608),
                      augment=True,
                      transforms=transforms)
    opt = opts().update_dataset_info_and_set_heads(opt, dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up data...')

    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))

        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        if epoch % 5 == 0:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
    logger.close()
Exemplo n.º 39
0
        x = self.feature_fusion(higher_res_features, x)
        x = self.classifier(x)
        outputs = []
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)
        outputs.append(x)
        if self.aux:
            auxout = self.auxlayer(higher_res_features)
            auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
            outputs.append(auxout)
        return x
        # return tuple(outputs)


"""print layers and params of network"""
if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = FastSCNNX25(classes=19).to(device)
    summary(model, (3, 512, 1024))

    from fvcore.nn.flop_count import flop_count  # https://github.com/facebookresearch/fvcore
    from tools.flops_counter.ptflops import get_model_complexity_info
    from thop import profile  # https://github.com/Lyken17/pytorch-OpCounter

    x = torch.randn(2, 3, 512, 1024).to(device)

    from fvcore.nn.jit_handles import batchnorm_flop_jit
    from fvcore.nn.jit_handles import generic_activation_jit

    supported_ops = {
        "aten::batch_norm": batchnorm_flop_jit,
    }
Exemplo n.º 40
0
        self.log_train = 100  # Logging interval for printing the training loss
        self.log_test = 100  # Logging interval for printing the test accuracy
        self.save_model = False
        self.batchs_round = 1  # Number of mini-batches for each selected user in each iteration
        self.no_cuda = True
        self.seed = 1
        self.ClipStyle = 'Flat'  # Clipping method: 'Flat' or 'Per-Layer'
        self.c = torch.tensor([0.5])  # Split ratio of privacy budget
        self.quatile = 0.5  # Target quantile of unclipped gradients


args = Arguments()

use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

# define the model
class Net(nn.Module):
    def __init__(self, input_size=50, hidden_size=120, output_size=2, num_layer=3):
        super(Net, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layer = num_layer
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size, num_layers=num_layer, batch_first=True)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, input):
        output, hidden = self.lstm(input)
        # assumed completion: classify from the last timestep's hidden state
        output = self.linear(output[:, -1, :])
        return output
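
A shape-check sketch for the LSTM classifier above (this assumes the completion noted in the forward pass; the sequence length of 30 is arbitrary):

net = Net().to(device)
x = torch.randn(8, 30, 50, device=device)  # (batch, seq_len, input_size)
print(net(x).shape)                        # expected: torch.Size([8, 2])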
Exemplo n.º 41
0
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader

from transformer_split.util import getGraphStructure
from transformer_split.data_loader import PairedDataset
from transformer_split import util
from transformer_split.arguments import get_args
from transformer_split.vae_model import VAE_Model

import envs

args = get_args()

device = torch.device("cuda" if args.cuda else "cpu")

torch.manual_seed(args.seed)
np.random.seed(args.seed)

#env_names = ["ant-v0", "ant3-v0", "ant_jump-v0", "ant3_jump-v0", "ant_a-v0", "ant_b-v0"]
env_names = [
    "ant4-rnd-v0", "ant3-rnd-v0", "ant3-walk-v0", "ant4-walk-v0",
    "ant5-walk-v0", "ant5-rnd-v0", "ant6-walk-v0", "ant6-rnd-v0"
]

train_envs = [gym.make(n) for n in env_names]
graphs = [getGraphStructure(e.xml) for e in train_envs]
# All environments have the same dimension per limb.
num_limbs = len(graphs[0])  #torso + body limbs
body_limbs = num_limbs - 1
Exemplo n.º 42
0
import argparse
import time

import torch.multiprocessing as mp
from torch.multiprocessing import Process

from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager
from deform_visualize import plot_one_new

torch.manual_seed(27)  #Reproducibility

GPU = True
device_idx = 0
if GPU:
    device = torch.device(
        "cuda:" + str(device_idx) if torch.cuda.is_available() else "cpu")
else:
    device = torch.device("cpu")
print(device)

parser = argparse.ArgumentParser(
    description='Train or test neural net motor controller.')
parser.add_argument('--train',
                    dest='train',
                    action='store_true',
                    default=False)
parser.add_argument('--test', dest='test', action='store_true', default=False)

args = parser.parse_args()

Exemplo n.º 43
0
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    with open(args.config) as config_file:
        state = yaml.load(config_file, Loader=yaml.FullLoader)

    if args.save_prefix is not None:
        state['save_prefix'] = args.save_prefix
    if args.model_name is not None:
        state['model_name'] = args.model_name
    if args.epsilon is not None:
        state['epsilon'] = args.epsilon
    state['target'] = args.targeted
    if 'defense' not in state:
        state['defense'] = False
    state["max_queries"] = args.max_queries

    device = torch.device(
        "cuda:{}".format(0) if torch.cuda.is_available() else "cpu")

    if args.targeted and args.dataset == "ImageNet":
        args.max_queries = 50000
    args.exp_dir = osp.join(args.exp_dir,
                            get_exp_dir_name(args.dataset, args.norm,
                                             args.targeted, args.target_type,
                                             args))  # randomly generate a directory for this experiment
    os.makedirs(args.exp_dir, exist_ok=True)
    if args.all_archs:
        if args.attack_defense:
            log_file_path = osp.join(
                args.exp_dir, 'run_defense_{}.log'.format(args.defense_model))
        else:
            log_file_path = osp.join(args.exp_dir, 'run.log')
    elif args.arch is not None:
Exemplo n.º 44
0
Arquivo: gan.py Projeto: er-Bot/gans
 def __init__(self):
     self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
Exemplo n.º 45
0
def build(args):
    # the `num_classes` naming here is somewhat misleading.
    # it indeed corresponds to `max_obj_id + 1`, where max_obj_id
    # is the maximum id for a class in your dataset. For example,
    # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.
    # As another example, for a dataset that has a single class with id 1,
    # you should pass `num_classes` to be 2 (max_obj_id + 1).
    # For more details on this, check the following discussion
    # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223
    num_classes = 20 if args.dataset_file != 'coco' else 91
    if args.dataset_file == "coco_panoptic":
        # for panoptic, we just add a num_classes that is large enough to hold
        # max_obj_id + 1, but the exact value doesn't really matter
        num_classes = 250
    if args.device == 'tpu':
        device = xm.xla_device()
    else:
        device = torch.device(args.device)

    backbone = build_backbone(args)

    transformer = build_transformer(args)

    model = DETR(
        backbone,
        transformer,
        num_classes=num_classes,
        num_queries=args.num_queries,
        aux_loss=args.aux_loss,
    )
    if args.masks:
        model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))
    matcher = build_matcher(args)
    weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}
    weight_dict['loss_giou'] = args.giou_loss_coef
    if args.masks:
        weight_dict["loss_mask"] = args.mask_loss_coef
        weight_dict["loss_dice"] = args.dice_loss_coef
    # TODO this is a hack
    if args.aux_loss:
        aux_weight_dict = {}
        for i in range(args.dec_layers - 1):
            aux_weight_dict.update(
                {k + f'_{i}': v
                 for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)

    losses = ['labels', 'boxes', 'cardinality']
    if args.masks:
        losses += ["masks"]
    criterion = SetCriterion(num_classes,
                             matcher=matcher,
                             weight_dict=weight_dict,
                             eos_coef=args.eos_coef,
                             losses=losses)
    # criterion.to(device)
    postprocessors = {'bbox': PostProcess()}
    if args.masks:
        postprocessors['segm'] = PostProcessSegm()
        if args.dataset_file == "coco_panoptic":
            is_thing_map = {i: i <= 90 for i in range(201)}
            postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map,
                                                             threshold=0.85)

    return model, criterion, postprocessors
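
The `num_classes` convention documented at the top of `build` can be made concrete with a small sketch (the category ids below are hypothetical, not taken from any particular dataset):

# Sparse category ids, COCO-style: the classification head indexes by raw id,
# so every id in 0..max_obj_id must be addressable.
category_ids = [1, 3, 7, 90]
num_classes = max(category_ids) + 1  # 91
print(num_classes)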
Exemplo n.º 46
0
parser.add_argument('--testBatchSize', type=int, default=10, help='testing batch size')
parser.add_argument('--nEpochs', type=int, default=2, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.01, help='Learning Rate. Default=0.01')
parser.add_argument('--cuda', action='store_true', help='use cuda?')
parser.add_argument('--threads', type=int, default=4, help='number of threads for data loader to use')
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
opt = parser.parse_args()

print(opt)

if opt.cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

torch.manual_seed(opt.seed)

device = torch.device("cuda" if opt.cuda else "cpu")

print('===> Loading datasets')
train_set = get_training_set(opt.upscale_factor)
test_set = get_test_set(opt.upscale_factor)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)

print('===> Building model')
model = Net(upscale_factor=opt.upscale_factor).to(device)
criterion = nn.MSELoss()

optimizer = optim.Adam(model.parameters(), lr=opt.lr)


def train(epoch):
Exemplo n.º 47
0
    def train(self) -> None:
        r"""Main method for training PPO.

        Returns:
            None
        """

        self.envs = construct_envs(self.config,
                                   get_env_class(self.config.ENV_NAME))

        ppo_cfg = self.config.RL.PPO
        self.device = (torch.device("cuda", self.config.TORCH_GPU_ID)
                       if torch.cuda.is_available() else torch.device("cpu"))
        if not os.path.isdir(self.config.CHECKPOINT_FOLDER):
            os.makedirs(self.config.CHECKPOINT_FOLDER)
        self._setup_actor_critic_agent(ppo_cfg)
        logger.info("agent number of parameters: {}".format(
            sum(param.numel() for param in self.agent.parameters())))

        num_train_processes, num_val_processes = self.config.NUM_PROCESSES, self.config.NUM_VAL_PROCESSES
        total_processes = num_train_processes + num_val_processes
        self.num_processes = num_train_processes
        rollouts = RolloutStorage(ppo_cfg.num_steps, num_train_processes,
                                  self.envs.observation_spaces[0],
                                  self.envs.action_spaces[0],
                                  ppo_cfg.hidden_size,
                                  self.actor_critic.net.num_recurrent_layers)
        rollouts.to(self.device)

        observations = self.envs.reset()
        batch = batch_obs(observations, device=self.device)

        for sensor in rollouts.observations:
            rollouts.observations[sensor][0].copy_(
                batch[sensor][:num_train_processes])
        self.last_observations = batch
        self.last_recurrent_hidden_states = torch.zeros(
            self.actor_critic.net.num_recurrent_layers, total_processes,
            ppo_cfg.hidden_size).to(self.device)
        self.last_prev_actions = torch.zeros(
            total_processes, rollouts.prev_actions.shape[-1]).to(self.device)
        self.last_masks = torch.zeros(total_processes, 1).to(self.device)

        # batch and observations may contain shared PyTorch CUDA
        # tensors.  We must explicitly clear them here otherwise
        # they will be kept in memory for the entire duration of training!
        batch = None
        observations = None

        current_episode_reward = torch.zeros(self.envs.num_envs, 1)
        running_episode_stats = dict(
            count=torch.zeros(self.envs.num_envs, 1),
            reward=torch.zeros(self.envs.num_envs, 1),
        )
        window_episode_stats = defaultdict(
            lambda: deque(maxlen=ppo_cfg.reward_window_size))

        t_start = time.time()
        env_time = 0
        pth_time = 0
        count_steps = 0
        count_checkpoints = 0

        lr_scheduler = LambdaLR(
            optimizer=self.agent.optimizer,
            lr_lambda=lambda x: linear_decay(x, self.config.NUM_UPDATES),
        )

        with TensorboardWriter(self.config.TENSORBOARD_DIR,
                               flush_secs=self.flush_secs) as writer:
            for update in range(self.config.NUM_UPDATES):
                if ppo_cfg.use_linear_lr_decay:
                    lr_scheduler.step()

                if ppo_cfg.use_linear_clip_decay:
                    self.agent.clip_param = ppo_cfg.clip_param * linear_decay(
                        update, self.config.NUM_UPDATES)

                for step in range(ppo_cfg.num_steps):
                    (
                        delta_pth_time,
                        delta_env_time,
                        delta_steps,
                    ) = self._collect_rollout_step(rollouts,
                                                   current_episode_reward,
                                                   running_episode_stats)
                    pth_time += delta_pth_time
                    env_time += delta_env_time
                    count_steps += delta_steps

                (
                    delta_pth_time,
                    value_loss,
                    action_loss,
                    dist_entropy,
                ) = self._update_agent(ppo_cfg, rollouts)
                pth_time += delta_pth_time

                for k, v in running_episode_stats.items():
                    window_episode_stats[k].append(v.clone())

                deltas = {
                    k: ((v[-1][:self.num_processes] -
                         v[0][:self.num_processes]).sum().item() if len(v) > 1
                        else v[0][:self.num_processes].sum().item())
                    for k, v in window_episode_stats.items()
                }

                deltas["count"] = max(deltas["count"], 1.0)
                losses = [value_loss, action_loss]
                self.write_tb('train', writer, deltas, count_steps, losses)

                eval_deltas = {
                    k: ((v[-1][self.num_processes:] -
                         v[0][self.num_processes:]).sum().item() if len(v) > 1
                        else v[0][self.num_processes:].sum().item())
                    for k, v in window_episode_stats.items()
                }
                eval_deltas["count"] = max(eval_deltas["count"], 1.0)

                self.write_tb('val', writer, eval_deltas, count_steps)

                # log stats
                if update > 0 and update % self.config.LOG_INTERVAL == 0:
                    logger.info("update: {}\tfps: {:.3f}\t".format(
                        update, count_steps / (time.time() - t_start)))

                    logger.info(
                        "update: {}\tenv-time: {:.3f}s\tpth-time: {:.3f}s\t"
                        "frames: {}".format(update, env_time, pth_time,
                                            count_steps))
                    logger.info("Average window size: {}  {}".format(
                        len(window_episode_stats["count"]),
                        "  ".join("{}: {:.3f}".format(k, v / deltas["count"])
                                  for k, v in deltas.items() if k != "count"),
                    ))
                    logger.info("validation metrics: {}".format(
                        "  ".join("{}: {:.3f}".format(k, v /
                                                      eval_deltas["count"])
                                  for k, v in eval_deltas.items()
                                  if k != "count"), ))

                # checkpoint model
                if update % self.config.CHECKPOINT_INTERVAL == 0:
                    self.save_checkpoint(f"ckpt.{count_checkpoints}.pth",
                                         dict(step=count_steps))
                    count_checkpoints += 1

            self.envs.close()
Exemplo n.º 48
0
import numpy as np
import os
import cv2
import pickle as pkl
import torch
from tqdm import tqdm
import pandas as pd

# Detection imports
from hpe3d.models import hmr
from hpe3d.utils.img_utils import FakeCamera
from hpe3d.utils.kp_utils import get_joints_from_bvh, bbox_from_kp2d
import hpe3d.utils.config as cfg

device = torch.device('cuda') if torch.cuda.is_available() else torch.device(
    'cpu')

spin = hmr()

dataset_path = cfg.ROOT_MHAD


def read_calib(cam):

    ext_name = os.path.join(dataset_path, 'Calibration', 'RwTw_%s.txt' % cam)
    int_name = os.path.join(dataset_path, 'Calibration', 'camcfg_%s.yml' % cam)

    with open(ext_name) as fh:
        rw = fh.readline().split('=')[1].rstrip().split(' ')
        tw = fh.readline().split('=')[1].rstrip().split(' ')
        R = np.array(rw).astype(float).reshape(3, 3)
Exemplo n.º 49
0
import torch
#from skimage import io, transform
import numpy as np
#import matplotlib.pyplot as plt
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils,models
import torch.optim as optim
import torch.nn as nn
from torch.optim import lr_scheduler
from torch.autograd import Variable
import warnings
import nonechucks as nc
warnings.filterwarnings("ignore")
device = torch.device("cuda:0")

def load_dataset():
    data_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),  # RandomSizedCrop is deprecated; RandomResizedCrop is the current name
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    
    path = './images_new'
    dataset_total1 = torchvision.datasets.ImageFolder(root=path,transform=data_transform)
    dataset_total = nc.SafeDataset(dataset_total1)
    train_size = int(0.75*len(dataset_total))
    test_size = len(dataset_total) - train_size
Exemplo n.º 50
0
import torch

DEVICE = torch.device('cuda')
TARGET_REPLACE_ITER = 1000
EPOCHS = 100000
BATCH_SIZE = 32
GAMMA = 0.99
EPSILON_START = 1
EPSILON_END = 0.1
EPSILON_DECAY_STEPS = 10**6
N_ACTION = 4

# GAME = "Breakout-v4"
# GAME = "BreakoutDeterministic-v4" # image shape (210,160,3)
# GAME = "CartPole-v0"
GAME = "BreakoutNoFrameskip-v4"

EPISODE_LIFE = True
CLIP_REWARDS = True
FRAME_STACK = True
SCALE = True  # needs more than 100 GB of RAM to store 100M frames as float32
MEMORY_SIZE = 10000
LR = 1e-5
STEPS = 3000000
Exemplo n.º 51
0
 def init(self, device, nhead):
     # device: str. e.g. "cuda:0"
     self.device = torch.device(device)
     assert nhead % 4 == 0
     self.nhead = nhead
Exemplo n.º 52
0
    # init wandb
    hyperparameters_defaults = dict(
        learning_rate = 1e-4,
        batch_size = 64,
        num_epochs = 2,
        model_depth = 50,
        weight_decay = 0.01,
        pretrained = True,
        aug = True,
    )

    run = wandb.init(project="owkin-chal", job_type='train', config=hyperparameters_defaults)
    config = wandb.config

    # init model
    device = torch.device("cuda")
    
    model_name = "resnet{depth}".format(depth=config.model_depth)
    model = torchvision.models.__dict__[model_name](pretrained=config.pretrained)
    model.fc = torch.nn.Linear(model.fc.in_features, 1)
    
    # set transforms
    augs = [transforms.ToTensor()]

    if config.aug:
        augs.append(transforms.RandomHorizontalFlip(p=0.5))
        augs.append(transforms.RandomVerticalFlip(p=0.5))

    tfms = transforms.Compose(augs)

    dataset_aug = datasets.ImageFolder("data/ready_to_train", transform=tfms)
Exemplo n.º 53
0
def main():
    # Training settings
    # Use the command line to modify the default settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=32, metavar='N',
                        help='input batch size for training (default: 32)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--step', type=int, default=1, metavar='N',
                        help='number of epochs between learning rate reductions (default: 1)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')

    parser.add_argument('--traintest', action='store_true', default=False,
                        help='train on fractions of data and test')
    parser.add_argument('--show_wrong', action='store_true', default=False,
                        help='show incorrectly labelled test images')
    parser.add_argument('--show_features', action='store_true', default=False,
                        help='show first layer features')
    parser.add_argument('--show_tsne', action='store_true', default=False,
                        help='show a t-SNE embedding of the test set features')
    parser.add_argument('--evaluate', action='store_true', default=False,
                        help='evaluate your model on the official test set')
    parser.add_argument('--load-model', type=str,
                        help='model file path')

    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")
    print("Device:", device)

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # show features
    if args.show_features:
        assert os.path.exists(args.load_model)

        model = Net().to(device)
        model.load_state_dict(torch.load(args.load_model))

        conv1 = model.conv1.weight.detach().cpu()

        plt.figure()
        for i, filt in enumerate(conv1):
            plt.subplot(331 + i)
            plt.axis('off')
            plt.title(f'Filter {i+1}')
            plt.imshow(filt[0])
            if i == 8:
                break
        plt.show()
        return


    # Evaluate on the official test set
    if args.evaluate or args.show_wrong:
        assert os.path.exists(args.load_model)

        # Set the test model
        model = Net().to(device)
        model.load_state_dict(torch.load(args.load_model))

        test_dataset = datasets.MNIST('../data', train=False,
                    transform=transforms.Compose([
                        transforms.ToTensor(),
                        transforms.Normalize((0.1307,), (0.3081,))
                    ]))

        test_loader = torch.utils.data.DataLoader(
            test_dataset, batch_size=args.test_batch_size, shuffle=True, **kwargs)

        test(model, device, test_loader, show_wrong=args.show_wrong, tsne=args.show_tsne)

        return


    if args.traintest:
        test_dataset = datasets.MNIST('../data', train=False,
                                      transform=transforms.Compose([
                                          transforms.ToTensor(),
                                          transforms.Normalize((0.1307,),
                                                               (0.3081,))
                                      ]))

        test_loader = torch.utils.data.DataLoader(
            test_dataset, batch_size=args.test_batch_size, shuffle=True,
            **kwargs)

    # PyTorch has a default MNIST dataset which loads data at each iteration
    data_transforms = {"train": transforms.Compose([
                            transforms.RandomRotation(degrees=10),
                            #transforms.RandomAffine(degrees=10, scale=(.9, .9)),# scale=(.1, .1), shear=.1),
                            transforms.ToTensor(),
                            #transforms.RandomErasing(p=0.8, scale=(.05, .05)),
                            #transforms.Normalize((0.1307,), (0.3081,))
                            ]),
                        "val": transforms.Compose([
                            transforms.ToTensor(),
                            transforms.Normalize((0.1307,), (0.3081,))
                        ])
                    }
    train_dataset = datasets.MNIST('../data', train=True, download=True, transform=data_transforms["train"])
    val_dataset = datasets.MNIST('../data', train=False, download=True, transform=data_transforms["val"])

    # You can assign indices for training/validation or use a random subset for
    # training by using SubsetRandomSampler. Right now the train and validation
    # sets are built from the same indices - this is bad! Change it so that
    # the training and validation sets are disjoint and have the correct relative sizes.

    class_data = [[] for _ in range(10)]

    for i, elem in enumerate(tqdm(val_dataset)):
        class_data[elem[1]].append(i)

    split = .85
    subset_indices_train = []
    subset_indices_valid = []
    for i in range(10):
        np.random.shuffle(class_data[i])
        subset_indices_train += class_data[i][:int(len(class_data[i])*split)]
        subset_indices_valid += class_data[i][int(len(class_data[i])*split):]

    if args.traintest:
        train_fracs = [1., .5, .25, .125, .0625]
    else:
        train_fracs = [1.]

    train_frac_losses = []
    test_frac_losses = []
    train_frac_accs = []
    test_frac_accs = []
    for train_frac in train_fracs:
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size,
            sampler=SubsetRandomSampler(subset_indices_train[:int(len(subset_indices_train)*train_frac)])
        )
        val_loader = torch.utils.data.DataLoader(
            val_dataset, batch_size=args.test_batch_size,
            sampler=SubsetRandomSampler(subset_indices_valid[:int(len(subset_indices_valid)*train_frac)])
        )

        #for imgs, label in train_loader:
        #    for img in imgs:
        #        plt.figure()
        #        plt.imshow(img[0])
        #        plt.show()

        # Load your model [fcNet, ConvNet, Net]
        model = Net().to(device)

        # Try different optimizers here [Adam, SGD, RMSprop]
        optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

        # Set your learning rate scheduler
        scheduler = StepLR(optimizer, step_size=args.step, gamma=args.gamma)

        # Training loop
        train_losses = []
        test_losses = []
        train_accs = []
        test_accs = []
        for epoch in range(1, args.epochs + 1):
            train_loss, train_acc = train(args, model, device, train_loader, optimizer, epoch)
            test_loss, test_acc = test(model, device, val_loader)
            scheduler.step()    # learning rate scheduler
            train_losses.append(train_loss)
            train_accs.append(train_acc)
            test_losses.append(test_loss)
            test_accs.append(test_acc)
            # You may optionally save your model at each epoch here

        if not args.traintest:
            figs, axs = plt.subplots(2)
            axs[0].set_title("Loss")
            axs[0].set_ylabel("Loss")
            axs[0].plot(test_losses)
            axs[0].plot(train_losses)
            axs[1].set_title("Accuracy")
            axs[1].set_ylabel("Accuracy")
            axs[1].plot(test_accs)
            axs[1].set_xlabel("Epoch")
            plt.show()
        else:
            test_loss, test_acc = test(model, device, test_loader)
            test_frac_losses.append(test_loss)
            train_frac_losses.append(train_loss)
            test_frac_accs.append(test_acc)
            train_frac_accs.append(train_acc)

    if args.traintest:
        plt.figure()
        plt.subplot(2, 1, 1)
        plt.title("Loss")
        plt.plot(train_fracs, test_frac_losses)
        plt.plot(train_fracs, train_frac_losses)
        plt.legend(["Test", "Train"])
        plt.ylabel("Loss (log)")
        plt.yscale("log")
        plt.xscale("log")

        plt.subplot(2, 1, 2)
        plt.title("Accuracy")
        plt.plot(train_fracs, test_frac_accs)
        plt.plot(train_fracs, train_frac_accs)
        plt.legend(["Test", "Train"])
        plt.xlabel("Fraction of Training Set (log)")
        plt.ylabel("Accuracy (log)")
        plt.yscale("log")
        plt.xscale("log")
        plt.show()

    if args.save_model:
        torch.save(model.state_dict(), "mnist_model.pt")
Exemplo n.º 54
0
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    storage = get_data(
        saving_dir=os.path.join(settings.data_dir, "rvr6x6.pck"))
    model = ActorCritic(map_size=(6, 6))
    # model = ActorCritic(map_size=(8, 8))
    writer = SummaryWriter()

    # input()
    model.to(device)

    iteration = int(1e6)
    batch_size = 128
    criteria = torch.nn.NLLLoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=3e-6)

    for i in range(iteration):
        loss = 0
        sample_dict = storage.sample(batch_size)
        for key in sample_dict:
            if key not in model.activated_agents:
                continue
            if sample_dict[key]:
                # (states, units, units_actions)
                spatial_features, unit_features, actions = sample_dict[key]

                spatial_features = torch.from_numpy(
                    spatial_features).float().to(device)
                unit_features = torch.from_numpy(unit_features).float().to(
                    device)

                encoded_utt = torch.from_numpy(
                    encoded_utt_dict[key]).unsqueeze(0).float().repeat(
                        unit_features.size(0), 1).to(device)
                # cat utt and the individual feature together
                unit_features = torch.cat([unit_features, encoded_utt], dim=1)
                actions = torch.from_numpy(actions).long().to(device)
                # print(states.device, units.device)
                probs = model.actor_forward(key, spatial_features,
                                            unit_features)
                # print(probs.device)
                # input()
                # _actions = torch.zeros_like(prob)
                # for i in range(len(actions)):
                #     _actions[i][actions[i]] = 1

                log_probs = torch.log(probs)
                loss += criteria(log_probs, actions)
        if i % 100 == 0:
            writer.add_scalar("all losses", loss, i)
            print("iter{}, loss:{}".format(i, loss))

        optimizer.zero_grad()

        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), .1)
        optimizer.step()
        # print(prob[i])

    torch.save(model.state_dict(),
               os.path.join(settings.microrts_path, "models", "1M.pth"))
Exemplo n.º 55
0
def train_correspondence_block(json_file, cls, gpu, synthetic, epochs=50, batch_size=64, val_ratio=0.2,
                               save_model=True, iter_print=10):
    """
    Training a UNet for each class using real training and/or synthetic data
    Args:
        json_file: .txt file which stores the directory of the training images
        cls: the class to train on, from 1 to 6
        gpu: gpu id to use
        synthetic: whether use synthetic data or not
        epochs: number of epochs to train
        batch_size: batch size
        val_ratio: validation ratio during training
        save_model: save model or not
        iter_print: print training results per iter_print iterations

    """
    train_data = NOCSDataset(json_file, cls, synthetic=synthetic, resize=64,
                             transform=transforms.Compose([transforms.ColorJitter(brightness=(0.6, 1.4),
                                                                                  contrast=(0.8, 1.2),
                                                                                  saturation=(0.8, 1.2),
                                                                                  hue=(-0.01, 0.01)),
                                                           AddGaussianNoise(10 / 255)]))
    print('Size of trainset ', len(train_data))
    indices = list(range(len(train_data)))
    np.random.shuffle(indices)

    num_train = len(indices)
    split = int(np.floor(num_train * val_ratio))
    train_idx, valid_idx = indices[split:], indices[:split]

    # define samplers for obtaining training and validation batches
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)

    # prepare data loaders (combine dataset and sampler)
    num_workers = 4
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                               sampler=train_sampler, num_workers=num_workers)
    val_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                             sampler=valid_sampler, num_workers=num_workers)
    device = torch.device(f"cuda:{gpu}" if torch.cuda.is_available() else "cpu")
    print("device: ", f"cuda:{gpu}" if torch.cuda.is_available() else "cpu")
    # architecture for correspondence block - 13 objects + background = 14 channels for ID masks
    correspondence_block = UNet()
    correspondence_block = correspondence_block.to(device)

    # custom loss function and optimizer
    criterion_x = nn.CrossEntropyLoss()
    criterion_y = nn.CrossEntropyLoss()
    criterion_z = nn.CrossEntropyLoss()

    # specify optimizer
    optimizer = optim.Adam(correspondence_block.parameters(), lr=3e-4, weight_decay=3e-5)

    # training loop
    val_loss_min = np.inf
    save_path = model_save_path(cls)
    writer = SummaryWriter(save_path.parent / save_path.stem / datetime.now().strftime("%d%H%M"))

    for epoch in range(epochs):
        t0 = time.time()
        train_loss = 0
        val_loss = 0
        print("------ Epoch ", epoch, " ---------")
        correspondence_block.train()
        print("training")
        for it, (rgb, xmask, ymask, zmask, adr_rgb) in enumerate(train_loader):

            rgb = rgb.to(device)
            xmask = xmask.to(device)
            ymask = ymask.to(device)
            zmask = zmask.to(device)

            optimizer.zero_grad()
            xmask_pred, ymask_pred, zmask_pred = correspondence_block(rgb)

            loss_x = criterion_x(xmask_pred, xmask)
            loss_y = criterion_y(ymask_pred, ymask)
            loss_z = criterion_z(zmask_pred, zmask)

            loss = loss_x + loss_y + loss_z

            loss.backward()
            optimizer.step()
            train_loss += loss.item()

            if it % iter_print == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, it * len(rgb), len(train_loader.dataset),
                    100. * it / len(train_loader), loss.item()))

        correspondence_block.eval()

        print("validating")
        with torch.no_grad():  # no autograd graph is needed during validation
            for rgb, xmask, ymask, zmask, _ in val_loader:
                rgb = rgb.to(device)
                xmask = xmask.to(device)
                ymask = ymask.to(device)
                zmask = zmask.to(device)

                xmask_pred, ymask_pred, zmask_pred = correspondence_block(rgb)

                loss_x = criterion_x(xmask_pred, xmask)
                loss_y = criterion_y(ymask_pred, ymask)
                loss_z = criterion_z(zmask_pred, zmask)

                loss = loss_x + loss_y + loss_z
                val_loss += loss.item()

        # calculate average losses; loss.item() is already a per-batch mean,
        # so average over the number of batches rather than the number of samples
        train_loss = train_loss / len(train_loader)
        val_loss = val_loss / len(val_loader)
        t_end = time.time()
        print(f'{t_end - t0} seconds')
        writer.add_scalar('train loss', train_loss, epoch)
        writer.add_scalar('val loss', val_loss, epoch)
        writer.add_scalar('epoch time', t_end - t0, epoch)

        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch, train_loss, val_loss))

        # save model if validation loss has decreased
        if val_loss <= val_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
                val_loss_min,
                val_loss))
            if save_model:
                torch.save(correspondence_block.state_dict(), save_path)
            val_loss_min = val_loss
    writer.close()
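A minimal sketch of how this trainer might be invoked; the listing file, class id, and flag values below are placeholders for illustration, not values taken from the source.

if __name__ == '__main__':
    train_correspondence_block(
        json_file='data/train_list.txt',  # hypothetical listing file
        cls=3,                            # valid class ids run from 1 to 6
        gpu=0,
        synthetic=True,                   # mix synthetic renders into training
        epochs=50,
        batch_size=64,
        val_ratio=0.2,
    )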
Exemplo n.º 56
0
def main():
    start = time.time()
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=5,
                        metavar='N',
                        help='number of epochs to train (default: 5)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')
    parser.add_argument('--file-name',
                        type=str,
                        default='test_' + str(int(start))[-3:],
                        metavar='filename',
                        help='Name of file to store model and losses')
    parser.add_argument(
        '--quant-type',
        type=str,
        default='none',
        metavar='qtype',
        help='Type of quantisation used on activation functions')
    parser.add_argument('--bit-res',
                        type=int,
                        default=4,
                        metavar='bitres',
                        help='Bit resolution of activation function')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    qt = args.quant_type
    if qt == 'dumb':
        model = DumbNet().to(device)
        print("Building dumb {0} bit network".format(args.bit_res))
    elif qt == 'lit':
        model = LitNet().to(device)
        print("Building LIT {0} bit network".format(args.bit_res))
    else:
        model = Net().to(device)
        print("\nBuilding full resolution network")

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)

    losses_train = np.zeros(args.epochs)
    losses_test = np.zeros(args.epochs)

    start = time.time()

    for epoch in range(1, args.epochs + 1):
        epoch_train_loss = train(args, model, device, train_loader, optimizer,
                                 epoch)
        epoch_test_loss = test(args, model, device, test_loader)
        losses_train[epoch - 1] = epoch_train_loss
        losses_test[epoch - 1] = epoch_test_loss
        current_time = time.time() - start
        print('\nEpoch: {:d}'.format(epoch))
        print('Training set loss: {:.6f}'.format(epoch_train_loss))
        print('Test set loss: {:.6f}'.format(epoch_test_loss))
        print('Time taken: {:.6f}s'.format(current_time))

    if args.save_model:
        if not os.path.exists('models'):
            os.mkdir('models')
        torch.save(model.state_dict(), 'models/' + args.file_name + '.pt')
        if not os.path.exists('data'):
            os.mkdir('data')
        losses = np.stack((losses_train, losses_test), axis=1)
        np.savetxt('data/' + args.file_name + '.txt', losses, delimiter=', ')

    fig = plt.figure()
    ax = fig.gca()
    ax.set_title('Loss per Epoch')
    plt.plot(losses_train)
    plt.plot(losses_test)
    plt.ylabel('loss')
    plt.xlabel('epoch')
    blue_line = mpatches.Patch(color='blue', label='Training Loss')
    orange_line = mpatches.Patch(color='orange', label='Testing Loss')
    plt.legend(handles=[blue_line, orange_line])
    plt.show()
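main() relies on train() and test() helpers that are not part of this excerpt. Below is a minimal sketch written under two assumptions: both helpers return an average per-sample loss (which is what losses_train and losses_test store), and the model's forward pass ends in log_softmax so F.nll_loss applies directly.

import torch
import torch.nn.functional as F

def train(args, model, device, train_loader, optimizer, epoch):
    # one pass over the training set; returns the mean per-sample loss
    model.train()
    total_loss = 0.0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        total_loss += loss.item() * len(data)
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{}]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                loss.item()))
    return total_loss / len(train_loader.dataset)

def test(args, model, device, test_loader):
    # evaluation pass; no autograd graph is needed here
    model.eval()
    test_loss = 0.0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
    return test_loss / len(test_loader.dataset)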
Exemplo n.º 57
0
    def device(self):
        return torch.device(
            "cuda") if torch.cuda.is_available() else torch.device("cpu")
Exemplo n.º 58
0
import glob
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from sklearn import preprocessing
from torchvision import transforms
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import itertools

# Set device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Set hyperparameters
num_epochs = 200
num_classes = 5
batch_size = 20
dropout_rate = 0.4
learning_rate = 0.00008


# Build the image loader
class ImageLoader(Dataset):
    def __init__(self, x, y, isCuda=False):
        self.X = x
        self.y = y

    # the source truncates this example after __init__; __len__ and __getitem__
    # are required by the Dataset protocol, so a minimal completion is assumed here
    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        return self.X[index], self.y[index]
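Assuming X_train and y_train are indexable arrays or tensors (hypothetical names, since the source cuts off here), the completed dataset plugs straight into a DataLoader with the batch size declared above.

train_dataset = ImageLoader(X_train, y_train)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)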
Exemplo n.º 59
0
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None, device=torch.device('cuda:0')):
        super(ResNet, self).__init__()

        self.device = device

        # Dictionary that stores FullGrad information
        self.fullgrad_info = {
            'get_biases': False,
            'get_features': False,
            'biases':[],
            'features': []
        }

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)


        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
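The constructor calls self._make_layer, which is not included in this excerpt. Below is a sketch following the standard torchvision pattern, on the assumption that block exposes the usual BasicBlock/Bottleneck constructor interface.

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # trade stride for dilation to keep the spatial resolution
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection shortcut to match channel count and stride
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                norm_layer(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, stride, downsample, self.groups,
                        self.base_width, previous_dilation, norm_layer)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width,
                                dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)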
Exemplo n.º 60
0
    def test_unet_training(self):
        loss = run_test(device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0"))
        print(loss)
        # assertGreaterEqual(0.85, loss) checks that the final loss is at most 0.85
        self.assertGreaterEqual(0.85, loss)