Example #1
def odin_infer(loader,
               model,
               num_bs,
               num_classes,
               with_acc=False,
               seed=0,
               T=1000,
               eps=0.0014):
    loss_fn = torch.nn.CrossEntropyLoss()
    torch.manual_seed(seed)
    model.eval()
    a_test_ = Exponential(torch.ones([1, 400]))
    a_test = a_test_.sample((num_bs, ))
    acc = 0.
    outputs = np.zeros([num_bs, len(loader.dataset), num_classes + 1])
    beg = 0
    for i, (img, label) in enumerate(loader):
        index = list(range(beg, beg + img.size(0)))
        beg = beg + img.size(0)
        label = label.numpy().reshape(-1, 1)
        img_ = img.cuda()
        img_.requires_grad = True
        output = model(img_, torch.zeros([img.shape[0], 400]).cuda())  # weight placeholder on the same device as img_

        output = output / T
        pseudo_label = output.argmax(-1).cuda()
        loss = loss_fn(output, pseudo_label)
        loss.backward()

        gradient = torch.ge(img_.grad.data, 0)
        gradient = (gradient.float() - 0.5) * 2

        gradient.index_copy_(
            1,
            torch.LongTensor([0]).cuda(),
            gradient.index_select(1,
                                  torch.LongTensor([0]).cuda()) / (0.2023))
        gradient.index_copy_(
            1,
            torch.LongTensor([1]).cuda(),
            gradient.index_select(1,
                                  torch.LongTensor([1]).cuda()) / (0.1994))
        gradient.index_copy_(
            1,
            torch.LongTensor([2]).cuda(),
            gradient.index_select(1,
                                  torch.LongTensor([2]).cuda()) / (0.2010))

        img_new = img_.data - eps * gradient  # ODIN input perturbation
        for _ in range(num_bs):
            w_test = a_test[_].repeat_interleave(img.shape[0], dim=0)
            output_new = model(img_new, w_test.cuda()).cpu().detach().numpy()
            outputs[_, index] = np.concatenate([output_new, label], axis=1)

    if with_acc:
        pred = outputs.sum(0)[:, :-1].argmax(1)
        label = outputs[0][:, -1]
        acc = pred == label
        print(f"[Test] acc : {acc.mean()}")
    return outputs
Example #2
    def forward(self, x, t, mask):
        #t : TxBSx2
        d_j = t[:, :, 0]  #TxBS
        batch_size, seq_length = x.size(1), x.size(0)
        past_influences = []
        for idx in range(self.m):
            t_pad = torch.cat(
                [torch.zeros(idx + 1, batch_size).to(device),
                 d_j])[:-(idx + 1), :][:, :, None]  #TxBSx1
            past_influences.append(t_pad)
        past_influences = torch.cat(
            past_influences, dim=-1) * self.alpha[None, None, :]  #TxBSxm
        total_influence = torch.sum(past_influences,
                                    dim=-1) + self.gamma[None, :].exp()
        #To consider from time step 1
        m = Exponential(total_influence[1:, :])  #T-1xBS
        ll_loss = (m.log_prob(d_j[1:, :])).sum()

        metric_dict = {
            'true_ll': -ll_loss.detach(),
            "marker_acc": 0.,
            "marker_acc_count": 1.
        }
        with torch.no_grad():
            time_mse = ((d_j[1:, :] - 1. / total_influence[1:, :]) *
                        mask[1:, :])**2.
            metric_dict['time_mse'] = time_mse.sum().detach().cpu().numpy()
            metric_dict['time_mse_count'] = mask[
                1:, :].sum().detach().cpu().numpy()
        return -ll_loss, metric_dict
Example #3
    def __init__(self, in_features, a=None, trainable=True):
        '''
        Initialization.
        Args:
            in_features: shape of the input
            a: trainable parameter
            trainable: sets `a` as a trainable parameter
            
            Higher values of `a` give higher-frequency components; 5-50 is a good
            starting point if you already think your data is periodic, and lower
            values (e.g. 0.5) if not. When `a` is not given, it is initialized at
            random (a mix of frequencies), and it is trained along with the rest
            of your model.
        '''
        super(Snake, self).__init__()
        self.in_features = in_features if isinstance(in_features,
                                                     list) else [in_features]

        # Initialize `a`
        if a is not None:
            self.a = Parameter(torch.ones(self.in_features) *
                               a)  # create a tensor out of alpha
        else:
            m = Exponential(torch.tensor([0.1]))
            self.a = Parameter(
                (m.rsample(self.in_features)
                 ).squeeze())  # random init = mix of frequencies

        self.a.requires_grad = trainable  # whether `a` is trained along with the model
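The forward pass is not shown in this snippet; in the reference Snake activation (Ziyin et al., 2020, "Neural Networks Fail to Learn Periodic Functions and How to Fix It") it is x + (1/a)·sin²(a·x), which is what this initialization of `a` feeds. A minimal sketch under that assumption:

import torch

def snake(x, a):
    # Snake activation: x plus a periodic term whose frequency is set by `a`;
    # sampling `a` from an Exponential (as above) mixes frequencies at init.
    return x + (1.0 / a) * torch.sin(a * x) ** 2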
Example #4
 def _train_a_query(self, indice):
     indexer = np.stack(
         [np.array(indice[i::self.num_groups])
          for i in range(self.num_groups)])
     sampler = SubsetRandomSampler(indice)
     loader = DataLoader(self.dataset, batch_size=128, sampler=sampler,
                         num_workers=4, pin_memory=True)
     alpha_generator = Exponential(torch.ones([1, self.num_groups]))
     t_iter = tqdm(range(self.num_epoch),
                   total=self.num_epoch,
                   desc="Training")
     for t in t_iter:
         alpha = alpha_generator.sample().cuda()
         for img, label, index in loader:
             self.model.train()
             n0 = img.size(0)
             u_is = []
             for i in index:
                 u_i = np.where(indexer == i.item())[0][0]
                 u_is += [u_i]
             w = alpha[0, u_is].cuda()
             output = self.model(img.cuda(), alpha.repeat_interleave(n0, 0))
             loss = self.loss_fn(output, label.cuda(), w)
             self.optim.zero_grad()
             loss.backward()
             self.optim.step()
         self.sched.step()
Example #5
 def __init__(self, rate, upper):
     self.base = Exponential(rate)
     self._batch_shape = self.base.rate.size()
     self._upper = upper
     self.upper = torch.full_like(self.base.rate, upper)
     # normaliser
     self.normaliser = self.base.cdf(self.upper)
     self.uniform = Uniform(torch.zeros_like(self.upper), self.normaliser)
Example #6
def infer_a_sample(img, model, num_classes, num_bs, fac):
    model.eval()
    a_test = Exponential(torch.ones([1, 400]))
    w_test = a_test.sample((num_bs, ))
    output = np.zeros([num_bs, num_classes])
    for _ in range(num_bs):
        o = model(img, w_test[_], fac).cpu().numpy()
        output[_] = o
    return output
Example #7
    def __init__(self, config):
        self.config = config
        torch.manual_seed(config["random_seed"])

        self.M = int(config["M"])
        self.K = int(config["K"])

        self.G = Exponential(1).sample((self.M, self.K)).float().cpu().detach().numpy()
        self.G = self.G[:, np.argsort(self.G.sum(axis=0))]
        self.P = float(config["transmission_power"])
Example #8
    def __init__(self, in_features, alpha=None, alpha_trainable=True):
        """
        Init method.
        """
        super(Snake, self).__init__()
        self.in_features = in_features
        if alpha is not None:
            self.alpha = Parameter(torch.ones(in_features) * alpha)
        else:
            m = Exponential(torch.tensor([0.1]))
            self.alpha = Parameter(m.sample((in_features,)).squeeze(-1))  # random init = mix of frequencies

        self.alpha.requires_grad = alpha_trainable
Example #9
class ExpTimeToOpen(Base):
    def __init__(self, rate):
        self.dist = Exponential(rate)

    def log_prob(self, times):
        dt = times[:, 0]
        return self.dist.log_prob(dt)
Example #10
 def __init__(self, scale, alpha, validate_args=None):
     self.scale, self.alpha = broadcast_all(scale, alpha)
     base_dist = Exponential(self.alpha, validate_args=validate_args)
     transforms = [ExpTransform(), AffineTransform(loc=0, scale=self.scale)]
     super(Pareto, self).__init__(base_dist,
                                  transforms,
                                  validate_args=validate_args)
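The construction above works because, if X ~ Exponential(alpha), then exp(X) is Pareto-distributed with minimum 1, and the affine transform rescales the minimum to `scale`. A minimal sanity check, my own sketch (not part of the original snippet), comparing against PyTorch's built-in Pareto:

import torch
from torch.distributions import Exponential, Pareto, TransformedDistribution
from torch.distributions.transforms import AffineTransform, ExpTransform

scale, alpha = torch.tensor(2.0), torch.tensor(3.0)
via_transform = TransformedDistribution(
    Exponential(alpha),
    [ExpTransform(), AffineTransform(loc=0, scale=scale)])
y = torch.tensor([2.5, 4.0, 10.0])
print(via_transform.log_prob(y))         # transformed log-densities
print(Pareto(scale, alpha).log_prob(y))  # built-in reference; same values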
Example #11
class ExpTimeToOpen(Base):
    def __init__(self, rate):
        self.dist = Exponential(rate)

    def log_prob(self, times):
        dt = times[:, 1] - times[:, 0]
        dt = torch.where(dt < 0, dt + 24, dt)  # wrap negative gaps past midnight (vectorised; apply_ is CPU-only and slow)
        return self.dist.log_prob(dt)
Example #12
 def __init__(self, scale, concentration, validate_args=None):
     self.scale, self.concentration = broadcast_all(scale, concentration)
     self.concentration_reciprocal = self.concentration.reciprocal()
     base_dist = Exponential(torch.ones_like(self.scale), validate_args=validate_args)
     transforms = [PowerTransform(exponent=self.concentration_reciprocal),
                   AffineTransform(loc=0, scale=self.scale)]
     super(Weibull, self).__init__(base_dist,
                                   transforms,
                                   validate_args=validate_args)
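The identity used here: if X ~ Exponential(1), then scale · X^(1/concentration) follows a Weibull(scale, concentration) law. A minimal sanity check, my own sketch, against PyTorch's built-in Weibull:

import torch
from torch.distributions import Exponential, TransformedDistribution, Weibull
from torch.distributions.transforms import AffineTransform, PowerTransform

scale, k = torch.tensor(2.0), torch.tensor(1.5)
via_transform = TransformedDistribution(
    Exponential(torch.ones_like(scale)),
    [PowerTransform(exponent=k.reciprocal()),
     AffineTransform(loc=0, scale=scale)])
x = torch.tensor([0.5, 1.0, 3.0])
print(via_transform.log_prob(x))      # transformed log-densities
print(Weibull(scale, k).log_prob(x))  # built-in reference; same values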
Example #13
    def __init__(
        self,
        synapses,
        neurons,
        input_channel=1,
        output_channel=1,
        step=16,
        leak=32,
        fodep=None,
        delay=None,
        w_init=None,
        theta=None,
        dense=None,
    ):
        super(RecurColumn, self).__init__()

        self.synapses = synapses
        self.neurons = neurons
        self.input_channel = input_channel
        self.output_channel = output_channel

        assert theta or dense, 'either theta or dense should be specified'
        self.theta = theta = theta or dense * (synapses * input_channel)
        self.dense = dense = dense or theta / (synapses * input_channel)
        assert dense < 2 * input_channel * \
            synapses, 'invalid theta or density, try setting a smaller value'
        w_init = w_init or dense
        # default response function: StepFireLeak
        self.response_function = StepFireLeak(step, leak)
        self.fodep = fodep = fodep or self.response_function.fodep
        assert fodep >= self.response_function.fodep, f'forced depression should be at least {self.response_function.fodep}'
        self.delay = delay or self.fodep
        # initialize weight to w_init
        self.weight_i = nn.parameter.Parameter(Exponential(1 / w_init).sample(
            (self.output_channel * self.neurons,
             self.input_channel * self.synapses)).clip(0, 1),
                                               requires_grad=True)
        self.weight_s = nn.parameter.Parameter(Exponential(1 / w_init).sample(
            (self.output_channel * self.neurons,
             self.input_channel * self.synapses)).clip(0, 1),
                                               requires_grad=True)
        print(
            f'Building fully connected TNN layer with theta={theta:.4f}, dense={dense:.4f}, fodep={fodep}'
        )
Example #14
class HomogeneousPoissonProcess:
    def __init__(self, rate=1):
        self.rate = rate
        self.exp = Exponential(rate)

    def sample(self, size, max_seq_len, max_time=math.inf):
        gaps = self.exp.sample((size, max_seq_len))
        times = torch.cumsum(gaps, dim=1)
        masks = (times <= max_time).float()
        return times, masks
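Inter-arrival gaps of a homogeneous Poisson process are i.i.d. Exponential(rate), so cumulative sums of exponential samples give arrival times and the mask zeroes out events past max_time. A usage sketch, assuming the class above is in scope:

import torch

proc = HomogeneousPoissonProcess(rate=2.0)
times, masks = proc.sample(size=4, max_seq_len=64, max_time=10.0)
print(times.shape)       # torch.Size([4, 64]); monotone along dim 1
print(masks.sum(dim=1))  # events with t <= 10; roughly rate * max_time = 20 per row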
Example #15
    def _infer_gbs(self, with_acc=False, seed=0):
        torch.manual_seed(seed)
        a_test_ = Exponential(torch.ones([1, self.args.n_a]))
        a_test = a_test_.sample([self.args.num_bs, ])
        outputs = np.zeros([self.args.num_bs, len(self.loader.dataset), self.args.num_classes + 1])
        beg = 0
        for i, (img, label) in enumerate(self.loader):
            end = beg + img.size(0)
            label = label.numpy().reshape(-1, 1)
            for _ in range(self.args.num_bs):
                w_test = a_test[_].repeat_interleave(img.size(0), dim=0)
                output = self._infer_a_batch_a_bs(img, w_test)
                outputs[_, beg: end] = np.concatenate([output, label], axis=1)
            beg = end

        if with_acc:
            pred = outputs.sum(0)[:, :-1].argmax(1)
            label = outputs[0][:, -1]
            acc = (pred == label).mean()
            print(f"[Test] acc : {acc}")
        return outputs
Example #16
def odin_infer_a_sample(img,
                        model,
                        num_classes,
                        num_bs,
                        fac,
                        T=1000,
                        eps=0.0001):
    model.eval()
    loss_fn = torch.nn.CrossEntropyLoss()
    img = img.cuda()
    img.requires_grad = True
    model.zero_grad()
    a_test = Exponential(torch.ones([1, 400]))
    w_test = a_test.sample((num_bs, ))
    outputs = np.zeros([num_bs, num_classes])
    for _ in range(num_bs):
        # w_test = a_test[_].repeat_interleave(img.shape[0], dim=0)
        img_ = img.cuda()
        img_.requires_grad = True
        output = model(img_, w_test[_], fac)

        output = output / T
        pseudo_label = output.argmax(-1).cuda()
        loss = loss_fn(output, pseudo_label)
        loss.backward()

        gradient = torch.ge(img_.grad.data, 0)
        # gradient = (gradient.float() - 0.5) * 2

        # gradient.index_copy_(1, torch.LongTensor([0]).cuda(),
        #  gradient.index_select(1, torch.LongTensor([0]).cuda()) / (0.2023))
        # gradient.index_copy_(1, torch.LongTensor([1]).cuda(),
        #  gradient.index_select(1, torch.LongTensor([1]).cuda()) / (0.1994))
        # gradient.index_copy_(1, torch.LongTensor([2]).cuda(),
        #  gradient.index_select(1, torch.LongTensor([2]).cuda()) / (0.2010))

        img_new = img_.data - eps * gradient.float()  # ODIN input perturbation (gradient is boolean here)
        output_new = model(img_new, w_test[_], fac).cpu().detach().numpy()
        outputs[_] = output_new
    return outputs
Example #17
class RightTruncatedExponential(torch.distributions.Distribution):
    def __init__(self, rate, upper):
        self.base = Exponential(rate)
        self._batch_shape = self.base.rate.size()
        self._upper = upper
        self.upper = torch.full_like(self.base.rate, upper)
        # normaliser
        self.normaliser = self.base.cdf(self.upper)
        self.uniform = Uniform(torch.zeros_like(self.upper), self.normaliser)

    def rsample(self, sample_shape=torch.Size()):
        # sample from truncated support (0, normaliser)
        # where normaliser = base.cdf(upper)
        u = self.uniform.rsample(sample_shape)
        x = self.base.icdf(u)
        return x

    def log_prob(self, value):
        return self.base.log_prob(value) - torch.log(self.normaliser)

    def cdf(self, value):
        return self.base.cdf(value) / self.normaliser

    def icdf(self, value):
        return self.base.icdf(value * self.normaliser)

    def cross_entropy(self, other):
        assert isinstance(other, RightTruncatedExponential)
        assert type(self.base) is type(
            other.base) and self._upper == other._upper
        a = torch.log(other.base.rate) - torch.log(other.normaliser)
        log_b = torch.log(self.base.rate) + torch.log(
            other.base.rate) - torch.log(self.normaliser)
        b = torch.exp(log_b)
        c = (torch.exp(-self.base.rate) *
             (-self.base.rate - 1) + 1) / (self.base.rate**2)
        return -(a - b * c)

    def entropy(self):
        return self.cross_entropy(self)
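rsample here is inverse-CDF sampling on a truncated range: with U ~ Uniform(0, F(upper)), F⁻¹(U) is distributed as the base Exponential conditioned to (0, upper). A usage sketch, assuming the class above and its Exponential/Uniform imports are in scope:

import torch

d = RightTruncatedExponential(rate=torch.tensor([2.0]), upper=1.0)
x = d.rsample((1000,))
print(float(x.min()) > 0.0, float(x.max()) < 1.0)  # support is (0, upper)
print(d.cdf(d.upper))  # tensor([1.]) by construction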
Example #18
def infer(loader,
          model,
          num_bs,
          num_classes,
          with_acc=False,
          with_indice=False,
          is_mcd=False,
          seed=0):
    torch.manual_seed(seed)
    model.eval()
    a_test_ = Exponential(torch.ones([1, 400]))
    a_test = a_test_.sample((num_bs, ))
    acc = 0.
    outputs = np.zeros([num_bs, len(loader.dataset), num_classes + 1])
    beg = 0
    indice = []
    ret = []
    for i, (img, label, idx) in enumerate(loader):
        img = img.cuda()
        index = list(range(beg, beg + img.size(0)))
        beg = beg + img.size(0)
        label = label.numpy().reshape(-1, 1)
        indice += [idx]
        for j in range(num_bs):
            w_test = a_test[j].repeat_interleave(img.shape[0], dim=0)
            if is_mcd:
                model.apply(apply_dropout)
            output = model(img, w_test.cuda()).cpu().numpy()
            outputs[j, index] = np.concatenate([output, label], axis=1)
    ret += [outputs]
    if with_acc:
        pred = outputs.sum(0)[:, :-1].argmax(1)
        label = outputs[0][:, -1]
        acc = pred == label
        print(f"[Test] acc : {acc.mean()}")
        ret += [acc.mean()]

    if with_indice:
        ret += [torch.cat(indice)]
    return ret
Example #19
    def __init__(self,
                 synapses,
                 neurons,
                 input_channel=1,
                 output_channel=1,
                 step=16,
                 leak=32,
                 bias=0.5,
                 winners=None,
                 fodep=None,
                 w_init=None,
                 theta=None,
                 dense=None,
                 alpha1=0.02,
                 alpha2=0.02,
                 beta1=0.99,
                 beta2=0.999):
        super(FullColumn, self).__init__()
        # model skeleton parameters
        self.synapses = synapses
        self.neurons = neurons
        self.input_channel = input_channel
        self.output_channel = output_channel

        # threshold parameters
        assert theta or dense, 'either theta or dense should be specified'
        self.theta = theta = theta or dense * (synapses * input_channel)
        self.dense = dense = dense or theta / (synapses * input_channel)
        w_init = w_init or dense

        # spiking control parameters
        self.response_function = StepFireLeak(step, leak)
        self.fodep = fodep = fodep or self.response_function.fodep
        assert fodep >= self.response_function.fodep, f'forced depression should be at least {self.response_function.fodep}'
        self.winners = winners = winners or neurons

        # initialize weight and bias
        self.bias = nn.parameter.Parameter(
            torch.zeros(self.output_channel * self.neurons) + bias,
            requires_grad=False)
        self.weight = nn.parameter.Parameter(Exponential(1 / w_init).sample(
            (self.output_channel * self.neurons,
             self.input_channel * self.synapses)).clip(min=0),
                                             requires_grad=True)
        self.dual = SignalDualBackground()
        self.optimizer = AdamBSTDP(self.weight, alpha1, alpha2, beta1, beta2)
        print(
            'Building fully connected TNN layer with '
            f'theta={theta:.4f}, '
            f'dense={dense:.4f}, '
            f'fodep={fodep}, '
            f'winners={winners}, '
            f'bias={bias}')
Example #20
    def __init__(self, *, structure: structure_prior.StructurePrior, intensity_mu_sig: tuple, lifetime: float,
                 frame_range: tuple, xy_unit: str, px_size: tuple, density=None, em_avg=None, intensity_th=None):
        """

        Args:
            structure:
            intensity_mu_sig:
            lifetime:
            xy_unit:
            px_size:
            frame_range: specifies the frame range
            density:
            em_avg:
            intensity_th:

        """
        super().__init__(structure=structure,
                         photon_range=None,
                         xy_unit=xy_unit,
                         px_size=px_size,
                         density=density,
                         em_avg=em_avg)

        self.n_sampler = np.random.poisson
        self.frame_range = frame_range
        self.intensity_mu_sig = intensity_mu_sig
        self.intensity_dist = torch.distributions.normal.Normal(self.intensity_mu_sig[0],
                                                                self.intensity_mu_sig[1])
        self.intensity_th = intensity_th if intensity_th is not None else 1e-8
        self.lifetime_avg = lifetime
        self.lifetime_dist = Exponential(1 / self.lifetime_avg)  # parse the rate not the scale ...

        self.t0_dist = torch.distributions.uniform.Uniform(*self._frame_range_plus)

        """
        Determine the total number of emitters. Depends on lifetime, frames and emitters.
        (lifetime + 1) because of binning effect.
        """
        self._emitter_av_total = self._em_avg * self._num_frames_plus / (self.lifetime_avg + 1)
Example #21
    def __init__(self, in_features, alpha = None, alpha_trainable = True):
        '''
        Initialization.
        INPUT:
            - in_features: shape of the input
            - alpha: trainable parameter
            
            Higher values of alpha give higher-frequency components; 5-50 is a good
            starting point if you already think your data is periodic, and lower
            values (e.g. 0.5) if not. When alpha is not given, it is initialized at
            random (a mix of frequencies), and it is trained along with the rest
            of your model.
        '''
        super(Snake,self).__init__()
        self.in_features = in_features

        # initialize alpha
        if alpha is not None:
            self.alpha = Parameter(torch.ones(in_features) * alpha) # create a tensor out of alpha
        else:            
            m = Exponential(torch.tensor([0.1]))
            self.alpha = Parameter(m.sample((in_features,)).squeeze(-1)) # random init = mix of frequencies

        self.alpha.requires_grad = alpha_trainable # usually we'll want to train alpha, but maybe for some experiments we won't
Example #22
    def __init__(self,
                 input_channel=1,
                 output_channel=1,
                 kernel=3,
                 stride=2,
                 step=16,
                 leak=32,
                 bias=0.5,
                 winners=0.5,
                 fodep=None,
                 w_init=None,
                 theta=None,
                 dense=None):
        super(ConvColumn, self).__init__()
        # model skeleton parameters
        self.input_channel = input_channel
        self.output_channel = output_channel
        self.kernel = kernel
        self.stride = stride

        # threshold parameters
        assert theta or dense, 'either theta or dense should be specified'
        self.theta = theta = theta or dense * (kernel * kernel * input_channel)
        self.dense = dense = dense or theta / (kernel * kernel * input_channel)
        assert dense < 2 * input_channel * kernel * \
            kernel, 'invalid theta or density, try setting a smaller value'
        w_init = w_init or dense

        # spiking control parameters
        self.response_function = StepFireLeak(step, leak)
        self.fodep = fodep = fodep or self.response_function.fodep
        assert fodep >= self.response_function.fodep, f'forced depression should be at least {self.response_function.fodep}'
        self.winners = winners

        # initialize weight and bias
        self.bias = nn.parameter.Parameter(torch.zeros(self.output_channel) +
                                           bias,
                                           requires_grad=False)
        self.weight = nn.parameter.Parameter(Exponential(1 / w_init).sample(
            (self.output_channel, self.input_channel, self.kernel,
             self.kernel)).clip(0, 1),
                                             requires_grad=True)
        print(
            'Building convolutional TNN layer with '
            f'theta={theta:.4f}, '
            f'dense={dense:.4f}, '
            f'fodep={fodep}, '
            f'winners={winners}, '
            f'bias={bias}')
Example #23
class Beamforming(EnvWrapper):
    def __init__(self, config):
        self.config = config
        torch.manual_seed(config["random_seed"])

        self.M = int(config["M"])
        self.K = int(config["K"])

        self.G = Exponential(1).sample((self.M, self.K)).float().cpu().detach().numpy()
        self.G = self.G[:, np.argsort(self.G.sum(axis=0))]
        self.P = float(config["transmission_power"])


    def sinr(self, W):
        W2 = np.square(W)
        gamma = np.zeros(self.K, dtype='float32')

        for k in range(self.K):
            nom = np.dot(self.G[:,k].reshape(1,self.M), W2[:,k].reshape(self.M,1))

#            denom = (self.G.sum(axis=1) - self.G[:,k]).reshape(self.M, 1)
#            denom += self.G[:, list(range(k))].sum(axis=1).reshape(self.M, 1)
#            denom += (self.G.sum(axis=1) * (self.K-2) + self.G[:,k]).reshape(self.M, 1)

            denom = self.G[:, list(range(k))].sum(axis=1)
            denom = denom.reshape(self.M, 1)
            denom = np.dot(W2[:,k].reshape(1,self.M), denom)
            denom += np.float32(1.0/self.P)

            gamma[k] = nom/denom

        return torch.from_numpy(gamma).float().to("cpu")

    def reset(self):
        W = torch.rand(self.M, self.K)
        W = torch.nn.functional.normalize(W, p=2)
        return self.sinr(W)

    def step(self, action):
        state = self.sinr(action.reshape(self.M, self.K))
        reward = (1+state).log2().sum()
        return state, reward, 0
Example #24
    def __init__(self,
                 actor_critic,
                 clip_param,
                 ppo_epoch,
                 num_mini_batch,
                 value_loss_coef,
                 entropy_coef,
                 lr=None,
                 eps=None,
                 max_grad_norm=None,
                 use_clipped_value_loss=True,
                 ftrl_mode=False,
                 correlated_mode=False):

        self.correlated_mode = correlated_mode  # we inject embedding in this case
        self.ftrl_mode = ftrl_mode  # we use regularization for FTRL.

        self.actor_critic = actor_critic

        self.clip_param = clip_param
        self.ppo_epoch = ppo_epoch
        self.num_mini_batch = num_mini_batch

        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef

        self.max_grad_norm = max_grad_norm
        self.use_clipped_value_loss = use_clipped_value_loss

        self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps)

        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        params = torch.Tensor().to(self.device)
        for param in self.actor_critic.parameters():
            params = torch.cat((params, param.view(-1)))
        self.randomness = Exponential(torch.ones(len(params))).sample().to(
            self.device)
Example #25
class EmitterSamplerBlinking(EmitterSamplerFrameIndependent):
    def __init__(self, *, structure: structure_prior.StructurePrior, intensity_mu_sig: tuple, lifetime: float,
                 frame_range: tuple, xy_unit: str, px_size: tuple, density=None, em_avg=None, intensity_th=None):
        """

        Args:
            structure:
            intensity_mu_sig:
            lifetime:
            xy_unit:
            px_size:
            frame_range: specifies the frame range
            density:
            em_avg:
            intensity_th:

        """
        super().__init__(structure=structure,
                         photon_range=None,
                         xy_unit=xy_unit,
                         px_size=px_size,
                         density=density,
                         em_avg=em_avg)

        self.n_sampler = np.random.poisson
        self.frame_range = frame_range
        self.intensity_mu_sig = intensity_mu_sig
        self.intensity_dist = torch.distributions.normal.Normal(self.intensity_mu_sig[0],
                                                                self.intensity_mu_sig[1])
        self.intensity_th = intensity_th if intensity_th is not None else 1e-8
        self.lifetime_avg = lifetime
        self.lifetime_dist = Exponential(1 / self.lifetime_avg)  # parse the rate not the scale ...

        self.t0_dist = torch.distributions.uniform.Uniform(*self._frame_range_plus)

        """
        Determine the total number of emitters. Depends on lifetime, frames and emitters.
        (lifetime + 1) because of binning effect.
        """
        self._emitter_av_total = self._em_avg * self._num_frames_plus / (self.lifetime_avg + 1)

    @property
    def _frame_range_plus(self):
        """
        Frame range including buffer in front and end to account for build up effects.

        """
        return self.frame_range[0] - 3 * self.lifetime_avg, self.frame_range[1] + 3 * self.lifetime_avg

    @property
    def num_frames(self):
        return self.frame_range[1] - self.frame_range[0] + 1

    @property
    def _num_frames_plus(self):
        return self._frame_range_plus[1] - self._frame_range_plus[0] + 1

    def sample(self):
        """
        Return sampled EmitterSet in the specified frame range.

        Returns:
            EmitterSet

        """

        n = self.n_sampler(self._emitter_av_total)

        loose_em = self.sample_loose_emitter(n=n)
        em = loose_em.return_emitterset()
        em = em.get_subset_frame(*self.frame_range)  # because the simulated frame range is larger

        return em

    def sample_n(self, *args, **kwargs):
        raise NotImplementedError

    def sample_loose_emitter(self, n) -> decode.generic.emitter.LooseEmitterSet:
        """
        Generate loose EmitterSet. Loose emitters are emitters that are not yet binned to frames.

        Args:
            n: number of 'loose' emitters

        Returns:
            LooseEmitterSet

        """

        xyz = self.structure.sample(n)

        """Draw from intensity distribution but clamp the value so as not to fall below 0."""
        intensity = torch.clamp(self.intensity_dist.sample((n,)), self.intensity_th)

        """Distribute emitters in time. Increase the range a bit."""
        t0 = self.t0_dist.sample((n,))
        ontime = self.lifetime_dist.rsample((n,))

        return decode.generic.emitter.LooseEmitterSet(xyz, intensity, ontime, t0, id=torch.arange(n).long(),
                                                      xy_unit=self.xy_unit, px_size=self.px_size)

    @classmethod
    def parse(cls, param, structure, frames: tuple):
        return cls(structure=structure,
                   intensity_mu_sig=param.Simulation.intensity_mu_sig,
                   lifetime=param.Simulation.lifetime_avg,
                   xy_unit=param.Simulation.xy_unit,
                   px_size=param.Camera.px_size,
                   frame_range=frames,
                   density=param.Simulation.density,
                   em_avg=param.Simulation.emitter_av,
                   intensity_th=param.Simulation.intensity_th)
Example #26
for param in x_player.parameters():  # the same number of params for y_player
    params = torch.cat((params, param.view(-1)))

for t in range(NUM_STEPS):
    # player x's turn
    x_optimizer.zero_grad()
    x_loss = torch.zeros(1, 1)
    y = y_player()
    for param in x_queue:
        x_player.load_state_dict(param)
        x = x_player()
        x_loss += torch.mm(torch.mm(x, A), y.T)
    x_loss /= len(x_queue)

    # perturbation
    randomness = Exponential(x_rate * torch.ones(len(params))).sample()
    params = torch.Tensor()
    for param in x_player.parameters():
        params = torch.cat((params, param.view(-1)))
    x_loss -= torch.dot(params, randomness)

    x_loss.backward()
    x_optimizer.step()
    x_queue, m = queue_update(queue=x_queue,
                              m=m,
                              K=K,
                              t=t + 1,
                              ft=copy.deepcopy(x_player.state_dict()),
                              inc=inc)

    # player y's turn
Example #27
 def _update_weight(self):
     if self.epoch > self.epoch_th:
         self.alpha = Exponential(torch.ones([1, self.n_a])).sample()
Example #28
class NbsRunner(CnnRunner):
    def __init__(self, loader, model, optim, lr_scheduler, num_epoch,
                 loss_with_weight, val_metric, test_metric, logger, model_path,
                 rank, epoch_th, num_mc):
        self.num_mc = num_mc
        self.n_a = loader.n_a
        self.epoch_th = epoch_th
        self.group_indices = loader.groups
        self.alpha = torch.ones([1, self.n_a])
        super().__init__(loader, model, optim, lr_scheduler, num_epoch,
                         loss_with_weight, val_metric, test_metric, logger,
                         model_path, rank)
        self.save_kwargs['alpha'] = self.alpha

    def _update_weight(self):
        if self.epoch > self.epoch_th:
            self.alpha = Exponential(torch.ones([1, self.n_a])).sample()

    def _calc_loss(self, img, label, idx):
        n0 = img.size(0)
        u_is = []
        for i in idx:
            u_i = np.where(self.group_indices == i.item())[0][0]
            u_is += [u_i]

        w = self.alpha[0, u_is].cuda()

        output = self.model(img.cuda(non_blocking=True),
                            self.alpha.repeat_interleave(n0, 0))
        label = label.cuda(non_blocking=True)
        loss_ = 0
        for loss, _w in self.loss_with_weight:
            _loss = _w * loss(output, label, w)
            loss_ += _loss
        return loss_

    @torch.no_grad()
    def _valid_a_batch(self, img, label, _, with_output=False):
        self._update_weight()
        self.model.eval()
        output = self.model(img.cuda(non_blocking=True), self.num_mc)
        label = label.cuda(non_blocking=True)
        result = self.val_metric(output.mean(0), label)
        if with_output:
            result = [result, output]
        return result

    def test(self):
        self.load('model.pth')
        loader = self.loader.load('test')
        if self.rank == 0:
            t_iter = tqdm(loader, total=self.loader.len)
        else:
            t_iter = loader

        outputs = []
        labels = []
        self.model.eval()
        for img, label, index in t_iter:
            _, output = self._valid_a_batch(img, label, index, with_output=True)
            outputs += [gather_tensor(output).cpu().numpy()]
            labels += [gather_tensor(label).cpu().numpy()]
        labels = np.concatenate(labels)
        outputs = np.concatenate(outputs, axis=1)
        acc = (outputs.mean(0).argmax(-1) == labels).mean() * 100
        ece = calc_ece(outputs.mean(0), labels)
        nll, brier = calc_nll_brier(
            outputs.mean(0), labels,
            one_hot(torch.from_numpy(labels.astype(int)),
                    self.model.module.classifer.out_features).numpy())
        log = f"[Test] ACC: {acc:.2f}, ECE : {ece:.2f}, "
        log += f"NLL : {nll:.2f}, Brier : {brier:.2f}"
        self.log(log, 'info')
        with h5py.File(f"{self.model_path}/output.h5", 'w') as h:
            h.create_dataset('output', data=outputs)
            h.create_dataset('label', data=labels)
Example #29
class NbsRunner(CnnRunner):
    def __init__(self, loader, model, optim, lr_scheduler, num_epoch,
                 loss_with_weight, val_metric, test_metric, logger, model_path,
                 rank, epoch_th, num_mc, adv_training):
        self.num_mc = num_mc
        self.n_a = loader.n_a
        self.epoch_th = epoch_th
        self.alpha = torch.ones([1, self.n_a])
        super().__init__(loader, model, optim, lr_scheduler, num_epoch,
                         loss_with_weight, val_metric, test_metric, logger,
                         model_path, rank, adv_training)
        self.save_kwargs['alpha'] = self.alpha
        self._update_weight()

    def _update_weight(self):
        if self.epoch > self.epoch_th:
            self.alpha = Exponential(torch.ones([1, self.n_a])).sample()

    def _calc_loss(self, img, label, idx):
        n0 = img.size(0)
        w = self.alpha[0, idx].cuda()

        output = self.model(img.cuda(non_blocking=True),
                            self.alpha.repeat_interleave(n0, 0))
        for _ in range(output.dim() - w.dim()):
            w.unsqueeze_(-1)
        label = label.cuda(non_blocking=True)
        loss_ = 0
        for loss, _w in self.loss_with_weight:
            _loss = _w * loss(output, label, w)
            loss_ += _loss
        return loss_

    @torch.no_grad()
    def _valid_a_batch(self, img, label, with_output=False):
        self._update_weight()
        self.model.eval()
        output = self.model(img.cuda(non_blocking=True), self.num_mc)
        label = label.cuda(non_blocking=True)
        result = self.val_metric(output.mean(0), label)
        if with_output:
            result = [result, output]
        return result

    def test(self, is_seg):
        self.load('model.pth')
        loader = self.loader.load('test')
        if self.rank == 0:
            t_iter = tqdm(loader, total=self.loader.len)
        else:
            t_iter = loader

        outputs = []
        labels = []
        metrics = []
        self.model.eval()
        for img, label in t_iter:
            _metric, output = self._valid_a_batch(img, label, with_output=True)
            labels += [gather_tensor(label).cpu().numpy()]
            outputs += [gather_tensor(output).cpu().numpy()]
            metrics += [gather_tensor(_metric).cpu().numpy()]
        if is_seg:
            met = np.concatenate(metrics).mean()
            self.log(f"[Test] MeanIOU: {met:.2f}", 'info')
            save_path = Path(self.model_path) / 'infer'
            save_path.mkdir(parents=True, exist_ok=True)
            index = 0
            for out, label in zip(outputs, labels):
                for i in range(label.shape[0]):
                    l = label[i]
                    o = out[:, i]

                    with h5py.File(f"{save_path}/{index}.h5", 'w') as h:
                        h.create_dataset('output', data=o)
                        h.create_dataset('label', data=l)
                    index += 1
        else:
            labels = np.concatenate(labels)
            outputs = np.concatenate(outputs, axis=1)
            acc = (outputs.mean(0).argmax(-1) == labels).mean() * 100
            ece = calc_ece(softmax(outputs, -1).mean(0), labels)
            nll, brier = calc_nll_brier_mc(outputs, labels)
            log = f"[Test] ACC: {acc:.2f}, ECE : {ece:.2f}, "
            log += f"NLL : {nll:.2f}, Brier : {brier:.2f}"
            self.log(log, 'info')
            with h5py.File(f"{self.model_path}/output.h5", 'w') as h:
                h.create_dataset('output', data=outputs)
                h.create_dataset('label', data=labels)
Example #30
 def __init__(self, rate=1):
     self.rate = rate
     self.exp = Exponential(rate)