Example no. 1
    def __init__(self, ae, loss, noise_source=noise_gaussian, lr=0.0005):
        super(DAEOptimiser, self).__init__(
            ae, base_optimiser=torch.optim.Adam(ae.parameters(), lr=lr))
        self.__noise = noise_source  # corruption applied to the input in step()
        self.__loss = loss
        self.cma = CMA(loss.__name__)
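
All of these snippets assume a TorchOptimiser base class and a CMA (cumulative moving average) loss tracker that are not included in the examples. The sketch below is only a guess at their shape, written so the examples read as self-contained; the real TorchOptimiser and CMA may differ.

class CMA:
    """Running (cumulative) average over one or more named loss terms (assumed API)."""
    def __init__(self, *names):
        self.names = names
        self.n = 0
        self.totals = [0.] * len(names)

    def push(self, *values):
        self.n += 1
        self.totals = [t + v for t, v in zip(self.totals, values)]

    def averages(self):
        return {name: t / max(self.n, 1) for name, t in zip(self.names, self.totals)}

class TorchOptimiser:
    """Assumed base class: holds the model and drives the usual update cycle
    around the loss returned by a subclass's step(). Subclasses may instead
    manage their own optimiser (see TripletOptimiser below)."""
    def __init__(self, model, base_optimiser=None):
        self.model = model
        self.base_optimiser = base_optimiser

    def __call__(self, *args):
        self.base_optimiser.zero_grad()
        loss = self.step(*args)  # implemented by the subclasses in these examples
        loss.backward()
        self.base_optimiser.step()
        return loss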
Example no. 2
    def __init__(self, ae, loss=F.binary_cross_entropy_with_logits, lr=0.0005):
        super(AEOptimiser, self).__init__(
            ae, base_optimiser=torch.optim.Adam(ae.parameters(), lr=lr))
        self.__loss = loss
        self.cma = CMA(loss.__name__)
Example no. 3
    def __init__(self, vae, loss, beta=1., lr=0.0005):
        super(VAEOptimiser, self).__init__(
            vae, base_optimiser=torch.optim.Adam(vae.parameters(), lr=lr))

        self.beta = beta
        self.__loss = loss
        self.cma = CMA('total_loss', loss.__name__, 'kld_loss')
Example no. 4
    def __init__(self, model, margin=0.2, mode=mode.all, k=16, lr=0.0005):
        super(TripletOptimiser, self).__init__(model)
        self.mode = mode
        self.__top = [(False, False), (True, True), (True, False),
                      (False, True)]  # (topk_n, topk_p)
        self.k = int(k)  # should be related to the batch size or the number of p/n examples expected

        self.optim = torch.optim.Adam(self.model.parameters(), lr=lr)
        self.cma = CMA('loss')
        self.margin = margin
Example no. 5
class AEOptimiser(TorchOptimiser):
    def __init__(self, ae, loss=F.binary_cross_entropy_with_logits, lr=0.0005):
        super(AEOptimiser, self).__init__(
            ae, base_optimiser=torch.optim.Adam(ae.parameters(), lr=lr))
        self.__loss = loss
        self.cma = CMA(loss.__name__)
        # alternative: self.__loss = torch.nn.MSELoss(reduction='sum')

    def step(self, x):
        loss = self.__loss(self.model(x), x.to(self.model.device))
        self.cma.push(loss.item())
        return loss
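
For context, a hypothetical end-to-end use of AEOptimiser, leaning on the TorchOptimiser/CMA sketch above. The toy model, the .device attribute convention, and the __call__ driver are all assumptions, not part of the original snippets.

import torch
import torch.nn as nn

# Toy batches and a toy autoencoder; the snippets assume the model
# carries a .device attribute, so one is attached here by hand.
loader = [torch.rand(8, 784) for _ in range(10)]
ae = nn.Sequential(nn.Linear(784, 32), nn.ReLU(), nn.Linear(32, 784))
ae.device = 'cpu'

opt = AEOptimiser(ae)
for x in loader:
    loss = opt(x)  # assumed driver: zero_grad, step(x), backward, Adam step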
Example no. 6
class AEOptimiser(TorchOptimiser):
    def __init__(self, ae, loss=F.binary_cross_entropy_with_logits, lr=0.0005):
        super(AEOptimiser, self).__init__(
            ae, base_optimiser=torch.optim.Adam(ae.parameters(), lr=lr))
        self.__loss = loss
        self.cma = CMA(loss.__name__)

    def step(self, x, y=None):
        if y is None:
            y = x  # no explicit target: train as a plain reconstruction autoencoder
        loss = self.__loss(self.model(x), y.to(x.device))
        self.cma.push(loss.item())
        return loss
Example no. 7
class VAEOptimiser(TorchOptimiser):
    def __init__(self, vae, loss, beta=1., lr=0.0005):
        super(VAEOptimiser, self).__init__(
            vae, base_optimiser=torch.optim.Adam(vae.parameters(), lr=lr))

        self.beta = beta
        self.__loss = loss
        self.cma = CMA('total_loss', loss.__name__, 'kld_loss')

    def step(self, x):
        x_target = x.to(self.model.device)
        x_recon, mu_z, logvar_z = self.model(x)
        # closed-form KL divergence of N(mu, sigma^2) from the N(0, 1) prior,
        # scaled by beta
        kld_loss = self.beta * -0.5 * (
            1. + logvar_z - mu_z.pow(2) - logvar_z.exp()).sum()  #.view(batch_size, -1).mean()
        x_loss = self.__loss(x_recon, x_target)
        loss = x_loss + kld_loss
        self.cma.push(loss.item(), x_loss.item(), kld_loss.item())  # same order as the CMA names
        return loss
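
The kld_loss above is the standard closed form for the KL divergence between a diagonal Gaussian posterior N(mu, sigma^2) and the unit Gaussian prior. A quick, self-contained sanity check of that identity against torch.distributions (illustrative only, not part of the original code):

import torch
from torch.distributions import Normal, kl_divergence

mu = torch.randn(4, 8)
logvar = torch.randn(4, 8)

closed_form = -0.5 * (1. + logvar - mu.pow(2) - logvar.exp()).sum()
reference = kl_divergence(Normal(mu, (0.5 * logvar).exp()), Normal(0., 1.)).sum()

assert torch.allclose(closed_form, reference, atol=1e-5)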
Example no. 8
class DAEOptimiser(TorchOptimiser):
    # Corruption helpers, written as plain functions so they can double as
    # default arguments below. The p argument of noise_gaussian is unused and
    # kept only for a uniform signature.
    def noise_gaussian(x, p=0.3):
        return x + np.random.randn(*x.shape)

    def noise_pepper(x, p=0.3):
        return x * (np.random.uniform(size=x.shape) > p)  # zero ~p of the entries

    def noise_saltpepper(x, p=0.3):
        i = np.random.uniform(size=x.shape) < p
        x[i] = (np.random.uniform(size=np.sum(i)) > 0.5)  # set ~p of the entries to 0 or 1
        return x

    def __init__(self, ae, loss, noise_source=noise_gaussian, lr=0.0005):
        super(DAEOptimiser, self).__init__(
            ae, base_optimiser=torch.optim.Adam(ae.parameters(), lr=lr))
        self.__noise = noise_source  # corruption applied to the input in step()
        self.__loss = loss
        self.cma = CMA(loss.__name__)

    def step(self, x):
        # denoising objective: reconstruct the clean input from a corrupted copy
        x_noisy = self.__noise(x)
        loss = self.__loss(self.model(x_noisy), x.to(self.model.device))
        self.cma.push(loss.item())
        return loss
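
For illustration, the three corruption helpers applied to a toy numpy batch. noise_saltpepper mutates its argument, hence the copies; this demo is an addition, not part of the original code.

import numpy as np

x = np.random.uniform(size=(2, 5))

x_gauss = DAEOptimiser.noise_gaussian(x.copy())         # additive unit Gaussian noise
x_pepper = DAEOptimiser.noise_pepper(x.copy(), p=0.3)   # ~30% of entries zeroed
x_sp = DAEOptimiser.noise_saltpepper(x.copy(), p=0.3)   # ~30% of entries set to 0 or 1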
Example no. 9
class TripletOptimiser(TorchOptimiser):
    def __init__(self, model, margin=0.2, mode=mode.all, k=16, lr=0.0005):
        super(TripletOptimiser, self).__init__(model)
        self.mode = mode
        self.__top = [(False, False), (True, True), (True, False),
                      (False, True)]  # (topk_n, topk_p)
        self.k = int(k)  # should be related to the batch size or the number of p/n examples expected

        self.optim = torch.optim.Adam(self.model.parameters(), lr=lr)
        self.cma = CMA('loss')
        self.margin = margin

    def step(self, x, y):
        #self.optim.zero_grad()
        loss = self.loss(x, y.squeeze(), *self.__top[self.mode])
        self.cma.push(loss.item())
        #loss.backward()
        #self.optim.step()
        return loss

    def loss(self, x, y, topk_n=False, topk_p=False):
        x_ = self.model(x)
        #d = self.distance_matrix(x_)
        unique = np.unique(y)  # y: integer class labels on the CPU
        loss = torch.zeros(1, device=self.model.device)

        for u in unique:
            pi = np.nonzero(y == u)[0]
            ni = np.nonzero(y != u)[0]

            #xp_t = d[pi][:,pi]
            #xn_t = d[pi][:,ni]
            # slightly more efficient: slice the embeddings before computing distances
            xp_ = x_[pi]
            xn_ = x_[ni]
            xp = self.distance_matrix(xp_, xp_)
            xn = self.distance_matrix(xp_, xn_)

            if topk_p:
                xp = self.topk2(xp, self.k, large=True)
            if topk_n:
                xn = self.topk2(xn, self.k, large=False)

            # 3D tensor of d(a,p) - d(a,n) over every (positive, negative) pair per anchor
            xf = xp.unsqueeze(2) - xn.unsqueeze(1)
            xf = F.relu(xf + self.margin)  # hinge: penalise d(a,p) + margin > d(a,n)
            loss += xf.sum()

        return loss

    def distance_matrix(self, x1, x2=None):  # squared L2 (Euclidean) distance by default
        # TODO speed up...
        if x2 is None:
            x2 = x1
        n_dif = x1.unsqueeze(1) - x2.unsqueeze(0)
        return torch.sum(n_dif * n_dif, -1)

    ''' # possible speed-up via ||a-b||^2 = ||a||^2 - 2*a.b + ||b||^2 (see the sketch below)
    def dmatrix(x1,x2=None):
        if x2 is None:
            x2 = x1
        dists = -2 * np.dot(x1, x2.T) + np.sum(x1**2, axis=1) + np.sum(x2**2, axis=1)[:, np.newaxis]
        return dists
    '''

    def topk(self, x, k, large=False):
        # taking the top k over the whole matrix makes the later computations
        # a bit tricky; prefer topk2
        indx = torch.topk(x.view(-1), k, largest=large)[1]
        return indx // x.shape[1], indx % x.shape[1]  # (row, column) indices

    def topk2(self, x, k, large=False):
        if k >= x.shape[1]:
            return x
        else:
            return torch.topk(x, k, dim=1, largest=large)[0]
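
The commented-out dmatrix above hints at a quadratic-expansion speed-up; note its numpy draft attaches the [:, np.newaxis] to the wrong squared-norm term. A torch version of the same idea, as a sketch rather than a drop-in replacement for distance_matrix:

import torch

def distance_matrix_fast(x1, x2=None):
    """Pairwise squared L2 distances via ||a-b||^2 = ||a||^2 - 2*a.b + ||b||^2."""
    if x2 is None:
        x2 = x1
    sq1 = x1.pow(2).sum(1, keepdim=True)   # (n1, 1)
    sq2 = x2.pow(2).sum(1)                 # (n2,)
    d = sq1 - 2. * (x1 @ x2.t()) + sq2     # broadcasts to (n1, n2)
    return d.clamp_min(0.)                 # clip tiny negatives from rounding

This matches distance_matrix up to floating-point error while avoiding the large (n1, n2, d) difference tensor, at the cost of slightly worse numerical behaviour for near-duplicate rows.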