def forward(self, inputs):
        if self.normalize_signal:
            # Standardize each example over the channel and time dimensions.
            inputs = (inputs - torch.mean(inputs, dim=(1, 2), keepdim=True)) /\
                     (inputs.std(dim=(1, 2), keepdim=True) + self.epsilon)

        # Apply the real/imaginary DFT convolutions to each channel separately.
        real_part = []
        imag_part = []
        for n in range(inputs.shape[1]):
            channel = inputs[:, n, :][:, None, :]  # (batch, 1, time)
            real_part.append(self.real_conv(channel))
            imag_part.append(self.imag_conv(channel))

        real_part = torch.stack(real_part, dim=-1)
        imag_part = torch.stack(imag_part, dim=-1)
        if self.mode == "abs":
            fft = torch.sqrt(real_part**2 + imag_part**2)
        elif self.mode == "phase":
            fft = torch.atan2(imag_part, real_part)
        elif self.mode == "real":
            fft = real_part
        elif self.mode == "imag":
            fft = imag_part
        elif self.mode == "complex":
            fft = torch.cat((real_part, imag_part), dim=-1)
        elif self.mode == "log":
            fft = torch.sqrt(real_part**2 + imag_part**2 + self.epsilon)
            fft = torch.log10(fft)

        # (batch, freq, frames, channel) -> (batch, channel, freq, frames),
        # keeping only the bins below the Nyquist frequency.
        fft = fft.permute(0, 3, 1, 2)[:, :, :self.nfft // 2, :]
        if self.normalize_feature:
            if self.mode == "complex":
                warnings.warn(
                    "spectrum normalization is not applied when mode == \"complex\""
                )
            else:
                # Standardize each spectrum along the frequency axis.
                mean = torch.mean(fft, dim=2, keepdim=True)
                std = torch.sqrt(
                    torch.clamp(((fft - mean)**2).sum(dim=2, keepdim=True) /
                                fft.shape[2],
                                min=self.epsilon**2))
                fft = (fft - mean) / std
                """
                    if fft.std(dim=2, keepdim=True, unbiased=False).sum() < 1e-10:
                        pdb.set_trace()
                
                    fft = (fft - torch.mean(fft, dim=2, keepdim=True)) / (
                                fft.std(dim=2, keepdim=True, unbiased=False) + self.epsilon)
                """
        # fft = fft[:, self.bottom:-1 * self.top, :, :]

        return fft
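The real_conv / imag_conv pair used above computes the DFT of each frame as a strided 1-D convolution. A minimal sketch of how such kernels could be built from fixed cosine/sine bases; the class name and the nfft/hop parameters are assumptions, not taken from the original module:

import numpy as np
import torch
import torch.nn as nn

class ConvDFT(nn.Module):
    # Sketch: DFT real/imaginary parts as frozen Conv1d kernels (assumed setup).
    def __init__(self, nfft=512, hop=128):
        super().__init__()
        self.nfft = nfft
        # One output channel per frequency bin, one kernel tap per time sample.
        self.real_conv = nn.Conv1d(1, nfft, kernel_size=nfft, stride=hop, bias=False)
        self.imag_conv = nn.Conv1d(1, nfft, kernel_size=nfft, stride=hop, bias=False)
        # Fourier basis: row k holds cos/sin(2*pi*k*n/nfft) over time index n.
        n = np.arange(nfft)
        basis = 2.0 * np.pi * n.reshape(-1, 1) * n / nfft
        self.real_conv.weight.data = torch.from_numpy(np.cos(basis)).float().unsqueeze(1)
        self.imag_conv.weight.data = torch.from_numpy(-np.sin(basis)).float().unsqueeze(1)
        for p in self.parameters():
            p.requires_grad = False  # the basis stays fixed during training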
Example #2
    def dlogp(self, *args):
        # One argument per level of the chain.
        assert len(args) == self.depth

        # The root distribution depends only on its own value; every later
        # level is conditioned on the value of the level above it.
        dlogps = [self.dists[0].dlogp(args[0])]
        for i in range(1, self.depth):
            dlogps.append(self.dists[i].dlogp(args[i - 1], args[i]))
        return torch.cat(dlogps, dim=1)
Example #3
    def __init__(self, dists, name, seed=None):
        self.name = name
        self.dists = dists
        self.depth = len(dists)
        self.seed = seed

        # Collect the parameters of every level into one flat vector and
        # build an index so each level's slice can be recovered later.
        self.ps = torch.cat([c.ps for c in self.dists])
        pps = OrderedDict([("p" + str(i), self.dists[i].pp) for i in range(self.depth)])
        self.pp = ParameterParser.from_pps(pps)
        self.pidxs = OrderedDict([(k, v[0]) for (k, v) in self.pp.idxs_and_shapes.items()])
Example #4
    def forward(self, x):
        """Called when the DeepConvolutional model is invoked."""

        # cf. the definition of the stem
        # x = self.stem(x)

        # Run the four parallel branches and merge them along the channels.
        return torch.cat([
            self.branch_a(x),
            self.branch_b(x),
            self.branch_c(x),
            self.branch_d(x),
        ], dim=1)
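The four parallel branches merged along dim=1 follow the Inception pattern. A sketch of how branch_a..branch_d might be defined; the kernel sizes and channel counts below are assumptions:

import torch.nn as nn

class InceptionBlock(nn.Module):
    # Sketch: four parallel branches whose outputs share spatial size,
    # so they can be concatenated along the channel dimension.
    def __init__(self, in_ch=192):
        super().__init__()
        self.branch_a = nn.Conv2d(in_ch, 64, kernel_size=1)
        self.branch_b = nn.Sequential(
            nn.Conv2d(in_ch, 96, kernel_size=1),
            nn.Conv2d(96, 128, kernel_size=3, padding=1),
        )
        self.branch_c = nn.Sequential(
            nn.Conv2d(in_ch, 16, kernel_size=1),
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
        )
        self.branch_d = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(in_ch, 32, kernel_size=1),
        )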
Example #5
def check_accuracy(loader, model):
    # Run on the GPU when one is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    num_correct = 0
    num_samples = 0
    wrong_x = torch.tensor([], device=device)
    wrong_y = torch.tensor([], dtype=torch.long, device=device)
    model.eval()  # set model to evaluation mode
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device, dtype=torch.float32)  # move to device, e.g. GPU
            y = y.to(device=device, dtype=torch.long)
            scores = model(x)
            # Some models return a (logits, ...) tuple; use the logits.
            logits = scores[0] if isinstance(scores, (tuple, list)) else scores
            loss = torch.nn.functional.cross_entropy(logits, y)

            _, preds = logits.max(1)
            correct = (preds == y)
            num_correct += correct.sum()
            num_samples += preds.size(0)

            # Extract the misclassified samples.
            index = torch.argwhere(~correct).flatten()
            wrong_x = torch.cat((wrong_x, x[index]))
            wrong_y = torch.cat((wrong_y, y[index]))

            if num_samples > 2000:
                break

        # Take an extra training step on the misclassified samples.
        train_partial(model, wrong_x, wrong_y, optimizer)  # optimizer comes from the enclosing scope
        acc = float(num_correct) / num_samples
        return acc, loss
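train_partial is called above but not defined in this listing. A hypothetical sketch of what it might do, taking a single gradient step on the misclassified batch; the entire function body is an assumption:

import torch
import torch.nn.functional as F

def train_partial(model, x, y, optimizer):
    # Hypothetical: one extra optimization step on the misclassified samples.
    if x.numel() == 0:
        return  # nothing was misclassified in this pass
    model.train()
    optimizer.zero_grad()
    scores = model(x)
    logits = scores[0] if isinstance(scores, (tuple, list)) else scores
    loss = F.cross_entropy(logits, y.long())
    loss.backward()
    optimizer.step()
    model.eval()  # restore evaluation mode for the caller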
Example #6
    def forward(self, x):
        # Run the wrapped model and concatenate the selected dict entries.
        output = []
        y = self.model(x)
        for key in self.keys:
            output.append(y[key])
        return torch.cat(output, dim=self.dim)
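The wrapper above selects entries from a dict-returning model and concatenates them. A self-contained usage sketch; the DictConcat constructor and the toy backbone are assumptions built to match that forward:

import torch
import torch.nn as nn

class DictConcat(nn.Module):
    # Hypothetical constructor matching the forward above.
    def __init__(self, model, keys, dim=1):
        super().__init__()
        self.model, self.keys, self.dim = model, keys, dim

    def forward(self, x):
        y = self.model(x)
        return torch.cat([y[key] for key in self.keys], dim=self.dim)

class TwoHead(nn.Module):
    # Toy backbone that returns a dict of features.
    def __init__(self):
        super().__init__()
        self.a, self.b = nn.Linear(8, 4), nn.Linear(8, 4)

    def forward(self, x):
        return {"a": self.a(x), "b": self.b(x)}

wrapped = DictConcat(TwoHead(), keys=["a", "b"])
out = wrapped(torch.randn(2, 8))  # shape (2, 8): the two 4-dim heads side by side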
Example #7
    def forward(self, x):
        h1 = self.conv1(x)
        h2 = self.conv2(h1)
        h3 = self.conv3(h2)
        # Concatenate the intermediate feature maps (along dim 0 by default).
        return torch.cat([h1, h2, h3])
Example #8
    def forward(self, state, action):
        s = self.state_emb(state)
        a = self.action_emb(action)
        # Merge the state and action embeddings along the feature dimension.
        x = torch.tanh(torch.cat([s, a], dim=-1))
        x = torch.tanh(self.affine(x))
        return self.value_head(x)
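A sketch of the module this last forward implies, a small state-action value head; the layer sizes are assumptions:

import torch
import torch.nn as nn

class Critic(nn.Module):
    # Sketch: Q(s, a) head matching the forward above (sizes assumed).
    def __init__(self, state_dim=8, action_dim=2, hidden=64):
        super().__init__()
        self.state_emb = nn.Linear(state_dim, hidden)
        self.action_emb = nn.Linear(action_dim, hidden)
        self.affine = nn.Linear(2 * hidden, hidden)  # takes the concatenated embeddings
        self.value_head = nn.Linear(hidden, 1)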