Example #1
    def test_MaxUnpool2d_output_size(self):
        m = nn.MaxPool2d(3, stride=2, return_indices=True)
        mu = nn.MaxUnpool2d(3, stride=2)
        big_t = torch.rand(1, 1, 6, 6)
        big_t[0][0][4][4] = 100
        output_big, indices_big = m(Variable(big_t))
        # The max recorded at (4, 4) lies outside the default 5x5 unpooled
        # size, so unpooling without an explicit output_size must raise.
        self.assertRaises(RuntimeError, lambda: mu(output_big, indices_big))

        small_t = torch.rand(1, 1, 5, 5)
        for i in range(0, 4, 2):
            for j in range(0, 4, 2):
                small_t[:, :, i, j] = 100
        output_small, indices_small = m(Variable(small_t))
        # output_size may be a plain (h, w) tuple, a LongStorage, or a full
        # LongStorage that includes the batch and channel dims; sizes too far
        # from the default 5x5 are rejected (the valid range here is 4..6).
        for h in range(3, 10):
            for w in range(3, 10):
                if 4 <= h <= 6 and 4 <= w <= 6:
                    size = (h, w)
                    if h == 5:
                        size = torch.LongStorage(size)
                    elif h == 6:
                        size = torch.LongStorage((1, 1) + size)
                    mu(output_small, indices_small, output_size=size)
                else:
                    self.assertRaises(
                        ValueError, lambda: mu(output_small, indices_small,
                                               (h, w)))
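For context, the ambiguity this test exercises: pooling a 6x6 input with kernel 3 and stride 2 yields a 2x2 map whose default unpooled size is 5x5, so the original extent must be passed back explicitly. A minimal sketch, assuming a current PyTorch where the Variable wrapper is no longer needed:

import torch
import torch.nn as nn

pool = nn.MaxPool2d(3, stride=2, return_indices=True)
unpool = nn.MaxUnpool2d(3, stride=2)

x = torch.rand(1, 1, 6, 6)
out, idx = pool(x)  # out: 1x1x2x2

# Without output_size the unpooled result would be 1x1x5x5; passing the
# original size recovers the 6x6 extent.
restored = unpool(out, idx, output_size=x.size())
print(restored.shape)  # torch.Size([1, 1, 6, 6])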
Example #2
def utt_collate_fn(batch):
    batch_size = len(batch)
    feat_dim = batch[0][0].shape[1]
    # _use_shared_memory is a module-level flag set by DataLoader workers;
    # shared-memory tensors let results cross process boundaries cheaply.
    if _use_shared_memory:
        utt_lengths = torch.LongTensor(
            torch.LongStorage()._new_shared(batch_size)).zero_()
    else:
        utt_lengths = torch.LongTensor(batch_size)

    for i in range(batch_size):
        utt_lengths[i] = batch[i][0].shape[0]

    # Sort utterances by length, longest first; `indices` records the original
    # batch positions so features and labels can be reordered consistently.
    (utt_lengths, indices) = torch.sort(utt_lengths, descending=True)
    max_length = int(utt_lengths[0])
    if _use_shared_memory:
        # Build the padded stack on shared memory so the worker's result
        # can be handed to the main process without a copy.
        shared = torch.FloatStorage()._new_shared(
            max_length * batch_size * feat_dim)
        utt_stack = torch.FloatTensor(shared).view(
            max_length, batch_size, feat_dim).zero_()
        labels_out = torch.LongTensor(
            torch.LongStorage()._new_shared(batch_size)).zero_()
    else:
        utt_stack = torch.zeros(max_length, batch_size, feat_dim).float()  # (L, B, D)
        labels_out = torch.zeros(batch_size).long()

    # Copy each utterance into the zero-padded stack, longest first, and
    # reorder the labels to match the sorted order.
    for i in range(batch_size):
        utt_length = utt_lengths[i]
        utt_stack[:utt_length, i, :] = batch[indices[i]][0]
        labels_out[i] = int(batch[indices[i]][1])
    return (utt_stack, utt_lengths, labels_out)
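A hedged usage sketch: assuming each dataset item is a (FloatTensor [num_frames, feat_dim], integer label) pair, the collate function plugs into a DataLoader as below. SpeechDataset is a hypothetical placeholder, not part of the example above.

from torch.utils.data import DataLoader

# SpeechDataset is hypothetical: __getitem__ must return
# (FloatTensor [num_frames, feat_dim], int_label).
loader = DataLoader(SpeechDataset(), batch_size=8,
                    collate_fn=utt_collate_fn)

for utt_stack, utt_lengths, labels in loader:
    # utt_stack: (max_len, batch, feat_dim), zero-padded, sorted longest-first
    # utt_lengths: descending lengths; labels reordered to match
    break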
Example #3
    def test_ConvTranspose2d_output_size(self):
        m = nn.ConvTranspose2d(3, 4, 3, 3, 0, 2)
        i = Variable(torch.randn(2, 3, 6, 6))
        for h in range(15, 22):
            for w in range(15, 22):
                if 18 <= h <= 20 and 18 <= w <= 20:
                    size = (h, w)
                    if h == 19:
                        size = torch.LongStorage(size)
                    elif h == 20:
                        size = torch.LongStorage((2, 4) + size)
                    # size is a tuple, a LongStorage, or a full LongStorage
                    # including the batch and channel dims.
                    m(i, output_size=size)
                else:
                    self.assertRaises(ValueError, lambda: m(i, (h, w)))
Example #4
    def reset(self):
        """Resets the meter with empty member variables."""
        self.scores = torch.FloatTensor(torch.FloatStorage())
        self.targets = torch.LongTensor(torch.LongStorage())
        self.weights = torch.FloatTensor(torch.FloatStorage())
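The point of constructing tensors from empty storages is to get zero-element buffers that later results can be concatenated onto without a first-batch special case. A minimal sketch of that accumulation pattern (assumed; the add step is not shown in the source):

import torch

scores = torch.FloatTensor(torch.FloatStorage())  # 0-element buffer
assert scores.numel() == 0

batch_scores = torch.rand(4)
scores = torch.cat([scores, batch_scores])  # cat onto an empty tensor works
print(scores.numel())  # 4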
Example #5
    def read_tensor(reader, version):
        # source:
        # https://github.com/torch/torch7/blob/master/generic/Tensor.c#L1243
        ndim = reader.read_int()

        # read size:
        size = torch.LongStorage(reader.read_long_array(ndim))
        # read stride:
        stride = torch.LongStorage(reader.read_long_array(ndim))
        # read storage offset (Lua Torch is 1-indexed, Python is 0-indexed):
        storage_offset = reader.read_long() - 1
        # read storage:
        storage = reader.read()

        if storage is None or ndim == 0 or len(size) == 0 or len(stride) == 0:
            # empty torch tensor
            return python_class()

        return python_class().set_(storage, storage_offset, torch.Size(size), tuple(stride))
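For reference, Tensor.set_(storage, offset, size, stride) rebinds a tensor to a view of an existing storage, which is exactly how the deserializer above reassembles a tensor. A small standalone sketch on an in-memory storage (illustrative assumption, not part of the reader):

import torch

storage = torch.FloatTensor(list(range(12))).storage()  # 12 contiguous floats

t = torch.FloatTensor()
# View elements 2..11 of the storage as a 2x5 matrix (row stride 5).
t.set_(storage, 2, torch.Size((2, 5)), (5, 1))
print(t.shape)  # torch.Size([2, 5])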
Example #6
    def reset(self):
        """Resets the meter with empty member variables"""
        # self.scores accumulates the model outputs for the whole dataset
        # (train set or test set).
        self.scores = torch.FloatTensor(torch.FloatStorage())
        # self.targets accumulates the corresponding dataset labels.
        self.targets = torch.LongTensor(torch.LongStorage())
Example #7
def _tensor_str(self):
    n = PRINT_OPTS.edgeitems
    has_hdots = self.size()[-1] > 2 * n
    has_vdots = self.size()[-2] > 2 * n
    print_full_mat = not has_hdots and not has_vdots
    formatter = _number_format(self, min_sz=3 if not print_full_mat else 0)
    print_dots = self.numel() >= PRINT_OPTS.threshold

    dim_sz = max(2, max(len(str(x)) for x in self.size()))
    dim_fmt = "{:^" + str(dim_sz) + "}"
    dot_fmt = u"{:^" + str(dim_sz + 1) + "}"

    counter_dim = self.ndimension() - 2
    counter = torch.LongStorage(counter_dim).fill_(0)
    counter[counter.size() - 1] = -1
    finished = False
    strt = ''
    while True:
        nrestarted = [False for i in counter]
        nskipped = [False for i in counter]
        for i in _range(counter_dim - 1, -1, -1):
            counter[i] += 1
            if print_dots and counter[i] == n and self.size(i) > 2 * n:
                counter[i] = self.size(i) - n
                nskipped[i] = True
            if counter[i] == self.size(i):
                if i == 0:
                    finished = True
                counter[i] = 0
                nrestarted[i] = True
            else:
                break
        if finished:
            break
        elif print_dots:
            if any(nskipped):
                for hdot in nskipped:
                    strt += dot_fmt.format('...') if hdot \
                        else dot_fmt.format('')
                strt += '\n'
            if any(nrestarted):
                strt += ' '
                for vdot in nrestarted:
                    strt += dot_fmt.format(u'\u22EE' if vdot else '')
                strt += '\n'
        if strt != '':
            strt += '\n'
        strt += '({},.,.) = \n'.format(
            ','.join(dim_fmt.format(i) for i in counter))
        submatrix = reduce(lambda t, i: t.select(0, i), counter, self)
        strt += _matrix_str(submatrix, ' ', formatter, print_dots)
    return strt
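The counter above leans on the fact that a LongStorage is a flat, typed array supporting fill_ and item assignment. A minimal standalone illustration:

import torch

counter = torch.LongStorage(3).fill_(0)  # three int64 slots, zeroed
counter[2] = -1
print(list(counter))  # [0, 0, -1]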
Example #8
    def __preprocess_neighbors_maxpool(self, batch_graph):
        # Pad every node's neighbor list to the maximum degree in the batch
        # so neighbors can be gathered with a single index matrix.
        max_deg = max(graph.max_neighbors for graph in batch_graph)

        padded_neighbors_list = []
        start_idx = [0]

        for i, graph in enumerate(batch_graph):
            # Offset node ids so all graphs share one global numbering.
            start_idx.append(start_idx[i] + len(graph.g))
            padded_neighbors = []
            for j in range(len(graph.neighbors)):
                pad = [n + start_idx[i] for n in graph.neighbors[j]]
                # -1 marks padding slots (mapped to a dummy node downstream).
                pad.extend([-1] * (max_deg - len(pad)))
                if not self.learn_eps:
                    # Without a learnable epsilon, include the node itself.
                    pad.append(j + start_idx[i])
                padded_neighbors.append(pad)
            padded_neighbors_list.extend(padded_neighbors)

        # One row of global neighbor indices per node in the batch; a nested
        # list needs a 2-D LongTensor rather than a flat storage.
        return torch.LongTensor(padded_neighbors_list)
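A hedged sketch of how such a padded index matrix is typically consumed for max-pooling (the dummy -inf row trick is an assumption about downstream use, not code from this example):

import torch

def maxpool_neighbors(h, padded_neighbors):
    # h: (num_nodes, dim) features; padded_neighbors: (num_nodes, max_deg)
    # LongTensor where -1 marks a padding slot.
    dummy = torch.full((1, h.shape[1]), float('-inf'))
    h_ext = torch.cat([h, dummy], dim=0)  # index -1 now hits the dummy row
    return h_ext[padded_neighbors].max(dim=1)[0]  # (num_nodes, dim)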
Example #9
def _tensor_str(self):
    counter_dim = self.ndimension() - 2
    counter = torch.LongStorage(counter_dim).fill_(0)
    counter[0] = -1
    finished = False
    strt = ''
    while True:
        for i in _range(counter_dim):
            counter[i] += 1
            if counter[i] == self.size(i):
                if i == counter_dim - 1:
                    finished = True
                counter[i] = 0
            else:
                break
        if finished:
            break
        if strt != '':
            strt += '\n'
        strt += '({},.,.) = \n'.format(','.join(str(i) for i in counter))
        submatrix = reduce(lambda t, i: t.select(0, i), counter, self)
        strt += _matrix_str(submatrix, ' ')
    return strt
Example #10
    def reset(self):
        self.scores = torch.DoubleTensor(torch.DoubleStorage()).numpy()
        self.targets = torch.LongTensor(torch.LongStorage()).numpy()
Example #11
    def reset(self):
        """Reset stored scores and targets."""
        self.scores = torch.DoubleTensor(torch.DoubleStorage()).numpy()
        self.targets = torch.LongTensor(torch.LongStorage()).numpy()
Example #12
    def reset(self):
        self.scores = torch.FloatTensor(torch.FloatStorage())
        self.targets = torch.LongTensor(torch.LongStorage())
Example #13
    def reset(self):
        """Resets the meter with empty member variables"""
        self.scores = torch.FloatTensor(torch.FloatStorage())
        self.targets = torch.LongTensor(torch.LongStorage())
        print('scores shape....', self.scores.shape)
        print('targets shape....', self.targets.shape)
Example #14
    def reset(self):
        """Resets the scores and targets buffers."""
        self.scores = torch.DoubleTensor(torch.DoubleStorage()).numpy()
        self.targets = torch.LongTensor(torch.LongStorage()).numpy()
        self.cached = None
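These NumPy-backed variants rely on the fact that a tensor built from an empty storage converts to a zero-length array; a quick illustration (assumed, not shown in the source):

import torch

buf = torch.DoubleTensor(torch.DoubleStorage()).numpy()
print(buf.shape, buf.dtype)  # (0,) float64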