Example #1
    def make_batch(self, melody_batch, chord_batch, meta_batch):
        # Passing the data through np.array once before handing it to LongTensor turned out to be dramatically faster, so that approach is used here
        # However, the batch size has to grow to around 1000 before it reaches the same speed
        melody_batch, chord_batch = np.array(melody_batch), np.array(chord_batch)
        
        if 'mnp' in meta_batch[0]:            
            steps_batch = np.array([meta['mnp']['steps'] for meta in meta_batch])
            weights_batch = np.array([meta['mnp']['weights'] for meta in meta_batch])
            labels_batch = np.array([meta['mnp']['labels'] for meta in meta_batch])
            
            batch = {
                'melody': torch.FloatTensor(melody_batch),
                'chord': torch.FloatTensor(chord_batch),
                'mnp_steps': torch.LongTensor(steps_batch),
                'mnp_weights': torch.ShortTensor(weights_batch),
                'mnp_labels': torch.ShortTensor(labels_batch),
                'meta': meta_batch  # note: passing this through AttrDict turns the list into a tuple
            }
        else:
            batch = {
                'melody': torch.FloatTensor(melody_batch),
                'chord': torch.FloatTensor(chord_batch),
                'meta': meta_batch  # note: passing this through AttrDict turns the list into a tuple
            }

        return AttrDict(batch)
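The speed claim in the comments is easy to verify in isolation. A minimal sketch (sizes are illustrative; timings vary by machine and PyTorch version):

import time

import numpy as np
import torch

nested = [[i % 128 for i in range(64)] for _ in range(4096)]

t0 = time.perf_counter()
a = torch.LongTensor(nested)            # directly from nested Python lists
t1 = time.perf_counter()
b = torch.LongTensor(np.array(nested))  # via a single np.array first
t2 = time.perf_counter()

print(f"from lists:   {t1 - t0:.4f}s")
print(f"via np.array: {t2 - t1:.4f}s")
assert torch.equal(a, b)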
Example #2
    def collate(samples, pad_idx, eos_idx, bos_idx):
        if len(samples) == 0:
            return {}

        def merge(key, add_bos, add_eos):
            return LanguagePairDataset.collate_tokens(
                [s[key] for s in samples], pad_idx, eos_idx, bos_idx, add_bos,
                add_eos)

        id = torch.IntTensor([s['id'] for s in samples])
        src_tokens = merge('source', add_bos=False, add_eos=False)
        # sort by descending source length
        src_lengths = torch.ShortTensor([s['source'].numel() for s in samples])
        src_lengths, sort_order = src_lengths.sort(descending=True)  # FIXME
        id = id.index_select(0, sort_order)
        src_tokens = src_tokens.index_select(0, sort_order)
        ntokens = None
        target_out = merge('target', add_bos=False, add_eos=True)
        target_in = merge('target', add_bos=True, add_eos=False)
        trg_lengths = torch.ShortTensor([s['target'].numel() for s in samples])
        # follow the source order:
        target_in = target_in.index_select(0, sort_order)
        target_out = target_out.index_select(0, sort_order)
        ntokens = sum(len(s['target']) for s in samples)

        return {
            'id': id,
            'ntokens': ntokens,
            'source': src_tokens.cuda(),
            'source_lengths': src_lengths.cuda(),
            'target_in': target_in.cuda(),
            'target_out': target_out.cuda(),
            'target_lengths': trg_lengths.cuda()
        }
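The sort-and-reorder step above is the part worth internalizing: every batch-aligned tensor must be permuted with the same `sort_order`, or rows stop referring to the same sample. Descending length order also matters when the batch is later packed with `pack_padded_sequence`, whose `enforce_sorted` default assumes it. A self-contained sketch of just that step (toy data, no fairseq dependency):

import torch

src_tokens = torch.LongTensor([[4, 5, 0], [7, 0, 0], [1, 2, 3]])  # padded batch
src_lengths = torch.LongTensor([2, 1, 3])                         # true lengths
ids = torch.IntTensor([10, 11, 12])

# sort by descending source length, then apply the same permutation everywhere
src_lengths, sort_order = src_lengths.sort(descending=True)
src_tokens = src_tokens.index_select(0, sort_order)
ids = ids.index_select(0, sort_order)

print(src_lengths)  # tensor([3, 2, 1])
print(ids)          # tensor([12, 10, 11], dtype=torch.int32)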
Example #3
	def thresh_item(self,Item,NO_item,item_count,dim,add_dim=0):
		'''	Thresholding of items; if an item has an even count, a random vector is added to break ties
		Parameters
		----------
		Item: accumulated HD vector, torch short tensor shape	if add_dim=0: (NO_item,dim)
																if add_dim!=0:(NO_item,add_dim,dim)
		NO_item: number of items to threshold 
		item_count: number of additions per Item for determining threshold, numpy array shape (NO_item)
		dim : HD dimension 
		add_dim: additional dimension
		Return  
		------
		Item: Thresholded item 
		'''	
		
		for i in range(NO_item): 
			if item_count[i] % 2 == 0: # even number of counts 
				if add_dim == 0: # add a dim- dimensional tensor
					Item[i].add_(torch.ShortTensor(dim).bernoulli_().to(self.device)) # add random vector 
				else: # add a dim x add_dim - dimensional tensor 
					Item[i].add_(torch.ShortTensor(add_dim,dim).bernoulli_().to(self.device)) # add random vector 

				item_count[i] += 1 

			# Thresholding
			Item[i] = Item[i] > int(item_count[i]/2)

		return Item
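The loop above implements component-wise majority voting. The same rule can be expressed without the Python loop; a vectorized sketch on toy data (dimensions are illustrative):

import numpy as np
import torch

# two accumulated items over an 8-dimensional binary HD space,
# each built from 3 additions, so the majority threshold is 3 // 2 = 1
Item = torch.ShortTensor([[0, 1, 2, 3, 0, 3, 1, 2],
                          [1, 1, 1, 2, 2, 2, 0, 3]])
item_count = np.array([3, 3])

thr = torch.from_numpy(item_count // 2).view(-1, 1)  # per-item threshold
thresholded = (Item > thr).short()                   # 1 iff set in a majority
print(thresholded)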
Example #4
def transfer4backend1(tag, send_buf, flag=False):

    if not flag:
        left, right = get_left_right(tag)
        dist.send(tensor=torch.ShortTensor([send_buf.size()]).view(-1),
                  dst=right)
        send_opt = dist.isend(tensor=send_buf, dst=right)
        send_opt.wait()
        return None

    else:
        left, right = get_left_right(tag)
        dist.send(tensor=torch.ShortTensor([send_buf.size()]).view(-1),
                  dst=right)
        send_opt = dist.isend(tensor=send_buf, dst=right)
        try:
            shape_buf = torch.zeros([1], dtype=torch.short)
            dist.recv(tensor=shape_buf, src=left)
            recv_buf = torch.zeros(torch.Size(shape_buf.tolist()))
            dist.recv(tensor=recv_buf, src=left)
        except RuntimeError as error:
            print("runtime error:", error)
            return None
        send_opt.wait()
        return recv_buf
Example #5
def preprocess(feats, labels):
    # feats, lens = zip(*[[torch.ShortTensor((f * 32767).astype(np.int16)), f.shape[0]] for f in feats])
    feats = [torch.ShortTensor((f * 32767).astype(np.int16)) for f in feats]
    labels = [torch.ShortTensor((l * 32767).astype(np.int16)) for l in labels]
    # lens = torch.LongTensor(lens)
    feats = torch.nn.utils.rnn.pad_sequence(feats, batch_first=True)
    labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True)
    return feats, labels
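The `* 32767` / int16 pattern packs float audio in [-1, 1] into half the memory of float32; dividing by the same constant recovers the waveform to within quantization error. A minimal round trip:

import numpy as np
import torch

f = np.array([0.0, 0.5, -0.25, 1.0], dtype=np.float32)
packed = torch.ShortTensor((f * 32767).astype(np.int16))
restored = packed.float() / 32767.0

print(packed)    # tensor([    0, 16383,  -8191, 32767], dtype=torch.int16)
print(restored)  # close to the original values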
Example #6
    def get_positions(self, positions):
        """Evaluates valid sampling coordinates.

        For each position, check whether the patterns are applicable; if so, the
        position is added to the dataset. Allocates a tensor of the maximum
        possible size and returns it together with the number of valid
        elements, so that the caller can extract only the relevant part of
        the tensor.

        Parameters
        ----------
        positions : tuple(list[int], ..)
            tuple containing lists of int, each list for a dimension, each
            int for a coordinate.

        Returns
        -------
        tuple(:class:`torch.ShortTensor`, int)
            tensor of coordinates and the number of valid elements in it.

        """

        # if there is a mask use it
        if 'image_mask' in self._data.keys():
            img_array = self._data['image_mask'].to_torch()
            use_mask = True
        elif 'image_ref' in self._data.keys():
            img_array = self._data['image_ref'].to_torch()
            use_mask = False
        else:
            raise ValueError(
                "data map must contain at least a reference image.")

        max_coord_nb = 1
        for n_coord in [len(l) for l in positions]:
            max_coord_nb *= n_coord
        coordinates = torch.ShortTensor(max_coord_nb, len(positions))

        index = 0
        for position in product(*positions):
            if not use_mask or (use_mask and img_array[position] == 1):

                # for each pixel, see if the patterns are applicable
                # if so, store the position for future extraction
                can_extract = []
                for desc, composition in self._pattern_map.items():
                    input_name, pattern = composition
                    if desc.startswith('input'):
                        can_extract.append(
                            pattern.can_apply(
                                self._data[input_name].to_torch(), position))

                # if all of the patterns can be extracted
                if len(can_extract) > 0 and all(can_extract):
                    coordinates[index] = torch.ShortTensor(position)
                    index += 1

        return coordinates, index
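The preallocate-then-trim pattern used here avoids growing a tensor inside the loop; the caller slices the buffer with the returned index. The idea in isolation (toy positions, hypothetical names):

import torch

max_n = 10
buf = torch.ShortTensor(max_n, 2)  # uninitialized upper-bound allocation
index = 0
for x, y in [(0, 0), (2, 1), (5, 3)]:  # pretend only these pass the checks
    buf[index] = torch.ShortTensor([x, y])
    index += 1

valid = buf[:index]  # the caller keeps only the filled rows
print(valid)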
Example #7
def preprocessAndSaveToDisk(df, directory=MAESTRO_DIRECTORY, override=False):
    dirname = F'{directory}-tensors'
    if os.path.isdir(dirname):
        if override:
            print(F'overriding existing directory {dirname}')
        else:
            print(F'directory {dirname} already exists, exiting')
            return

    os.makedirs(dirname)

    num_tracks = len(df)
    print(F'Processing {num_tracks} tracks and saving to {dirname}')
    print('This will take about 10 minutes.')
    print('Tracks processed: ', end='', flush=True)

    start = time.time()
    for i in range(num_tracks):
        filename = df['midi_filename'][i]
        mf = readMidi(F'{directory}/{filename}')
        x = torch.ShortTensor(midiToIdxs(mf))
        os.makedirs(F'{dirname}/{os.path.dirname(filename)}', exist_ok=True)
        torch.save(x, F'{dirname}/{filename}.pt')
        if i % 10 == 0:
            print(i + 1, end=' ', flush=True)
    print()

    time_elapsed = time.time() - start
    print(F'done. time elapsed: {time_elapsed}')
Example #8
    def __init__(self,
                 feat_dim,
                 spat_dim,
                 HD_dim=1000,
                 d=11,
                 encoding='single',
                 code='thermometer',
                 sparsity=0.5,
                 learning='average',
                 n_classes=4,
                 cuda_device='cuda:0',
                 k=1):

        super().__init__(feat_dim, spat_dim, HD_dim, d, encoding, code,
                         sparsity, learning, n_classes, cuda_device)

        self.k = k  # number of centroids per class
        self.AssMem = torch.ShortTensor(self.NO_classes, k,
                                        self.HD_dim).zero_().to(self.device)
        self.kMean = KMeans(n_clusters=k,
                            init='k-means++',
                            n_init=10,
                            max_iter=300,
                            tol=1e-4,
                            cuda_device=cuda_device)
        self.fit = self.kmeans_fit
Example #9
 def gotoFrame(self, frame=0):
     self.currFrameNum = frame
     if frame == 0:
         self.currentFrame = torch.ShortTensor(
             np.zeros((self.imageHeight, self.imageWidth), dtype=np.int16))
     else:
         self.currentFrame = self.readFrame(frame)
Example #10
	def average_fit(self, samples, labels, n_iter=0):
		'''	Training of HD classifier, associative average learning
		Parameters
		----------
		samples: feature samples, numpy array shape 	spatial:(NO_samples,temp_dim,spat_dim,N_feat) 
													single:(NO_samples,temp_dim,N_feat)
		labels: labels of the training data 
		n_iter: number of additional training iterations for the associative memory 

		'''	
		samples,labels = self.flatten_samples(samples,labels)

		# placeholder for associative memory 
		ClassItem = torch.ShortTensor(self.NO_classes,self.HD_dim).zero_().to(self.device)
		
		NO_samples = samples.shape[0]
		# counts occurrences of each class for thresholding 
		class_count = np.zeros(self.NO_classes) 


		# associative average learning 
		for i in range(NO_samples):
			#self.get_statistics(samples[i])
			S, cnt = self.transform(samples[i], clipping = False) # get transformed HD_vector(not thresholded yet)
			ClassItem[labels[i]-1].add_(S) 
			class_count[labels[i]-1] += cnt 

		# Thresholding of associative memory  	
		self.ClassItem = self.thresh_item(ClassItem,self.NO_classes, class_count,self.HD_dim)
Example #11
    def convertToIdx(self,
                     labels,
                     unkWord,
                     bos_word=None,
                     eos_word=None,
                     type='int64'):
        """
        Convert `labels` to indices. Use `unkWord` if not found.
        Optionally insert `bos_word` at the beginning and `eos_word` at the .
        """
        vec = []

        if bos_word is not None:
            vec += [self.lookup(bos_word)]

        unk = self.lookup(unkWord)
        for label in labels:
            vec.append(self.lookup(label, default=unk))
        # vec += [self.lookup(label, default=unk) for label in labels]

        if eos_word is not None:
            vec += [self.lookup(eos_word)]

        if type == 'int64':
            return torch.LongTensor(vec)
        elif type == 'int32' or type == 'int':
            return torch.IntTensor(vec)
        elif type == 'int16':
            return torch.ShortTensor(vec)
        else:
            raise NotImplementedError
Example #12
    def transform(self, image):
        '''
        Transforms a 3D image given as an np.ndarray (z, x, y) into a torch.ShortTensor of the same shape.
        '''
        image_torch = torch.ShortTensor(image)
        return image_torch
Example #13
def _graph_constant(g, value, dims, type, *args, **kwargs):
    assert isinstance(value, numbers.Number)
    assert type is not None
    isscalar = False
    if dims is None or dims == 0 or set(dims) == set([0]):
        dims = [1]
        isscalar = True
    type = type.lower()
    if type == "char":
        tensor = torch.CharTensor(*dims)
    elif type == "short":
        tensor = torch.ShortTensor(*dims)
    elif type == "int":
        tensor = torch.IntTensor(*dims)
    elif type == "long":
        tensor = torch.LongTensor(*dims)
    elif type == "half":
        tensor = torch.HalfTensor(*dims)
    elif type == "float":
        tensor = torch.FloatTensor(*dims)
    elif type == "double":
        tensor = torch.DoubleTensor(*dims)
    else:
        raise ValueError(
            "Unknown type, type should be one of the following strings: "
            "char, short, int, long, half, float, double")
    tensor.fill_(value)
    if isscalar:
        return g.op("Constant", *args, value_z=tensor, **kwargs)
    return g.op("Constant", *args, value_t=tensor, **kwargs)
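The if/elif chain maps a type name to a legacy tensor constructor; the same dispatch can be written as a lookup table. A sketch of that alternative (not the actual torch.onnx helper):

import numbers

import torch

_CONSTRUCTORS = {
    "char": torch.CharTensor, "short": torch.ShortTensor,
    "int": torch.IntTensor, "long": torch.LongTensor,
    "half": torch.HalfTensor, "float": torch.FloatTensor,
    "double": torch.DoubleTensor,
}

def make_constant(value, dims, type_str):
    assert isinstance(value, numbers.Number)
    ctor = _CONSTRUCTORS.get(type_str.lower())
    if ctor is None:
        raise ValueError("Unknown type: %s" % type_str)
    return ctor(*dims).fill_(value)

print(make_constant(7, [2, 3], "short"))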
Example #14
  def preprocessAndSaveToDisk(self, directory=MAESTRO_DIRECTORY):
    length = len(self)
    os.makedirs(directory + '-tensors', exist_ok=True)
    print('saving', length, self.phase, 'tracks: ', end=' ')

    total_duration = 0
    total_events = 0
    max_events = 0

    for i in range(length):
      filename = self.df['midi_filename'][i]
      mf = readMidi(directory + '/' + filename)
      x = torch.ShortTensor(midiToIdxs(mf))
      os.makedirs(directory + '-tensors/' + os.path.dirname(filename), exist_ok=True)
      torch.save(x, directory + '-tensors/' + filename + '.pt')

      total_duration += self.df['duration'][i]
      max_events = max(max_events, len(x))
      total_events += len(x)

      if i % 10 == 0:
        print(i, end=' ')

    print()
    print(
      '# of songs:', 
      length, 
      'total duration:', 
      total_duration, 
      '# of events:', 
      total_events, 
      'most events in one song:', 
      max_events
    )
Example #15
    def test_1_channel_tensor_to_pil_image(self):
        to_tensor = transforms.ToTensor()

        img_data_float = torch.Tensor(1, 4, 4).uniform_()
        img_data_byte = torch.ByteTensor(1, 4, 4).random_(0, 255)
        img_data_short = torch.ShortTensor(1, 4, 4).random_()
        img_data_int = torch.IntTensor(1, 4, 4).random_()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_outputs = [
            img_data_float.mul(255).int().float().div(255).numpy(),
            img_data_byte.float().div(255.0).numpy(),
            img_data_short.numpy(),
            img_data_int.numpy()
        ]
        expected_modes = ['L', 'L', 'I;16', 'I']

        for img_data, expected_output, mode in zip(inputs, expected_outputs,
                                                   expected_modes):
            for transform in [
                    transforms.ToPILImage(),
                    transforms.ToPILImage(mode=mode)
            ]:
                img = transform(img_data)
                assert img.mode == mode
                assert np.allclose(expected_output, to_tensor(img).numpy())
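As the test asserts, a ShortTensor image maps to PIL's 16-bit integer mode 'I;16'. A minimal check, assuming a torchvision version with the same behavior as the one under test:

import torch
from torchvision import transforms

img_data_short = torch.ShortTensor(1, 4, 4).random_()
pil_img = transforms.ToPILImage()(img_data_short)
print(pil_img.mode)  # 'I;16'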
Example #16
    def convert_samples_to_variables(
        cls, data_config, info, ontologies, patients, labeler
    ):
        """
        Name is confusing but this constructs _targets_ from next day codes and terms. 
        """

        max_length = 0
        indices = []
        targets = []

        for i, patient in enumerate(patients):
            labels = deque(labeler.label(patient))

            index = -1
            for day_offset, day in enumerate(patient.days):
                if len(labels) == 0:
                    break

                if len(day.observations) != 0:
                    index += 1

                if day_offset == labels[0].day_index:
                    indices.append((i, index))
                    targets.append(labels[0].is_positive)
                    labels.popleft()
                    max_length = max(max_length, index + 1)

        indices = torch.ShortTensor(
            [(i * max_length + index) for i, index in indices]
        )
        targets = torch.ByteTensor(targets)

        return (indices, targets)
Example #17
    def _wordGrammEncoding(self, word):
        '''
        Compute the ngramm encoding for a given word
        :param word: word to encode
        :return: the word's ngramm hypervector
        '''

        n = len(word)
        # alloc shift register
        shift_reg = t.ShortTensor(n, self._D).zero_()
        # fill shift register with initial item memories
        for i, letter in enumerate(word):
            key = self._charToKey(letter)
            if key >= self._nitem or key < 0:
                print("Error! Key not valid: char = {}".format(letter))
            else:
                shift_reg[n - i - 1] = self._lookupItemMemory(key)
        # print("Letter encodings")
        # print(shift_reg)
        # shift item memories
        for i in range(1, n):
            shift_reg[i] = self._circshift(shift_reg[i], i)
        # print("Shifted letter encodings")
        # print(shift_reg)

        # calculate ngramm of _block (_Y)
        wordgramm = shift_reg[0]
        # print("XOR 0: {}".format(wordgramm))

        for i in range(1, n):
            wordgramm = self._bind(wordgramm, shift_reg[i])
            # print("XOR {}: {}".format(i, wordgramm))

        return wordgramm
Example #18
    def load(self, audio_path, midi_path):
        """
        load an audio track and the corresponding labels

        """
        audio, sr = soundfile.read(audio_path, dtype='int16')
        assert sr == SAMPLE_RATE
        fs = sr / self.hop_size

        audio = th.ShortTensor(audio)
        audio_length = len(audio)

        n_keys = MAX_MIDI - MIN_MIDI + 1
        mel_length = audio_length // self.hop_size + 1

        midi = pretty_midi.PrettyMIDI(midi_path)
        midi_length_sec = midi.get_end_time()
        frame_length = np.min((int(midi_length_sec * fs), mel_length))

        audio = audio[:frame_length * self.hop_size]
        frame = midi.get_piano_roll(fs=fs)
        onset = np.zeros_like(frame)
        for inst in midi.instruments:
            for note in inst.notes:
                onset[note.pitch, int(note.start * fs)] = 1

        frame = th.from_numpy(frame[21:108 +
                                    1].T)  # to shape (times x 88 pitch)
        onset = th.from_numpy(onset[21:108 + 1].T)
        data = dict(path=audio_path, audio=audio, frame=frame, onset=onset)
        return data
Example #19
    def __getitem__(self, index):
        path, path_output = self.imgs[index]
        img = self.loader(path)  # .astype(int); image has dimension height x width x n_channels
        output = self.loader(path_output)  # .astype(int)
        #img = imresize(img, (512, 512))
        #output = imresize(output, (512, 512))

        img = img.astype('int16')
        output = output.astype('int16')

        # if we want to crop the image at the centre
        if self.crop:
            h, w, channels = img.shape
            half = self.size_cropped // 2
            img = img[h // 2 - half:h // 2 + half,
                      w // 2 - half:w // 2 + half, :]

            h, w = output.shape
            output = output[h // 2 - half:h // 2 + half,
                            w // 2 - half:w // 2 + half]

        img = np.transpose(img, (2, 0, 1))

        # if we want to normalize the images to [-1,1]
        if self.normalize:
            img = img.astype(float)
            img = (img - 128) / 128
            img = torch.FloatTensor(img)
        else:
            img = torch.ShortTensor(img)


#        if self.transform is not None:
#           img = self.transform(img)
#        if self.target_transform is not None:
#            output = self.target_transform(output)

        img = img.float()
        img_id = os.path.basename(path).split('.')[0]
        return img_id, img, torch.ShortTensor(output).long()
Example #20
 def get_caption(self, idx):
     """Get caption and convert list of strings to tensor of word indices"""
     tokenized_caption = self.captions[idx]
     token_idx = torch.ShortTensor([
         self.vocab.word2idx.get(token, self.vocab.UNK_INDEX)
         for token in tokenized_caption
     ])
     return token_idx
Example #21
def NN(epoch, net, lemniscate, trainloader, testloader, recompute_memory=0):
    net.eval()
    net_time = AverageMeter()
    cls_time = AverageMeter()
    losses = AverageMeter()
    correct = 0.
    total = 0
    testsize = testloader.dataset.__len__()

    trainFeatures = lemniscate.memory.t()
    if hasattr(trainloader.dataset, 'imgs'):
        trainLabels = torch.LongTensor([y for y in trainloader.dataset.targets]).cuda()
    else:
        trainLabels = torch.ShortTensor(trainloader.dataset.train_labels).cuda()

    if recompute_memory:
        transform_bak = trainloader.dataset.transform
        trainloader.dataset.transform = testloader.dataset.transform
        temploader = torch.utils.data.DataLoader(trainloader.dataset, batch_size=100, shuffle=False, num_workers=1)
        for batch_idx, (inputs, targets, indexes) in enumerate(temploader):
            targets = targets.cuda(non_blocking=True)
            batchSize = inputs.size(0)
            features = net(inputs)
            trainFeatures[:, batch_idx*batchSize:batch_idx*batchSize+batchSize] = features.data.t()
        trainLabels = torch.LongTensor(temploader.dataset.train_labels).cuda()
        trainloader.dataset.transform = transform_bak
    
    end = time.time()
    with torch.no_grad():
        for batch_idx, (inputs, targets, indexes) in enumerate(testloader):
            targets = targets.cuda(non_blocking=True)
            batchSize = inputs.size(0)
            features = net(inputs)
            net_time.update(time.time() - end)
            end = time.time()

            dist = torch.mm(features, trainFeatures)

            yd, yi = dist.topk(1, dim=1, largest=True, sorted=True)
            candidates = trainLabels.view(1,-1).expand(batchSize, -1)
            retrieval = torch.gather(candidates, 1, yi)

            retrieval = retrieval.narrow(1, 0, 1).clone().view(-1)
            yd = yd.narrow(1, 0, 1)

            total += targets.size(0)
            correct += retrieval.eq(targets.data).sum().item()
            
            cls_time.update(time.time() - end)
            end = time.time()

            print('Test [{}/{}]\t'
                  'Net Time {net_time.val:.3f} ({net_time.avg:.3f})\t'
                  'Cls Time {cls_time.val:.3f} ({cls_time.avg:.3f})\t'
                  'Top1: {:.2f}'.format(
                  total, testsize, correct*100./total, net_time=net_time, cls_time=cls_time))

    return correct/total
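The retrieval core of NN is a dot product against the feature memory bank followed by a label gather. Stripped of the timing and loader code (toy shapes, random features):

import torch

train_features = torch.randn(128, 8)   # memory bank: (dim, N), as in lemniscate.memory.t()
train_labels = torch.LongTensor([0, 1, 0, 2, 1, 2, 0, 1])
features = torch.randn(3, 128)         # a test batch of embeddings

dist = torch.mm(features, train_features)          # similarity to every stored sample
yd, yi = dist.topk(1, dim=1, largest=True, sorted=True)
candidates = train_labels.view(1, -1).expand(features.size(0), -1)
retrieval = torch.gather(candidates, 1, yi).view(-1)
print(retrieval)                                   # 1-NN label per test sample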
Example #22
def data_load(args, device):
    X_train, y_train, seq_len_train, X_test, y_test, seq_len_test = np.load('data/dataset.npz').values()

    X_train = torch.FloatTensor(X_train).to(device)
    y_train = torch.LongTensor(y_train).to(device)
    seq_len_train = torch.ShortTensor(seq_len_train).to(device)

    X_test = torch.FloatTensor(X_test).to(device)
    y_test = torch.LongTensor(y_test).to(device)
    seq_len_test = torch.ShortTensor(seq_len_test).to(device)

    train_dataset = torch.utils.data.TensorDataset(X_train, y_train, seq_len_train)
    test_dataset = torch.utils.data.TensorDataset(X_test, y_test, seq_len_test)

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size_train, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size_test, shuffle=False)

    return train_loader, test_loader
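Unpacking `np.load(...).values()` positionally only works if the archive was saved with its keys in exactly that order. A sketch of the matching writer (dummy shapes; the real arrays come from whatever preprocessing produced the dataset):

import numpy as np

np.savez('dataset.npz',
         X_train=np.zeros((10, 5, 3), dtype=np.float32),
         y_train=np.zeros(10, dtype=np.int64),
         seq_len_train=np.full(10, 5, dtype=np.int16),
         X_test=np.zeros((4, 5, 3), dtype=np.float32),
         y_test=np.zeros(4, dtype=np.int64),
         seq_len_test=np.full(4, 5, dtype=np.int16))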
Example #23
    def readFrame(self, frame=None):
        if frame is None:
            self.currFrameNum += 1
            self._f.seek(self._shift +
                         (self.currFrameNum - 1) * self._trueImageSize)
            dat = self._f.read(self.imageSize)
            self.currentFrame = torch.ShortTensor(
                np.reshape(np.array(list(dat), dtype=np.int16),
                           (self.imageHeight, self.imageWidth)))
            self._currTime, self._currTimeMS, self._currTimeUS = struct.unpack(
                '<LHH', self._f.read(8))
        if isinstance(frame, int):
            self._f.seek(self._shift + (frame - 1) * self._trueImageSize)
            dat = self._f.read(self.imageSize)

            return torch.ShortTensor(
                np.reshape(np.array(list(dat), dtype=np.uint8),
                           (self.imageHeight, self.imageWidth)))
Example #24
    def create_event_tensor(self):
        size = self.total_event_inputs()
        event_tensor = torch.ShortTensor(size)
        chord_tensor = torch.ByteTensor(size)

        for i, event in enumerate(self.iterate_events()):
            event_tensor[i] = event[0]
            chord_tensor[i] = event[1]

        return event_tensor, chord_tensor
Example #25
    def create_volume_tensor(self):
        size = self.total_event_inputs()
        event_tensor = torch.ShortTensor(size)
        volume_tensor = torch.FloatTensor(size)

        for i, event in enumerate(self.iterate_volumes()):
            event_tensor[i] = event[0]
            volume_tensor[i] = event[1]

        return event_tensor, volume_tensor
Example #26
 def _load_sample_data(self, snd_np, snd_dtype):
     """
     Populates self.snd_data
     """
     if snd_dtype is np.uint8:
         snd_data = torch.ByteTensor(snd_np)
     elif snd_dtype is np.uint16:
         # torch has no uint16 dtype, so samples are stored as int16
         snd_data = torch.ShortTensor(snd_np)
     elif snd_dtype is np.int32:
         snd_data = torch.IntTensor(snd_np)
     else:
         raise ValueError("unsupported sample dtype: {}".format(snd_dtype))
     self.snd_data = snd_data
Example #27
 def __getitem__(self, idx):
     X = torch.LongTensor(
         self.movies_list[idx].overview_indices[:self.max_len])
     X_len = torch.ShortTensor([X.shape[0]]).squeeze()
     X = F.pad(X, (0, self.max_len - X.shape[0])).to(self.device)
     y = torch.FloatTensor([
         ix in self.movies_list[idx].genres_indices
         for ix in range(self.num_classes)
     ]).to(self.device)
     sample = {"idx": idx, "X": X, "X_len": X_len, "y": y}
     return sample
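The pad-to-`max_len` plus true-length pattern above is what later masking (or `pack_padded_sequence`) consumes. In isolation:

import torch
import torch.nn.functional as F

max_len = 6
X = torch.LongTensor([3, 1, 4, 1])                 # token indices
X_len = torch.ShortTensor([X.shape[0]]).squeeze()  # scalar true length
X = F.pad(X, (0, max_len - X.shape[0]))            # right-pad with zeros

print(X)      # tensor([3, 1, 4, 1, 0, 0])
print(X_len)  # tensor(4, dtype=torch.int16)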
Example #28
 def test_all_dtypes():
     return (
         torch.BoolTensor([2]),
         torch.LongTensor([3]),
         torch.ByteTensor([4]),
         torch.CharTensor([5]),
         torch.DoubleTensor([6]),
         torch.FloatTensor([7]),
         torch.IntTensor([8]),
         torch.ShortTensor([1]),
         torch.HalfTensor([1]),
     )
Example #29
    def encodeText(self, text):

        words = text.split()
        # print(words)
        res = t.ShortTensor(self._D).zero_()

        for word in words:
            ngramm = self._wordGrammEncoding(word)
            res = res | ngramm
            # print("Ngramm: {}\nRes: {}\n".format(ngramm, res))

        return res
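encodeText bundles word hypervectors with OR, while _wordGrammEncoding binds letter vectors with XOR (per its debug prints). Both operators work directly on integer tensors; a toy demonstration:

import torch

D = 8
a = torch.ShortTensor(D).bernoulli_()  # random binary hypervector
b = torch.ShortTensor(D).bernoulli_()

bound = a ^ b    # binding (XOR): dissimilar to both inputs
bundled = a | b  # bundling (OR): preserves set components of both
print(bound, bundled)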
Example #30
def parse_code(code_str, dictionary, append_eos):
    code, duration = torch.unique_consecutive(
        torch.ShortTensor(list(map(int, code_str.split()))),
        return_counts=True)
    code = " ".join(map(str, code.tolist()))
    code = dictionary.encode_line(code, append_eos).short()

    if append_eos:
        duration = torch.cat((duration, duration.new_zeros((1,))), dim=0)  # eos
    duration = duration.short()
    return code, duration
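`torch.unique_consecutive` with `return_counts=True` is a run-length encoding: repeated frame codes collapse to one code plus its duration. On a small input:

import torch

codes = torch.ShortTensor([5, 5, 5, 2, 2, 7])
code, duration = torch.unique_consecutive(codes, return_counts=True)
print(code)      # tensor([5, 2, 7], dtype=torch.int16)
print(duration)  # tensor([3, 2, 1])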