Example #1
def list_examples(batch, device=None, padding=None):

    if len(batch) == 0:
        raise ValueError('batch is empty')

    if padding is not None:
        # Padding values are not supported by this converter.
        raise NotImplementedError

    first_elem = batch[0]

    if isinstance(first_elem, tuple):
        result = []
        if not isinstance(padding, tuple):
            padding = [padding] * len(first_elem)

        for i in six.moves.range(len(first_elem)):
            result.append([to_device(device, example[i]) for example in batch])

        return tuple(result)

    elif isinstance(first_elem, dict):
        result = {}
        if not isinstance(padding, dict):
            padding = {key: padding for key in first_elem}

        for key in first_elem:
            result[key] = [to_device(device, example[key])
                           for example in batch]

        return result

    else:
        # Plain (non-tuple/dict) example batches are not supported;
        # chainer.dataset.concat_examples handles that case instead.
        raise NotImplementedError
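A minimal CPU-only sketch of calling this converter (an illustration; it assumes list_examples, six, and chainer.dataset.to_device are importable in the same module, and device=None leaves every array on the host):

import numpy as np

batch = [(np.zeros((3, 32, 32), dtype=np.float32), np.int32(0)),
         (np.ones((3, 32, 32), dtype=np.float32), np.int32(1))]
xs, ts = list_examples(batch, device=None)
assert len(xs) == len(ts) == 2  # one list per tuple field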
Example #2
def convert_hybrid(batch, gpu):
    def to_device_batch(batch, gpu):
        if gpu is None:
            return batch
        elif gpu < 0:
            return [chainer.dataset.to_device(gpu, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(gpu, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {
        'ids': [x1 for x1, t1, t2, t3 in batch],
        'ts_link':
        to_device(x=np.array([t1 for _, t1, t2, t3 in batch], dtype='i'),
                  device=gpu),
        'ts_type':
        to_device(x=np.array([t2 for _, t1, t2, t3 in batch], dtype='i'),
                  device=gpu),
        'ts_link_type':
        to_device(x=np.array([t3 for _, t1, t2, t3 in batch], dtype='i'),
                  device=gpu)
    }
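A toy CPU call (a sketch; the (id, t1, t2, t3) element layout is inferred from the comprehensions above, and gpu=None skips the device transfer):

import numpy as np

batch = [('pair-0', 1, 0, 2), ('pair-1', 0, 1, 3)]
out = convert_hybrid(batch, gpu=None)
print(out['ids'], out['ts_link'].dtype)  # ['pair-0', 'pair-1'] int32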
Example #3
def coupling_converter(batch, device):

    list_array = list()
    list_dists = list()
    list_targets = list()
    list_pairs_index = list()

    with_target = 'fc' in batch[0]['targets'].columns

    for i, d in enumerate(batch):
        list_array.append(d['graphs'].input_array)
        list_dists.append(d['graphs'].dists)
        if with_target:
            list_targets.append(
                d['targets'][['fc', 'sd', 'pso', 'dso']].values.astype(np.float32))

        sample_index = np.full((len(d['targets']), 1), i)
        atom_index = d['targets'][['atom_index_0', 'atom_index_1']].values

        list_pairs_index.append(np.concatenate([sample_index, atom_index], axis=1))

    input_array = to_device(device, np.stack(list_array))
    dists = to_device(device, np.stack(list_dists))
    pairs_index = np.concatenate(list_pairs_index)

    array = {'input_array': input_array, 'dists': dists, 'pairs_index': pairs_index}

    if with_target:
        array['targets'] = to_device(device, np.concatenate(list_targets))

    return array
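A minimal sketch, assuming each element mirrors the structure the loop expects: d['graphs'] exposes input_array and dists, and d['targets'] is a pandas DataFrame; a SimpleNamespace stands in for the real (unshown) graph object, and device=-1 keeps the result on the CPU:

import numpy as np
import pandas as pd
from types import SimpleNamespace

graphs = SimpleNamespace(input_array=np.zeros((4, 8), dtype=np.float32),
                         dists=np.zeros((4, 4), dtype=np.float32))
targets = pd.DataFrame({'atom_index_0': [0, 1], 'atom_index_1': [1, 2]})
out = coupling_converter([{'graphs': graphs, 'targets': targets}], device=-1)
print(out['pairs_index'])  # rows of (sample_index, atom_index_0, atom_index_1)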
Example #4
def concat_yolo(batch, device=None):
    images = []
    bbox = []
    label = []
    for b in batch:
        images.append(b[0])
        bbox.append(to_device(device, np.array(b[1], dtype=np.float32)))
        label.append(to_device(device, np.array(b[2], dtype=np.int32)))
    images = concat_examples(images, device)
    return images, bbox, label
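A CPU-only sketch (assuming concat_examples and to_device come from chainer.dataset): the images are stacked into a single array while the variable-length boxes and labels remain per-image arrays in Python lists:

import numpy as np

batch = [(np.zeros((3, 416, 416), dtype=np.float32), [[10, 10, 50, 50]], [0]),
         (np.zeros((3, 416, 416), dtype=np.float32), [[20, 20, 60, 60]], [1])]
images, bbox, label = concat_yolo(batch, device=None)
print(images.shape, bbox[0].shape)  # (2, 3, 416, 416) (1, 4)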
Example #5
def data_convert(
    batch: List[Data],
    device=None,
):
    if len(batch) == 0:
        raise ValueError('batch is empty')

    return dict(
        input_array=[to_device(device, d.input_array) for d in batch],
        target_ids=[to_device(device, d.target_ids) for d in batch],
        vec=[to_device(device, d.vec) for d in batch],
    )
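A sketch assuming each Data instance exposes input_array, target_ids, and vec attributes; a SimpleNamespace stands in for the real (unshown) Data class:

import numpy as np
from types import SimpleNamespace

d = SimpleNamespace(input_array=np.zeros((5, 16), dtype=np.float32),
                    target_ids=np.array([1, 2], dtype=np.int32),
                    vec=np.zeros(16, dtype=np.float32))
out = data_convert([d], device=None)
print(sorted(out))  # ['input_array', 'target_ids', 'vec']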
Example #6
    def converter(self, batch, device=-1):
        # alternative to chainer.dataset.concat_examples
        DATA_SHAPE = 40 * 3  # 40 log filterbanks x (static, delta, delta-delta)

        xs = [np.load(path).astype(np.float32) for path, _ in batch]
        delta_x = [delta(x, 3) for x in xs]
        delta_delta_x = [delta(x, 3) for x in delta_x]

        Xs = [
            to_device(self.device,
                      np.concatenate((a, b, c), axis=1).astype(np.float32))
            for a, (b, c) in zip(xs, zip(delta_x, delta_delta_x))
        ]

        # Xs = [F.concat((X, self.xp.zeros(((self.stacked_frames + self.skip_size) - len(X), DATA_SHAPE), dtype=self.xp.float32)), axis=0) if len(X) < (self.stacked_frames + self.skip_size) else X for X in Xs]

        # Xs = [F.pad_sequence([X[i:i + self.stacked_frames] for i in range(0, len(X), self.skip_size)]).reshape(-1, DATA_SHAPE * self.stacked_frames) for X in Xs]

        # Word-level labels are built here, but only the character-level
        # labels are returned by this converter.
        word_label = [
            self.xp.asarray(lab[0]).astype(self.xp.int32) for _, lab in batch
        ]
        char_label = [
            self.xp.asarray(lab[1]).astype(self.xp.int32) for _, lab in batch
        ]

        label_batch = char_label

        return Xs, label_batch
Example #7
def animate_ff(model, frames, actions, mean_image, args):
    initial_frames = np.concatenate([
        normalize_frame(frame) - mean_image
        for frame in frames[0:args.skip_frames]
    ])
    initial_action = actions[args.skip_frames - 1].astype(np.float32)

    input_frames, input_action = concat_examples(batch=[(initial_frames,
                                                         initial_action)],
                                                 device=args.gpu)
    print('input_frames shape: {}'.format(input_frames.shape))

    predicted_frames = []
    ground_truths = []
    with chainer.no_backprop_mode():
        for next_index in range(args.skip_frames, len(frames)):
            print('next_index: {}'.format(next_index))
            predicted_frame = model((input_frames, input_action))
            next_frames = F.concat((input_frames, predicted_frame),
                                   axis=1)[:, 3:, :, :]
            next_action = actions[next_index].astype(np.float32)

            input_frames = next_frames
            input_action = to_device(device=args.gpu,
                                     x=next_action.reshape((-1, 3, 1)))
            # print('next_frames shape: {}'.format(input_frames.shape))
            # print('next_action shape: {}'.format(input_action.shape))
            # Keep predicted image
            predicted_frame.to_cpu()
            predicted_frames.append(
                converter.chw2hwc(predicted_frame.data[0] + mean_image))
            ground_truth = frames[next_index]
            ground_truths.append(converter.chw2hwc(ground_truth))

    return ground_truths, predicted_frames
Example #8
def concat_examples_batch(batch, device=None):
    train_frame = []
    train_actions = []
    target_frames = []
    for ((frame, actions), target) in batch:
        train_frame.append(frame)
        train_actions.append(actions)
        target_frames.append(target)
    train_frame = np.array(train_frame)
    train_actions = np.array(train_actions)
    target_frames = np.array(target_frames)
    if device is not None:
        train_frame = to_device(device, train_frame)
        train_actions = to_device(device, train_actions)
        target_frames = to_device(device, target_frames)
    # print('train frame shape: {}, train actions shape: {}, target_frames shape: {}'.format(train_frame.shape, train_actions.shape, target_frames.shape))
    return dict(input=(train_frame, train_actions), target=target_frames)
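A toy ((frame, actions), target) batch on the CPU; with device=None the converter returns plain numpy arrays without any transfer:

import numpy as np

frame = np.zeros((3, 64, 64), dtype=np.float32)
actions = np.zeros((3, 1), dtype=np.float32)
batch = [((frame, actions), frame), ((frame, actions), frame)]
out = concat_examples_batch(batch, device=None)
print(out['input'][0].shape, out['target'].shape)  # (2, 3, 64, 64) (2, 3, 64, 64)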
Example #9
def predict(start_image, goal_image, generator, posterior, transition,
            subtractor, classifier, args):
    start = to_device(device=args.gpu,
                      x=start_image.reshape((1, ) + start_image.shape))
    start = subtract_background(subtractor, start)
    goal = to_device(device=args.gpu,
                     x=goal_image.reshape((1, ) + goal_image.shape))
    goal = subtract_background(subtractor, goal)
    print('start image shape: ', start.shape)
    print('goal image shape: ', goal.shape)

    xp = np if args.gpu < 0 else cp

    loc = xp.zeros(shape=(1,), dtype=np.float32)
    scale = xp.ones(shape=(1,), dtype=np.float32)
    _Normal = Normal(loc=loc, scale=scale)

    sequence = []
    chainer.config.train = False

    start_state, _ = find_closest_latent_state(start, generator, transition,
                                               classifier, args)
    goal_state, _ = find_closest_latent_state(goal, generator, transition,
                                              classifier, args)

    with chainer.no_backprop_mode():
        start_state = start_state.reshape((1, ) + start_state.shape)
        goal_state = goal_state.reshape((1, ) + goal_state.shape)

        s_current = start_state
        print('current_state: ', s_current.shape)
        step = (goal_state - start_state) / args.steps
        z = _Normal.sample(sample_shape=(1, 4))
        z = F.squeeze(z)
        z = z.reshape((1, ) + z.shape)
        for i in range(1, args.steps):
            s_next = start_state + step * i
            x = F.concat((z, s_current, s_next), axis=1)
            x = F.reshape(x, shape=x.shape + (1, 1))
            o = generator(x)
            _, o_next = F.split_axis(o, 2, axis=1, force_tuple=True)

            o_next.to_cpu()
            sequence.append(o_next.data[0][0])
            s_current = s_next
    return sequence
Example #10
def convert_sg(batch, device):
    if len(batch) == 0:
        raise ValueError('batch is empty')
    result = [
        to_device(device,
                  _concat_arrays([example[0] for example in batch], None)),
        [example[1] for example in batch]
    ]
    return tuple(result)
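A sketch assuming _concat_arrays is chainer.dataset.convert._concat_arrays (the private helper behind concat_examples) and to_device is chainer.dataset.to_device; the first tuple field is stacked and moved, the second is passed through untouched:

import numpy as np

batch = [(np.zeros(4, dtype=np.float32), 'graph-0'),
         (np.ones(4, dtype=np.float32), 'graph-1')]
xs, metas = convert_sg(batch, device=-1)
print(xs.shape, metas)  # (2, 4) ['graph-0', 'graph-1']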
Example #11
    def test_to_device(self):
        src_xp = get_xp(self.src_gpu)
        dst_xp = get_xp(self.dst_gpu)
        x = src_xp.array([1], 'i')
        y = dataset.to_device(self.device, x)
        self.assertIsInstance(y, dst_xp.ndarray)

        self.assertEqual(int(y.device), self.device)
Example #12
def animate_lstm(model, frames, actions, mean_image, args):
    with chainer.no_backprop_mode():
        for step in range(args.init_steps - 1):
            print('next_index: {}'.format(step))
            frame = frames[step]
            normalized_frame = normalize_frame(frame)
            input_frame = normalized_frame - mean_image
            input_frame = to_device(device=args.gpu,
                                    x=input_frame.reshape((-1, ) +
                                                          input_frame.shape))
            print('input_frame shape: {}'.format(input_frame.shape))
            input_action = actions[step + 1].astype(np.float32)
            input_action = to_device(device=args.gpu,
                                     x=input_action.reshape((-1, 3, 1)))

            model((input_frame, input_action))

    predicted_frames = []
    ground_truths = []

    frame = frames[args.init_steps - 1]
    normalized_frame = normalize_frame(frame)
    next_frame = normalized_frame - mean_image
    next_frame = to_device(device=args.gpu,
                           x=next_frame.reshape((-1, ) + next_frame.shape))

    with chainer.no_backprop_mode():
        for next_index in range(args.init_steps - 1, len(frames) - 1):
            print('next_index: {}'.format(next_index))
            input_frame = next_frame
            input_action = actions[next_index].astype(np.float32)
            input_action = to_device(device=args.gpu,
                                     x=input_action.reshape((-1, 3, 1)))

            predicted_frame = model((input_frame, input_action))

            next_frame = chainer.Variable(predicted_frame.array)
            # Keep predicted image
            predicted_frame.to_cpu()
            predicted_frames.append(
                converter.chw2hwc(predicted_frame.data[0] + mean_image))
            ground_truth = frames[next_index]
            ground_truths.append(converter.chw2hwc(ground_truth))

    return ground_truths, predicted_frames
Example #13
def kdnet_converter(batch, device=None, padding=None):
    # concat_examples to CPU at first.
    result = concat_examples(batch, device=None, padding=padding)
    out_list = []
    for elem in result:
        if elem.dtype != object:
            # Send to GPU for int/float dtype array.
            out_list.append(to_device(device, elem))
        else:
            # Do NOT send to GPU for dtype=object array.
            out_list.append(elem)
    return tuple(out_list)
Example #14
    def test_to_device(self):
        src_xp = get_xp(self.src_gpu)
        dst_xp = get_xp(self.dst_gpu)
        x = src_xp.array([1], 'i')
        y = dataset.to_device(self.device, x)
        self.assertIsInstance(y, dst_xp.ndarray)

        if self.device is not None and self.device >= 0:
            self.assertEqual(int(y.device), self.device)

        if self.device is None and self.src_gpu:
            self.assertEqual(int(x.device), int(y.device))
Example #15
    def forward(self, *args, **kwargs):

        if isinstance(self.label_key, int):
            if not (-len(args) <= self.label_key < len(args)):
                msg = 'Label key %d is out of bounds' % self.label_key
                raise ValueError(msg)
            t = args[self.label_key]
            if self.label_key == -1:
                args = args[:-1]
            else:
                args = args[:self.label_key] + args[self.label_key + 1:]
        elif isinstance(self.label_key, str):
            if self.label_key not in kwargs:
                msg = 'Label key "%s" is not found' % self.label_key
                raise ValueError(msg)
            t = kwargs[self.label_key]
            del kwargs[self.label_key]

        self.y = None
        self.loss = None
        self.accuracy = None

        self.y = self.predictor(*args, **kwargs)
        self.loss = self.lossfun(self.y, t)

        if -1000 < self.loss.data / self.batch_size < 1000:
            reporter.report({'loss': self.loss / self.batch_size}, self)
            # reporter.report({'char_loss': char_loss}, self)
        else:
            print('loss diverged; skipping report')

        if self.compute_accuracy:
            wer = 0
            ys = [y.data[:n] for y, n in zip(F.stack(self.y[0], 1), self.y[1])]

            target = to_device(-1, t)
            print(len(ys[0]), len(target[0]))
            out = remove_blank(F.argmax(ys[0], axis=1).data)
            out = [int(o) for o in out]
            print(out)
            print(target[0])

            for yy, tt in zip(ys, target):
                out = remove_blank(F.argmax(yy, axis=1).data)
                out = [int(o) for o in out]

                wer += _wer(out, tt)

            reporter.report({'accuracy': wer / len(ys)}, self)
        return self.loss
Example #16
def concat_example(batch, device):
    if len(batch) == 0:
        raise ValueError('batch is empty')

    first_elem = batch[0]
    assert isinstance(first_elem, tuple)

    result = []
    for i in range(len(first_elem)):
        result.append([example[i] for example in batch])
    # labels should be ndarray
    result[-1] = dataset.to_device(device, numpy.array(result[-1],
                                                       numpy.int32))
    return tuple(result)
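A toy (image, label) batch; as the comment above notes, only the last column becomes an ndarray and is moved, while the images stay a plain Python list (a sketch, assuming dataset is chainer.dataset):

import numpy

batch = [(numpy.zeros((3, 8, 8), numpy.float32), 0),
         (numpy.ones((3, 8, 8), numpy.float32), 1)]
images, labels = concat_example(batch, device=-1)
print(type(images).__name__, labels)  # list [0 1]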
Example #17
def convert(batch, gpu):
    """
    """

    def to_device_batch(batch, gpu):
        if gpu is None:
            return batch
        elif gpu < 0:
            return [chainer.dataset.to_device(gpu, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(gpu, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {'x1s': to_device_batch([x1 for x1, x2, t in batch], gpu=gpu),
            'x2s': to_device_batch([x2 for x1, x2, t in batch], gpu=gpu),
            't': to_device(x=np.array([t for _, _, t in batch], dtype='i'), device=gpu)
            }
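A CPU-only sketch: x1s and x2s are variable-length int32 sequences and t is an integer target; gpu=None returns the sequence lists untouched:

import numpy as np

batch = [(np.array([1, 2], dtype=np.int32), np.array([3], dtype=np.int32), 0),
         (np.array([4], dtype=np.int32), np.array([5, 6], dtype=np.int32), 1)]
out = convert(batch, gpu=None)
print(len(out['x1s']), out['t'])  # 2 [0 1]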
Example #18
def convert(
        minibatch: List[Tuple[
            np.ndarray,
            np.ndarray, np.ndarray, np.ndarray, np.ndarray,
            np.ndarray
        ]],
        device: Optional[int]
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    # Append EOS to the end of each sentence
    eos_ = np.array([EOS], 'i')
    (
        src_batch,
        ga_batch, wo_batch, ni_batch, ga2_batch,
        tgt_batch
    ) = zip(*minibatch)
    with chainer.no_backprop_mode():
        src_sentences = \
            [Variable(np.hstack((sentence, eos_))) for sentence in src_batch]
        ga_sentences = \
            [Variable(np.hstack((sentence, eos_))) for sentence in ga_batch]
        wo_sentences = \
            [Variable(np.hstack((sentence, eos_))) for sentence in wo_batch]
        ni_sentences = \
            [Variable(np.hstack((sentence, eos_))) for sentence in ni_batch]
        ga2_sentences = \
            [Variable(np.hstack((sentence, eos_))) for sentence in ga2_batch]
        tgt_sentences = \
            [Variable(np.hstack((sentence, eos_))) for sentence in tgt_batch]

        src_block = F.pad_sequence(src_sentences, padding=PAD).data
        ga_block = F.pad_sequence(ga_sentences, padding=PAD).data
        wo_block = F.pad_sequence(wo_sentences, padding=PAD).data
        ni_block = F.pad_sequence(ni_sentences, padding=PAD).data
        ga2_block = F.pad_sequence(ga2_sentences, padding=PAD).data
        tgt_block = F.pad_sequence(tgt_sentences, padding=PAD).data

    return (
        to_device(device, src_block),
        to_device(device, ga_block),
        to_device(device, wo_block),
        to_device(device, ni_block),
        to_device(device, ga2_block),
        to_device(device, tgt_block)
    )
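An illustration with hypothetical EOS=0 and PAD=-1 ids (the real values come from the project's vocabulary, which is not shown here); each of the six fields is a variable-length int32 id sequence that gets EOS appended and is padded to a common length:

import numpy as np

EOS, PAD = 0, -1  # hypothetical ids, for illustration only
sent = np.array([5, 6], dtype=np.int32)
minibatch = [(sent,) * 6, (sent[:1],) * 6]
blocks = convert(minibatch, device=None)
print(blocks[0])  # [[ 5  6  0] [ 5  0 -1]]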
Example #19
    def converter(self, batch, device=-1):
        # alternative to chainer.dataset.concat_examples
        DATA_SHAPE = 40  # 40 log filterbank

        Xs = [
            to_device(self.device,
                      np.load(path).astype(np.float32)) for path, _ in batch
        ]

        Xs = [
            F.concat((X,
                      self.xp.zeros(
                          ((self.stacked_frames + self.skip_size) - len(X),
                           DATA_SHAPE),
                          dtype=self.xp.float32)),
                     axis=0) if len(X) <
            (self.stacked_frames + self.skip_size) else X for X in Xs
        ]

        Xs = [
            F.pad_sequence([
                X[i:i + self.stacked_frames]
                for i in range(0, len(X), self.skip_size)
            ]).reshape(-1, DATA_SHAPE * self.stacked_frames) for X in Xs
        ]

        word_label = [
            self.xp.asarray(lab[0]).astype(self.xp.int32) for _, lab in batch
        ]
        char_label = [
            self.xp.asarray(lab[1]).astype(self.xp.int32) for _, lab in batch
        ]

        label_batch = (word_label, char_label)

        return Xs, label_batch