Example #1
    def check_call(self):
        xp = self.link.xp

        # Suppress warning that arises from zero division in BatchNormalization
        with numpy.errstate(divide='ignore'):
            x1 = Variable(xp.asarray(numpy.random.uniform(
                -1, 1, (1, 3, 224, 224)).astype(numpy.float32)))
            y1 = cuda.to_cpu(self.link(x1)['prob'].data)
            self.assertEqual(y1.shape, (1, 1000))

            x2 = Variable(xp.asarray(numpy.random.uniform(
                -1, 1, (1, 3, 128, 128)).astype(numpy.float32)))
            y2 = cuda.to_cpu(self.link(x2, layers=['pool5'])['pool5'].data)
            self.assertEqual(y2.shape, (1, 2048))
Example #2
    def check_call_prob(self):
        xp = self.link.xp

        x = Variable(xp.asarray(numpy.random.uniform(
            -1, 1, (1, 3, 224, 224)).astype(self.dtype)))
        y = cuda.to_cpu(self.link(x)['prob'].data)
        self.assertEqual(y.shape, (1, 1000))
Example #3
    def check_call_loss2_fc2(self):
        xp = self.link.xp

        x = Variable(xp.asarray(numpy.random.uniform(
            -1, 1, (1, 3, 224, 224)).astype(numpy.float32)))
        y = cuda.to_cpu(self.link(x, ['loss2_fc2'])['loss2_fc2'].data)
        self.assertEqual(y.shape, (1, 1000))
Example #4
    def predict(self, images, oversample=True):
        """Computes all the probabilities of given images.

        Args:
            images (iterable of PIL.Image or numpy.ndarray): Input images.
                When you specify a color image as a :class:`numpy.ndarray`,
                make sure that color order is RGB.
            oversample (bool): If ``True``, it averages results across
                center, corners, and mirrors. Otherwise, it uses only the
                center.

        Returns:
            ~chainer.Variable: Output that contains the class probabilities
            of given images.

        """

        x = concat_examples([prepare(img, size=(256, 256)) for img in images])
        if oversample:
            x = imgproc.oversample(x, crop_dims=(224, 224))
        else:
            x = x[:, :, 16:240, 16:240]
        # Use no_backprop_mode to reduce memory consumption
        with function.no_backprop_mode(), chainer.using_config('train', False):
            x = Variable(self.xp.asarray(x))
            y = self(x, layers=['prob'])['prob']
            if oversample:
                n = len(y) // 10
                y_shape = y.shape[1:]
                y = reshape(y, (n, 10) + y_shape)
                y = sum(y, axis=1) / 10
        return y
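A minimal usage sketch for the ``predict`` method above. The model class, image file, and the top-5 post-processing are assumptions, not part of the original snippet:

# Hypothetical usage; assumes chainer.links.GoogLeNet (a pretrained link that
# exposes a predict() like the one above) and an image file 'sample.jpg'.
from PIL import Image
from chainer.links import GoogLeNet

model = GoogLeNet(pretrained_model='auto')
img = Image.open('sample.jpg')
probs = model.predict([img], oversample=True)
top5 = probs.data[0].argsort()[-5:][::-1]  # indices of the 5 likeliest classes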
Example #5
    def check_call(self):
        xp = self.link.xp

        x1 = Variable(xp.asarray(numpy.random.uniform(
            -1, 1, (1, 3, 224, 224)).astype(numpy.float32)))
        y1 = cuda.to_cpu(self.link(x1)['prob'].data)
        self.assertEqual(y1.shape, (1, 1000))
Example #6
    def extract(self,
                images,
                layers=['pool5'],
                size=(224, 224),
                test=True,
                volatile=flag.OFF):
        """Extracts all the feature maps of given images.

        The difference from directly executing ``__call__`` is that
        this method accepts images as an input and automatically
        transforms them into a proper variable. That is,
        it is a shortcut method that implicitly calls the
        ``prepare`` and ``__call__`` functions.

        Args:
            images (iterable of PIL.Image or numpy.ndarray): Input images.
            layers (list of str): The list of layer names you want to extract.
            size (pair of ints): The resolution to which the input images
                are resized before being fed to the CNN. If this argument is
                ``None``, the images are not resized; in that case, all of
                them must have the same resolution.
            test (bool): If ``True``, BatchNormalization runs in test mode.
            volatile (~chainer.Flag): Volatility flag used for input variables.

        Returns:
            Dictionary of ~chainer.Variable: A dictionary that maps each
            layer name to the corresponding feature map variable.

        """

        x = concat_examples([prepare(img, size=size) for img in images])
        x = Variable(self.xp.asarray(x), volatile=volatile)
        return self(x, layers=layers, test=test)
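A sketch of calling this v1-style ``extract`` (the model and image file are assumptions; in v1, ``flag.ON`` disables graph construction):

# Hypothetical usage; assumes `model` is a v1-era pretrained link (e.g.
# ResNet50Layers) exposing the extract() shown above.
img = Image.open('sample.jpg')
feats = model.extract([img], layers=['pool5'], test=True, volatile=flag.ON)
pool5 = feats['pool5']  # feature-map Variable of the 'pool5' layer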
Example #7
    def extract(self, images, layers=["digitcaps"]):
        """Extracts the feature maps of the given images."""
        x = concat_examples([preprocess(image) for image in images])
        x = Variable(self.xp.asarray(x))
        activations = self(x, layers=layers)
        return activations
Example #8
    def predict(self, images, oversample=True):
        """Computes all the probabilities of given images.

        Args:
            images (iterable of PIL.Image or numpy.ndarray): Input images.
            oversample (bool): If ``True``, it averages results across
                center, corners, and mirrors. Otherwise, it uses only the
                center.

        Returns:
            ~chainer.Variable: Output that contains the class probabilities
            of given images.

        """

        x = concat_examples([prepare(img, size=(256, 256)) for img in images])
        if oversample:
            x = imgproc.oversample(x, crop_dims=(224, 224))
        else:
            x = x[:, :, 16:240, 16:240]
        # Set volatile option to ON to reduce memory consumption
        x = Variable(self.xp.asarray(x), volatile=flag.ON)
        y = self(x, layers=['prob'])['prob']
        if oversample:
            n = y.data.shape[0] // 10
            y_shape = y.data.shape[1:]
            y = reshape(y, (n, 10) + y_shape)
            y = sum(y, axis=1) / 10
        return y
Example #9
    def __call__(self, tree):
        # skip nodes that have only one child
        while len(tree.children) == 1 and not tree.is_leaf():
            tree = tree.children[0]
        if tree.is_leaf():
            word = tree.get_word()
            # average the embeddings over the tokens in the chunk
            if self.is_leaf_as_chunk:
                vector = None
                for tok in word.split('/'):
                    embed = self.get_word_vec(tok)
                    if vector is None:
                        vector = self.embed2hidden(embed)
                    else:
                        vector += self.embed2hidden(embed)
                vector /= len(word.split('/'))
            else:
                embed = self.get_word_vec(word)
                vector = self.embed2hidden(embed)
            c = Variable(np.zeros((1, self.mem_units), dtype=np.float32))
        else:
            left_tree, right_tree = tree.children
            leftc = self(left_tree)
            rightc = self(right_tree)
            # skip nodes that have only one child
            while len(left_tree.children) == 1 and not left_tree.is_leaf():
                left_tree = left_tree.children[0]
            while len(right_tree.children) == 1 and not right_tree.is_leaf():
                right_tree = right_tree.children[0]
            left_vec = left_tree.data['vector']
            right_vec = right_tree.data['vector']

            # composition by tree lstm
            left_attention_vec = self.calc_attention(left_tree)
            right_attention_vec = self.calc_attention(right_tree)
            concat = F.concat(
                (left_vec, right_vec, left_attention_vec, right_attention_vec))
            u_l = self.updatel(concat)
            u_r = self.updater(concat)
            i_l = self.inputl(concat)
            i_r = self.inputr(concat)
            if self.comp_type == Composition.tree_attention_lstm:
                concatl = F.concat((left_vec, left_attention_vec))
                concatr = F.concat((right_vec, right_attention_vec))
                f_l = self.forgetl(concatr)
                f_r = self.forgetr(concatl)
            elif self.comp_type == Composition.attention_slstm:
                f_l = self.forgetl(concat)
                f_r = self.forgetr(concat)
            else:
                raise ValueError(
                    'unsupported composition type: {}'.format(self.comp_type))
            o_l = self.outputl(concat)
            o_r = self.outputr(concat)
            l_v = F.concat((u_l, i_l, f_l, o_l))
            r_v = F.concat((u_r, i_r, f_r, o_r))
            c, vector = F.slstm(leftc, rightc, l_v, r_v)

        tree.data['vector'] = vector
        if tree.is_root():
            self.calc_attention(tree)
        return c
Example #10
    def check_call_loss1_fc2(self):
        xp = self.link.xp

        x = Variable(
            xp.asarray(
                numpy.random.uniform(-1, 1,
                                     (1, 3, 224, 224)).astype(self.dtype)))
        y = cuda.to_cpu(self.link(x, ['loss1_fc2'])['loss1_fc2'].data)
        assert y.shape == (1, 1000)
Example #11
    def check_call(self):
        xp = self.link.xp

        x1 = Variable(
            xp.asarray(
                numpy.random.uniform(-1, 1,
                                     (1, 3, 224, 224)).astype(self.dtype)))
        y1 = cuda.to_cpu(self.link(x1)['prob'].data)
        assert y1.shape == (1, 1000)
Example #12
    def extract(self, images, layers=None, size=(224, 224), **kwargs):
        """extract(self, images, layers=['pool5'], size=(224, 224))

        Extracts all the feature maps of given images.

        The difference from directly executing ``forward`` is that
        this method accepts images as an input and automatically
        transforms them into a proper variable. That is,
        it is a shortcut method that implicitly calls the
        ``prepare`` and ``forward`` functions.

        Unlike the ``predict`` method, this method does not override the
        ``chainer.config.train`` and ``chainer.config.enable_backprop``
        configurations. If you want to extract features without updating
        model parameters, you need to set the configuration manually
        when calling this method, as follows:

         .. code-block:: python

             # model is an instance of ResNetLayers (50 or 101 or 152 layers)
             with chainer.using_config('train', False):
                 with chainer.using_config('enable_backprop', False):
                     feature = model.extract([image])

        Args:
            images (iterable of PIL.Image or numpy.ndarray): Input images.
            layers (list of str): The list of layer names you want to extract.
            size (pair of ints): The resolution to which the input images
                are resized before being fed to the CNN. If this argument is
                ``None``, the images are not resized; in that case, all of
                them must have the same resolution.

        Returns:
            Dictionary of ~chainer.Variable: A dictionary that maps each
            layer name to the corresponding feature map variable.

        """

        if layers is None:
            layers = ['pool5']

        if kwargs:
            argument.check_unexpected_kwargs(
                kwargs,
                test='test argument is not supported anymore. '
                'Use chainer.using_config',
                volatile='volatile argument is not supported anymore. '
                'Use chainer.using_config')
            argument.assert_kwargs_empty(kwargs)

        x = concat_examples([prepare(img, size=size) for img in images])
        x = Variable(self.xp.asarray(x))
        return self(x, layers=layers)
Example #13
    def extract(self, images, layers=None, size=(224, 224), **kwargs):
        """extract(self, images, layers=['pool5'], size=(224, 224))

        Extracts all the feature maps of given images.

        The difference from directly executing ``__call__`` is that
        this method accepts images as an input and automatically
        transforms them into a proper variable. That is,
        it is a shortcut method that implicitly calls the
        ``prepare`` and ``__call__`` functions.

        .. warning::

           ``train`` and ``volatile`` arguments are not supported anymore since
           v2.
           Instead, use ``chainer.using_config('train', train)`` and
           ``chainer.using_config('enable_backprop', not volatile)``
           respectively.
           See :func:`chainer.using_config`.

        Args:
            images (iterable of PIL.Image or numpy.ndarray): Input images.
            layers (list of str): The list of layer names you want to extract.
            size (pair of ints): The resolution to which the input images
                are resized before being fed to the CNN. If this argument is
                ``None``, the images are not resized; in that case, all of
                them must have the same resolution.

        Returns:
            Dictionary of ~chainer.Variable: A dictionary that maps each
            layer name to the corresponding feature map variable.

        """

        if layers is None:
            layers = ['pool5']

        argument.check_unexpected_kwargs(
            kwargs,
            train='train argument is not supported anymore. '
            'Use chainer.using_config',
            volatile='volatile argument is not supported anymore. '
            'Use chainer.using_config')
        argument.assert_kwargs_empty(kwargs)

        x = concat_examples([prepare(img, size=size) for img in images])
        x = Variable(self.xp.asarray(x))
        return self(x, layers=layers)
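A short usage sketch for the method above, following the warning in its docstring (``model`` and ``img`` are assumptions):

# Hypothetical usage; assumes `model` is a pretrained ResNet50Layers-style
# link and `img` is a PIL.Image.
with chainer.using_config('train', False), \
        chainer.using_config('enable_backprop', False):
    feats = model.extract([img], layers=['pool5'])
pool5 = feats['pool5']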
Example #14
    def __call__(self, x, test=False, output_inter=False):
        bs = []
        numlinks = len(self.links)

        if output_inter:
            interm_results = [x]

        for i, link in enumerate(self.links):
            if isinstance(link, Sequential):
                # detach if in different stages
                #if reduce(lambda x,y: x and y, [stage not in link._stages for stage in self._stages]):
                if self.current_stage not in link._stages:
                    y = Variable(x.data, volatile=x.volatile)
                else:
                    y = x
                b = link(y, test=test)
                bs.append(b[0])
                # Branches inside a branch are currently not supported
            # elif isinstance(link, function.dropout):
            #     x = link(x, train=not test)
            elif isinstance(link, chainer.links.BatchNormalization):
                x = link(x, test=test)
            elif hasattr(link, '__call__') and \
                    'train' in inspect.getargspec(link.__call__)[0]:
                x = link(x, train=not test)
            elif hasattr(link, '__call__') and \
                    'test' in inspect.getargspec(link.__call__)[0]:
                x = link(x, test=test)
            else:
                x = link(x)
            # do not update this branch if not the current stage
            if self.current_stage not in self._stages:
                x.unchain_backward()

            if output_inter:
                interm_results.append(x.data)

        bs.append(x)

        if output_inter:
            return tuple(bs), interm_results
        else:
            return tuple(bs)
Example #15
    def __call__(self, trainer=None):

        iterator = self._iterators['main']
        linearmodel = self._targets['main'].predictor

        if self.eval_hook:
            self.eval_hook(self)
        it = copy.copy(iterator)

        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                in_arrays = self.converter(batch, self.device)
                in_vars = tuple(Variable(x) for x in in_arrays)

                bold = in_vars[0]
                pred_z = linearmodel(bold).data

                if args.gpu_device != -1:
                    pred_z = pred_z.get()

                savemat(self.filen, {'z': pred_z})
Example #16
import os

from PIL import Image
import cv2
import numpy as np
from chainer import Chain
from chainer.links import Linear, Convolution2D
from chainer.functions import relu, softmax_cross_entropy
from chainer.variable import Variable

from raspi_ip import IP

conv = Convolution2D(3, 1, 3)
edge_filter = np.array([[[[0, -1, 0], [-1, 1, -1], [0, -1, 0]],
                         [[0, -1, 0], [-1, 1, -1], [0, -1, 0]],
                         [[0, -1, 0], [-1, 1, -1], [0, -1, 0]]]],
                       dtype=np.float32)
conv.W = Variable(edge_filter)
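
# A minimal sketch (an assumption, not part of the original snippet): apply
# the edge filter above to an H x W x 3 RGB image array.
def apply_edge_filter(img):
    x = np.asarray(img, dtype=np.float32).transpose(2, 0, 1)[np.newaxis]
    y = conv(Variable(x))  # shape (1, 1, H-2, W-2): per-pixel edge response
    return y.data[0, 0]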


def gen_name(dir_path):
    # return the first unused file name of the form img-<i>.jpg
    i = 0
    while os.path.exists(f"{dir_path}/img-{i}.jpg"):
        i += 1
    return f"{dir_path}/img-{i}.jpg"


def detect_line(img):
Example #17
    def extract(self, images, layers=None, size=(224, 224), **kwargs):
        """extract(self, images, layers=['fc7'], size=(224, 224))

        Extracts all the feature maps of given images.

        The difference from directly executing ``forward`` is that
        this method accepts images as an input and automatically
        transforms them into a proper variable. That is,
        it is a shortcut method that implicitly calls the
        ``prepare`` and ``forward`` functions.

        Unlike the ``predict`` method, this method does not override the
        ``chainer.config.train`` and ``chainer.config.enable_backprop``
        configurations. If you want to extract features without updating
        model parameters, you need to set the configuration manually
        when calling this method, as follows:

         .. code-block:: python

             # model is an instance of VGGLayers (16 or 19 layers)
             with chainer.using_config('train', False):
                 with chainer.using_config('enable_backprop', False):
                     feature = model.extract([image])

        .. warning::

           ``test`` and ``volatile`` arguments are not supported
           anymore since v2. Instead, users should configure the
           training and back-propagation modes with ``train`` and
           ``enable_backprop``, respectively.

           Note that the default behavior of this method differs
           between v1 and later versions. Specifically,
           the default value of ``test`` in v1 was ``True`` (test mode),
           whereas that of ``chainer.config.train`` is also ``True``
           (train mode). Therefore, users need to explicitly set
           ``train`` to ``False`` to run the code in test mode and
           ``enable_backprop`` to ``False`` to turn off
           computational graph construction.

           See the `upgrade guide <https://docs.chainer.org/en/stable\
           /upgrade_v2.html#training-mode-is-configured-by-a-thread-local-flag>`_.

        Args:
            images (iterable of PIL.Image or numpy.ndarray): Input images.
            layers (list of str): The list of layer names you want to extract.
            size (pair of ints): The resolution to which the input images
                are resized before being fed to the CNN. If this argument is
                ``None``, the images are not resized; in that case, all of
                them must have the same resolution.

        Returns:
            Dictionary of ~chainer.Variable: A dictionary that maps each
            layer name to the corresponding feature map variable.

        """

        if layers is None:
            layers = ['fc7']

        if kwargs:
            argument.check_unexpected_kwargs(
                kwargs,
                test='test argument is not supported anymore. '
                'Use chainer.using_config',
                volatile='volatile argument is not supported anymore. '
                'Use chainer.using_config')
            argument.assert_kwargs_empty(kwargs)

        x = concat_examples([prepare(img, size=size) for img in images])
        x = Variable(self.xp.asarray(x))
        return self(x, layers=layers)
Example #18
    def __call__(self, x):
        xp = cuda.get_array_module(x.data)
        ln_var = math.log(self.std ** 2)
        # additive Gaussian noise ~ N(0, std**2) with the same shape as x
        noise = chainer.functions.gaussian(
            Variable(xp.zeros_like(x.data)),
            Variable(xp.full_like(x.data, ln_var)))
        return x + noise
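A sketch of exercising the method above, assuming it belongs to a Link-style class (hypothetically named ``GaussianNoise``) holding a ``std`` attribute:

# Hypothetical usage; GaussianNoise is an assumed wrapper class around the
# __call__ shown above.
import numpy as np
from chainer import Variable

layer = GaussianNoise(std=0.1)
x = Variable(np.random.randn(4, 10).astype(np.float32))
y = layer(x)  # same shape as x, with N(0, 0.1 ** 2) noise added elementwise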
Example #19
import chainer.computational_graph as c
from chainercv.datasets import voc_bbox_label_names
import numpy as np
from chainer.variable import Variable
import pydotplus
from multi_task.multi_task_300 import Multi_task_300

x_data = np.zeros([1, 3, 300, 300], dtype=np.float32)

x = Variable(x_data)

model = Multi_task_300(
    n_fg_class=len(voc_bbox_label_names), pretrained_model='imagenet',
    detection=True, segmentation=True, attention=True)

detection, mask = model(x)
a, b = detection
# loc, conf = detection
g = c.build_computational_graph([a, b, mask], remove_variable=True)

dot_format = g._to_dot()

graph = pydotplus.graph_from_dot_data(dot_format)

graph.write_pdf('visualization.pdf')

with open('visualization.gv', 'w') as o:
    o.write(g.dump())
Example #20
sigma = noiseLevel * 255.0
print("here we go")

gradList = []
for _ in range(sampleSize):
    print("sampling...")
    x = np.asarray(image, dtype=np.float32)
    # RGB to BGR
    x = x[:, :, ::-1]
    # subtract the mean
    x -= np.array([103.939, 116.779, 123.68], dtype=np.float32)
    x = x.transpose((2, 0, 1))
    x = x[np.newaxis]
    # add Gaussian noise
    x += sigma * np.random.randn(*x.shape)
    x = Variable(np.asarray(x))
    # forward pass and take the final (probability) layer
    y = model(x, layers=['prob'])['prob']
    # backprop from a chosen label (here the second-highest prediction;
    # use np.argmax(y.data) for the top label instead)
    t = np.zeros((x.data.shape[0],), dtype=np.int32)
    t[:] = y.data.argsort()[0][-2]      # second-highest label
    t = Variable(np.asarray(t))
    loss = F.softmax_cross_entropy(y, t)
    loss.backward()
    # append the input gradient to the list
    grad = np.copy(x.grad)
    gradList.append(grad)
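The loop above accumulates input gradients over noisy copies of the image, which is the SmoothGrad recipe; a hedged sketch (not in the original) of reducing ``gradList`` to a saliency map:

# Assumed post-processing: average absolute gradients over the samples and
# take the channel-wise maximum to obtain one saliency value per pixel.
grads = np.stack(gradList)                # (sampleSize, 1, 3, H, W)
saliency = np.abs(grads).mean(axis=0)[0]  # (3, H, W)
saliency = saliency.max(axis=0)           # (H, W)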
Example #21
model = L.Classifier(lm)

serializers.load_npz(args.model, model)

if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    model.to_gpu()

model.predictor.reset_state()

primetext = args.primetext
if isinstance(primetext, six.binary_type):
    primetext = primetext.decode('utf-8')

if primetext in vocab:
    prev_word = Variable(xp.array([vocab[primetext]], xp.int32))
else:
    print('ERROR: Unfortunately ' + primetext + ' is unknown.')
    exit()

prob = F.softmax(model.predictor(prev_word))
sys.stdout.write(primetext + ' ')

for i in six.moves.range(args.length):
    prob = F.softmax(model.predictor(prev_word))
    if args.sample > 0:
        probability = cuda.to_cpu(prob.data)[0].astype(np.float64)
        probability /= np.sum(probability)
        index = np.random.choice(range(len(probability)), p=probability)
    else:
        index = np.argmax(cuda.to_cpu(prob.data))
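    # Assumed continuation; the original snippet is truncated here. This is
    # patterned on Chainer's PTB gentxt example, and `ivocab` (an
    # index-to-word mapping) is an assumption.
    sys.stdout.write(ivocab[index] + ' ')
    prev_word = Variable(xp.array([index], xp.int32))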