Example #1
    def __init__(self, pretrained_model, n_layers):
        super(ResNetLayers, self).__init__()

        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            kwargs = {'initialW': constant.Zero()}
        else:
            # employ default initializers used in the original paper
            kwargs = {'initialW': normal.HeNormal(scale=1.0)}

        if n_layers == 50:
            block = [3, 4, 6, 3]
        elif n_layers == 101:
            block = [3, 4, 23, 3]
        elif n_layers == 152:
            block = [3, 8, 36, 3]
        else:
            raise ValueError('The n_layers argument should be either 50, 101,'
                             ' or 152, but {} was given.'.format(n_layers))

        with self.init_scope():
            self.conv1 = Convolution2D(3, 64, 7, 2, 3, **kwargs)
            self.bn1 = BatchNormalization(64)
            self.res2 = BuildingBlock(block[0], 64, 64, 256, 1, **kwargs)
            self.res3 = BuildingBlock(block[1], 256, 128, 512, 2, **kwargs)
            self.res4 = BuildingBlock(block[2], 512, 256, 1024, 2, **kwargs)
            self.res5 = BuildingBlock(block[3], 1024, 512, 2048, 2, **kwargs)
            self.fc6 = Linear(2048, 1000)

        if pretrained_model and pretrained_model.endswith('.caffemodel'):
            _retrieve(n_layers, 'ResNet-{}-model.npz'.format(n_layers),
                      pretrained_model, self)
        elif pretrained_model:
            npz.load_npz(pretrained_model, self)
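
All of these examples ultimately funnel into npz.load_npz(file, obj), which restores a link's parameters from a NumPy .npz archive written by npz.save_npz. A minimal, self-contained round trip looks like this (a sketch, not taken from the examples above; 'linear.npz' is a hypothetical path):

from chainer import links
from chainer.serializers import npz

# Save an initialized link, then load its parameters into a fresh link.
lin1 = links.Linear(3, 2)
npz.save_npz('linear.npz', lin1)

lin2 = links.Linear(3, 2)
npz.load_npz('linear.npz', lin2)
assert (lin1.W.data == lin2.W.data).all()
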
Example #2
 def __init__(self, pretrained_model='auto'):
     if pretrained_model:
         # As a sampling process is time-consuming,
         # we employ a zero initializer for faster computation.
         kwargs = {'initialW': constant.Zero()}
     else:
         # employ default initializers used in the original paper
         kwargs = {'initialW': normal.HeNormal(scale=1.0)}
     super(ResNet50Layers, self).__init__(
         conv1=Convolution2D(3, 64, 7, 2, 3, **kwargs),
         bn1=BatchNormalization(64),
         res2=BuildingBlock(3, 64, 64, 256, 1, **kwargs),
         res3=BuildingBlock(4, 256, 128, 512, 2, **kwargs),
         res4=BuildingBlock(6, 512, 256, 1024, 2, **kwargs),
         res5=BuildingBlock(3, 1024, 512, 2048, 2, **kwargs),
         fc6=Linear(2048, 1000),
     )
     if pretrained_model == 'auto':
         _retrieve(
             'ResNet-50-model.npz', 'ResNet-50-model.caffemodel', self)
     elif pretrained_model:
         npz.load_npz(pretrained_model, self)
     self.functions = OrderedDict([
         ('conv1', [self.conv1, self.bn1, relu]),
         ('pool1', [lambda x: max_pooling_2d(x, ksize=3, stride=2)]),
         ('res2', [self.res2]),
         ('res3', [self.res3]),
         ('res4', [self.res4]),
         ('res5', [self.res5]),
         ('pool5', [_global_average_pooling_2d]),
         ('fc6', [self.fc6]),
         ('prob', [softmax]),
     ])
Example #3
    def test_load(self):
        obj = mock.MagicMock()
        npz.load_npz(self.temp_file_path, obj)

        self.assertEqual(obj.serialize.call_count, 1)
        (serializer,), _ = obj.serialize.call_args
        self.assertIsInstance(serializer, npz.NpzDeserializer)
Example #4
 def test_load_with_path(self):
     target = link.Chain()
     with target.init_scope():
         target.child_linear = links.Linear(2, 3)
     npz.load_npz(self.file, target, 'child/')
     numpy.testing.assert_array_equal(
         self.source_child.child_linear.W.data, target.child_linear.W.data)
Example #5
 def test_load_without_path(self):
     target = link.Chain()
     with target.init_scope():
         target.parent_linear = links.Linear(3, 2)
     npz.load_npz(self.file, target, path='')
     numpy.testing.assert_array_equal(
         self.source_parent.parent_linear.W.data,
         target.parent_linear.W.data)
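
The path argument used in the two tests above selects a sub-tree of the saved parameter hierarchy, so a file written for a whole chain can initialize just one of its children. A minimal sketch of the idea ('parent.npz' and the child attribute name are hypothetical):

from chainer import link, links
from chainer.serializers import npz

parent = link.Chain()
with parent.init_scope():
    parent.child = links.Linear(2, 3)
npz.save_npz('parent.npz', parent)  # stores keys such as 'child/W'

# Deserialize only the parameters saved under the 'child/' prefix.
child_only = links.Linear(2, 3)
npz.load_npz('parent.npz', child_only, path='child/')
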
Example #6
def _make_npz(path_npz, url, model):
    path_caffemodel = download.cached_download(url)
    sys.stderr.write(
        'Now loading caffemodel (usually it may take a few minutes)\n')
    sys.stderr.flush()
    GoogLeNet.convert_caffemodel_to_npz(path_caffemodel, path_npz)
    npz.load_npz(path_npz, model)
    return model
Example #7
    def test_load_without_strict(self):
        obj = mock.MagicMock()
        npz.load_npz(self.file, obj, strict=False)

        self.assertEqual(obj.serialize.call_count, 1)
        (serializer,), _ = obj.serialize.call_args
        self.assertFalse(serializer.strict)
        self.assertIsInstance(serializer, npz.NpzDeserializer)
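
Passing strict=False, as this test checks, makes the NpzDeserializer skip parameters that are missing from the file instead of raising an error, which is the usual escape hatch when fine-tuning a model whose head differs from the checkpoint. In application code it is a one-liner (a sketch; 'model.npz' and model are hypothetical):

npz.load_npz('model.npz', model, strict=False)
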
Example #8
    def __init__(self, pretrained_model='auto', n_layers=16):
        super(VGGLayers, self).__init__()
        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            init = constant.Zero()
            kwargs = {'initialW': init, 'initial_bias': init}
        else:
            # employ default initializers used in the original paper
            kwargs = {
                'initialW': normal.Normal(0.01),
                'initial_bias': constant.Zero(),
            }

        if n_layers not in [16, 19]:
            raise ValueError(
                'The n_layers argument should be either 16 or 19,'
                ' but {} was given.'.format(n_layers)
            )

        with self.init_scope():
            self.conv1_1 = Convolution2D(3, 64, 3, 1, 1, **kwargs)
            self.conv1_2 = Convolution2D(64, 64, 3, 1, 1, **kwargs)
            self.conv2_1 = Convolution2D(64, 128, 3, 1, 1, **kwargs)
            self.conv2_2 = Convolution2D(128, 128, 3, 1, 1, **kwargs)
            self.conv3_1 = Convolution2D(128, 256, 3, 1, 1, **kwargs)
            self.conv3_2 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv3_3 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv4_1 = Convolution2D(256, 512, 3, 1, 1, **kwargs)
            self.conv4_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv4_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_1 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.fc6 = Linear(512 * 7 * 7, 4096, **kwargs)
            self.fc7 = Linear(4096, 4096, **kwargs)
            self.fc8 = Linear(4096, 1000, **kwargs)
            if n_layers == 19:
                self.conv3_4 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
                self.conv4_4 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
                self.conv5_4 = Convolution2D(512, 512, 3, 1, 1, **kwargs)

        if pretrained_model == 'auto':
            if n_layers == 16:
                _retrieve(
                    'VGG_ILSVRC_16_layers.npz',
                    'https://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
                    'caffe/VGG_ILSVRC_16_layers.caffemodel',
                    self)
            else:
                _retrieve(
                    'VGG_ILSVRC_19_layers.npz',
                    'http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
                    'caffe/VGG_ILSVRC_19_layers.caffemodel',
                    self)
        elif pretrained_model:
            npz.load_npz(pretrained_model, self)
Example #9
 def test_load_npz_ignore_names(self):
     chain = link.Chain()
     with chain.init_scope():
         chain.x = chainer.variable.Parameter(shape=())
         chain.yy = chainer.variable.Parameter(shape=(2, 3))
     npz.load_npz(
         self.temp_file_path, chain, ignore_names=self.ignore_names)
     self.assertEqual(chain.x.data, self.x)
     self.assertFalse(numpy.all(chain.yy.data == self.yy))
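
ignore_names, exercised above, accepts a parameter key, a callable that returns True for keys to skip, or a list of either. A sketch that loads everything except a classifier head (the 'fc8/...' keys and 'vgg16.npz' are hypothetical):

npz.load_npz('vgg16.npz', model, ignore_names=['fc8/W', 'fc8/b'])
# or, equivalently, with a predicate:
npz.load_npz('vgg16.npz', model,
             ignore_names=lambda key: key.startswith('fc8/'))
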
Example #10
 def test_load_optimizer(self):
     for param in self.parent.params():
         param.data.fill(1)
     npz.save_npz(self.temp_file_path, self.parent, self.compress)
     for param in self.parent.params():
         param.data.fill(0)
     npz.load_npz(self.temp_file_path, self.parent)
     for param in self.parent.params():
         self.assertTrue((param.data == 1).all())
Example #11
 def test_load_optimizer_with_strict(self):
     for param in self.parent.params():
         param.data.fill(1)
     self._save_npz(self.file, self.parent, self.compress)
     for param in self.parent.params():
         param.data.fill(0)
     npz.load_npz(self.file, self.parent)
     for param in self.parent.params():
         self.assertTrue((param.data == 1).all())
Example #12
def _make_npz(path_npz, path_caffemodel, model):
    print('Now loading caffemodel (usually it may take a few minutes)')
    if not os.path.exists(path_caffemodel):
        raise IOError(
            'The pre-trained caffemodel does not exist. Please download it '
            'from \'https://github.com/KaimingHe/deep-residual-networks\', '
            'and place it on {}'.format(path_caffemodel))
    ResNet50Layers.convert_caffemodel_to_npz(path_caffemodel, path_npz)
    npz.load_npz(path_npz, model)
    return model
Example #13
 def test_load_optimizer_without_strict(self):
     for param in self.parent.params():
         param.data.fill(1)
     self._save_npz(self.file, self.parent, self.compress)
     # Remove a param
     del self.parent.child.linear.b
     for param in self.parent.params():
         param.data.fill(0)
     npz.load_npz(self.file, self.parent, strict=False)
     for param in self.parent.params():
         self.assertTrue((param.data == 1).all())
     self.assertFalse(hasattr(self.parent.child.linear, 'b'))
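Example #14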
 def test_serialization(self):
     lin1 = links.SimplifiedDropconnect(None, self.out_size)
     x = chainer.Variable(self.x)
     # Must call the link to initialize weights.
     lin1(x)
     w1 = lin1.W.data
     fd, temp_file_path = tempfile.mkstemp()
     os.close(fd)
     npz.save_npz(temp_file_path, lin1)
     lin2 = links.SimplifiedDropconnect(None, self.out_size)
     npz.load_npz(temp_file_path, lin2)
     w2 = lin2.W.data
     self.assertEqual((w1 == w2).all(), True)
Example #15
def _retrieve(name_npz, name_caffemodel, model):
    root = download.get_dataset_directory('pfnet/chainer/models/')
    path = os.path.join(root, name_npz)
    path_caffemodel = os.path.join(root, name_caffemodel)
    return download.cache_or_load_file(
        path, lambda path: _make_npz(path, path_caffemodel, model),
        lambda path: npz.load_npz(path, model))
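
_retrieve glues npz.load_npz into chainer.dataset.download.cache_or_load_file: on the first call the creator converts the downloaded caffemodel and writes the .npz into the dataset cache, and every later call takes the cheap loader branch. Roughly equivalent control flow (a sketch; cache_or_load_file additionally handles locking and an atomic rename):

if os.path.exists(path):
    npz.load_npz(path, model)                # cached .npz already exists
else:
    _make_npz(path, path_caffemodel, model)  # convert once, then cache
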
Example #16
    def __init__(self, pretrained_model, n_layers):
        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            kwargs = {'initialW': constant.Zero()}
        else:
            # employ default initializers used in the original paper
            kwargs = {'initialW': normal.HeNormal(scale=1.0)}

        if n_layers == 50:
            block = [3, 4, 6, 3]
        elif n_layers == 101:
            block = [3, 4, 23, 3]
        elif n_layers == 152:
            block = [3, 8, 36, 3]
        else:
            raise ValueError('The n_layers argument should be either 50, 101,'
                             ' or 152, but {} was given.'.format(n_layers))

        super(ResNetLayers, self).__init__(
            conv1=Convolution2D(3, 64, 7, 2, 3, **kwargs),
            bn1=BatchNormalization(64),
            res2=BuildingBlock(block[0], 64, 64, 256, 1, **kwargs),
            res3=BuildingBlock(block[1], 256, 128, 512, 2, **kwargs),
            res4=BuildingBlock(block[2], 512, 256, 1024, 2, **kwargs),
            res5=BuildingBlock(block[3], 1024, 512, 2048, 2, **kwargs),
            fc6=Linear(2048, 1000),
        )
        if pretrained_model and pretrained_model.endswith('.caffemodel'):
            _retrieve(n_layers, 'ResNet-{}-model.npz'.format(n_layers),
                      pretrained_model, self)
        elif pretrained_model:
            npz.load_npz(pretrained_model, self)
        self.functions = collections.OrderedDict([
            ('conv1', [self.conv1, self.bn1, relu]),
            ('pool1', [lambda x: max_pooling_2d(x, ksize=3, stride=2)]),
            ('res2', [self.res2]),
            ('res3', [self.res3]),
            ('res4', [self.res4]),
            ('res5', [self.res5]),
            ('pool5', [_global_average_pooling_2d]),
            ('fc6', [self.fc6]),
            ('prob', [softmax]),
        ])
Example #17
    def __init__(self, pretrained_model='auto'):
        super(GoogLeNet, self).__init__()

        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            kwargs = {'initialW': constant.Zero()}
        else:
            # employ default initializers used in BVLC. For more detail, see
            # https://github.com/chainer/chainer/pull/2424#discussion_r109642209
            kwargs = {'initialW': uniform.LeCunUniform(scale=1.0)}

        with self.init_scope():
            self.conv1 = Convolution2D(3, 64, 7, stride=2, pad=3, **kwargs)
            self.conv2_reduce = Convolution2D(64, 64, 1, **kwargs)
            self.conv2 = Convolution2D(64, 192, 3, stride=1, pad=1, **kwargs)
            self.inc3a = Inception(192, 64, 96, 128, 16, 32, 32)
            self.inc3b = Inception(256, 128, 128, 192, 32, 96, 64)
            self.inc4a = Inception(480, 192, 96, 208, 16, 48, 64)
            self.inc4b = Inception(512, 160, 112, 224, 24, 64, 64)
            self.inc4c = Inception(512, 128, 128, 256, 24, 64, 64)
            self.inc4d = Inception(512, 112, 144, 288, 32, 64, 64)
            self.inc4e = Inception(528, 256, 160, 320, 32, 128, 128)
            self.inc5a = Inception(832, 256, 160, 320, 32, 128, 128)
            self.inc5b = Inception(832, 384, 192, 384, 48, 128, 128)
            self.loss3_fc = Linear(1024, 1000, **kwargs)

            self.loss1_conv = Convolution2D(512, 128, 1, **kwargs)
            self.loss1_fc1 = Linear(2048, 1024, **kwargs)
            self.loss1_fc2 = Linear(1024, 1000, **kwargs)

            self.loss2_conv = Convolution2D(528, 128, 1, **kwargs)
            self.loss2_fc1 = Linear(2048, 1024, **kwargs)
            self.loss2_fc2 = Linear(1024, 1000, **kwargs)

        if pretrained_model == 'auto':
            _retrieve(
                'bvlc_googlenet.npz',
                'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel',
                self)
        elif pretrained_model:
            npz.load_npz(pretrained_model, self)
Example #18
    def __init__(self, pretrained_model='auto'):
        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            init = constant.Zero()
            kwargs = {'initialW': init, 'initial_bias': init}
        else:
            # employ default initializers used in the original paper
            kwargs = {
                'initialW': normal.Normal(0.01),
                'initial_bias': constant.Zero(),
            }
        super(VGG16Layers, self).__init__()

        with self.init_scope():
            self.conv1_1 = Convolution2D(3, 64, 3, 1, 1, **kwargs)
            self.conv1_2 = Convolution2D(64, 64, 3, 1, 1, **kwargs)
            self.conv2_1 = Convolution2D(64, 128, 3, 1, 1, **kwargs)
            self.conv2_2 = Convolution2D(128, 128, 3, 1, 1, **kwargs)
            self.conv3_1 = Convolution2D(128, 256, 3, 1, 1, **kwargs)
            self.conv3_2 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv3_3 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv4_1 = Convolution2D(256, 512, 3, 1, 1, **kwargs)
            self.conv4_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv4_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_1 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.fc6 = Linear(512 * 7 * 7, 4096, **kwargs)
            self.fc7 = Linear(4096, 4096, **kwargs)
            self.fc8 = Linear(4096, 1000, **kwargs)

        if pretrained_model == 'auto':
            _retrieve(
                'VGG_ILSVRC_16_layers.npz',
                'http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
                'caffe/VGG_ILSVRC_16_layers.caffemodel',
                self)
        elif pretrained_model:
            npz.load_npz(pretrained_model, self)
Example #19
    def __init__(self, pretrained_model, n_layers, downsample_fb=False):
        super(ResNetLayers, self).__init__()

        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            conv_kwargs = {'initialW': constant.Zero()}
        else:
            # employ default initializers used in the original paper
            conv_kwargs = {'initialW': normal.HeNormal(scale=1.0)}

        kwargs = conv_kwargs.copy()
        kwargs['downsample_fb'] = downsample_fb

        if n_layers == 50:
            block = [3, 4, 6, 3]
        elif n_layers == 101:
            block = [3, 4, 23, 3]
        elif n_layers == 152:
            block = [3, 8, 36, 3]
        else:
            raise ValueError('The n_layers argument should be either 50, 101,'
                             ' or 152, but {} was given.'.format(n_layers))

        with self.init_scope():
            self.conv1 = Convolution2D(3, 64, 7, 2, 3, **conv_kwargs)
            self.bn1 = BatchNormalization(64)
            self.res2 = BuildingBlock(block[0], 64, 64, 256, 1, **kwargs)
            self.res3 = BuildingBlock(block[1], 256, 128, 512, 2, **kwargs)
            self.res4 = BuildingBlock(block[2], 512, 256, 1024, 2, **kwargs)
            self.res5 = BuildingBlock(block[3], 1024, 512, 2048, 2, **kwargs)
            self.fc6 = Linear(2048, 1000)

        if pretrained_model and pretrained_model.endswith('.caffemodel'):
            _retrieve(n_layers, 'ResNet-{}-model.npz'.format(n_layers),
                      pretrained_model, self)
        elif pretrained_model:
            npz.load_npz(pretrained_model, self)
Example #20
    def __init__(self, pretrained_model="auto"):
        if pretrained_model:
            kwargs = {'initialW': constant.Zero()}
        else:
            kwargs = {'initialW': normal.HeNormal(scale=1.0)}

        super(ResNet, self).__init__(
            conv1=L.Convolution2D(3, 64, 7, 2, 3, **kwargs),
            bn1=L.BatchNormalization(64),
            res2=Block(3, 64, 64, 256, 1, **kwargs),
            res3=Block(4, 256, 128, 512, 2, **kwargs),
            res4=Block(6, 512, 256, 1024, 2, **kwargs),
            res5=Block(3, 1024, 512, 2048, 2, **kwargs),
            fc6=L.Linear(None, 1000),
        )
        if pretrained_model == 'auto':
            print("[ PREPROCESS ] Use caffe model of ResNet.")
            _retrieve('ResNet-50-model.npz', 'ResNet-50-model.caffemodel',
                      self)
            self.fc6 = L.Linear(None, 25)
        elif pretrained_model:
            npz.load_npz(pretrained_model, self)

        self.train = True
Example #21
def main(id):
    with chainer.using_config("train", False):
        with chainer.using_config("enable_backprop", False):
            model_path = "/efs/fMRI_AE/SimpleFCAE_E32D32/model/model_iter_108858"

            gpu = 0
            get_device_from_id(gpu).use()
            """NibDataset
            def __init__(self, directory: str, crop: list):
            """
            crop = [[9, 81], [11, 99], [0, 80]]
            test_dataset = NibDataset("/data/test", crop=crop)
            """
            def __init__(self, dataset, batch_size, repeat=True, shuffle=True):
            """
            mask = load_mask_nib("/data/mask/average_optthr.nii", crop)

            model = Model(mask, 2, "mask", "mask")
            load_npz(model_path, model)
            model.to_gpu()

            for i in range(len(test_dataset)):
                if i % 8 != id:
                    continue
                inp = to_gpu(test_dataset.get_example(i))
                inp = xp.expand_dims(inp, 0)
                subject = test_dataset.get_subject(i)
                frame = test_dataset.get_frame(i)
                sys.stdout.write("\rsubject{:03d} frame{:03d}".format(
                    subject, frame))
                sys.stdout.flush()
                out = model.reconstruct(inp).array
                out = xp.squeeze(out)
                xp.save(
                    "/efs/fMRI_AE/SimpleFCAE_E32D32/reconstruct/reconstruction_subject{:03d}_frame{:03d}.npy"
                    .format(subject, frame), out)
Example #22
def _make_npz(path_npz, url, model):
    path_caffemodel = download.cached_download(url)
    print('Now loading caffemodel (usually it may take a few minutes)')
    VGG16Layers.convert_caffemodel_to_npz(path_caffemodel, path_npz)
    npz.load_npz(path_npz, model)
    return model
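
convert_caffemodel_to_npz is a classmethod on the pretrained-model links, so the conversion can also be done once ahead of time, without going through the download cache (a sketch; both paths are hypothetical):

from chainer.links import VGG16Layers

VGG16Layers.convert_caffemodel_to_npz('VGG_ILSVRC_16_layers.caffemodel',
                                      'VGG_ILSVRC_16_layers.npz')
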
Example #23
 def __init__(self, pretrained_model='auto'):
     if pretrained_model:
         # As a sampling process is time-consuming,
         # we employ a zero initializer for faster computation.
         kwargs = {'initialW': constant.Zero()}
     else:
         # employ default initializers used in the original paper
         kwargs = {'initialW': uniform.GlorotUniform(scale=1.0)}
     super(GoogLeNet, self).__init__(
         conv1=Convolution2D(3, 64, 7, stride=2, pad=3, **kwargs),
         conv2_reduce=Convolution2D(64, 64, 1, **kwargs),
         conv2=Convolution2D(64, 192, 3, stride=1, pad=1, **kwargs),
         inc3a=Inception(192, 64, 96, 128, 16, 32, 32),
         inc3b=Inception(256, 128, 128, 192, 32, 96, 64),
         inc4a=Inception(480, 192, 96, 208, 16, 48, 64),
         inc4b=Inception(512, 160, 112, 224, 24, 64, 64),
         inc4c=Inception(512, 128, 128, 256, 24, 64, 64),
         inc4d=Inception(512, 112, 144, 288, 32, 64, 64),
         inc4e=Inception(528, 256, 160, 320, 32, 128, 128),
         inc5a=Inception(832, 256, 160, 320, 32, 128, 128),
         inc5b=Inception(832, 384, 192, 384, 48, 128, 128),
         loss3_fc=Linear(1024, 1000, **kwargs),
         loss1_conv=Convolution2D(512, 128, 1, **kwargs),
         loss1_fc1=Linear(2048, 1024, **kwargs),
         loss1_fc2=Linear(1024, 1000, **kwargs),
         loss2_conv=Convolution2D(528, 128, 1, **kwargs),
         loss2_fc1=Linear(2048, 1024, **kwargs),
         loss2_fc2=Linear(1024, 1000, **kwargs))
     if pretrained_model == 'auto':
         _retrieve(
             'bvlc_googlenet.npz',
             'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel',
             self)
     elif pretrained_model:
         npz.load_npz(pretrained_model, self)
     self.functions = OrderedDict([
         ('conv1', [self.conv1, relu]),
         ('pool1', [_max_pooling_2d, _local_response_normalization]),
         ('conv2_reduce', [self.conv2_reduce, relu]),
         ('conv2', [self.conv2, relu, _local_response_normalization]),
         ('pool2', [_max_pooling_2d]),
         ('inception_3a', [self.inc3a]),
         ('inception_3b', [self.inc3b]),
         ('pool3', [_max_pooling_2d]),
         ('inception_4a', [self.inc4a]),
         ('inception_4b', [self.inc4b]),
         ('inception_4c', [self.inc4c]),
         ('inception_4d', [self.inc4d]),
         ('inception_4e', [self.inc4e]),
         ('pool4', [_max_pooling_2d]),
         ('inception_5a', [self.inc5a]),
         ('inception_5b', [self.inc5b]),
         ('pool5', [_average_pooling_2d_k7]),
         ('loss3_fc', [_dropout, self.loss3_fc]),
         ('prob', [softmax]),
         # Since usually the following outputs are not used, they are put
         # after 'prob' to be skipped for efficiency.
         ('loss1_fc2', [
             _average_pooling_2d_k5, self.loss1_conv, relu, self.loss1_fc1,
             relu, self.loss1_fc2
         ]),
         ('loss2_fc2', [
             _average_pooling_2d_k5, self.loss2_conv, relu, self.loss2_fc1,
             relu, self.loss2_fc2
         ])
     ])
Example #24
 def test_load_without_path(self):
     target = link.Chain(parent_linear=links.Linear(3, 2))
     npz.load_npz(self.temp_file_path, target, path='')
     numpy.testing.assert_array_equal(
         self.source_parent.parent_linear.W.data,
         target.parent_linear.W.data)
Example #25
 def _make_npz(
         self,
         caffe_path="./all_data/train_gaze_det/model/model/binary_w.caffemodel",
         npz_path="./all_data/train_gaze_det/model/model/GazeFollow.npz"):
     GazeFollow.convert_caffemodel_to_npz(caffe_path, npz_path)
     npz.load_npz(npz_path, self)
     return self
Example #26
    def __init__(self, pretrained_model='auto'):
        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            init = constant.Zero()
            kwargs = {'initialW': init, 'initial_bias': init}
        else:
            # employ default initializers used in the original paper
            kwargs = {
                'initialW': normal.Normal(0.01),
                'initial_bias': constant.Zero(),
            }
        super(VGG16Layers, self).__init__(
            conv1_1=Convolution2D(3, 64, 3, 1, 1, **kwargs),
            conv1_2=Convolution2D(64, 64, 3, 1, 1, **kwargs),
            conv2_1=Convolution2D(64, 128, 3, 1, 1, **kwargs),
            conv2_2=Convolution2D(128, 128, 3, 1, 1, **kwargs),
            conv3_1=Convolution2D(128, 256, 3, 1, 1, **kwargs),
            conv3_2=Convolution2D(256, 256, 3, 1, 1, **kwargs),
            conv3_3=Convolution2D(256, 256, 3, 1, 1, **kwargs),
            conv4_1=Convolution2D(256, 512, 3, 1, 1, **kwargs),
            conv4_2=Convolution2D(512, 512, 3, 1, 1, **kwargs),
            conv4_3=Convolution2D(512, 512, 3, 1, 1, **kwargs),
            conv5_1=Convolution2D(512, 512, 3, 1, 1, **kwargs),
            conv5_2=Convolution2D(512, 512, 3, 1, 1, **kwargs),
            conv5_3=Convolution2D(512, 512, 3, 1, 1, **kwargs),
            fc6=Linear(512 * 7 * 7, 4096, **kwargs),
            fc7=Linear(4096, 4096, **kwargs),
            fc8=Linear(4096, 1000, **kwargs),
        )
        if pretrained_model == 'auto':
            _retrieve(
                'VGG_ILSVRC_16_layers.npz',
                'http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
                'caffe/VGG_ILSVRC_16_layers.caffemodel',
                self)
        elif pretrained_model:
            npz.load_npz(pretrained_model, self)

        self.functions = collections.OrderedDict([
            ('conv1_1', [self.conv1_1, relu]),
            ('conv1_2', [self.conv1_2, relu]),
            ('pool1', [_max_pooling_2d]),
            ('conv2_1', [self.conv2_1, relu]),
            ('conv2_2', [self.conv2_2, relu]),
            ('pool2', [_max_pooling_2d]),
            ('conv3_1', [self.conv3_1, relu]),
            ('conv3_2', [self.conv3_2, relu]),
            ('conv3_3', [self.conv3_3, relu]),
            ('pool3', [_max_pooling_2d]),
            ('conv4_1', [self.conv4_1, relu]),
            ('conv4_2', [self.conv4_2, relu]),
            ('conv4_3', [self.conv4_3, relu]),
            ('pool4', [_max_pooling_2d]),
            ('conv5_1', [self.conv5_1, relu]),
            ('conv5_2', [self.conv5_2, relu]),
            ('conv5_3', [self.conv5_3, relu]),
            ('pool5', [_max_pooling_2d]),
            ('fc6', [self.fc6, relu, dropout]),
            ('fc7', [self.fc7, relu, dropout]),
            ('fc8', [self.fc8]),
            ('prob', [softmax]),
        ])
Example #27
def _make_npz(path_npz, url, model):
    path_caffemodel = download.cached_download(url)
    print('Now loading caffemodel (usually it may take a few minutes)')
    GoogLeNet.convert_caffemodel_to_npz(path_caffemodel, path_npz)
    npz.load_npz(path_npz, model)
    return model
Example #28
def main():
    parser = argparse.ArgumentParser(description='Train Encoder')
    parser.add_argument('--batchsize', '-b', type=int, default=64,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dataset', '-i', default='data/celebA/',
                        help='Directory of image files.')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--snapshot_interval', type=int, default=10000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval', type=int, default=1000,
                        help='Interval of displaying log to console')
    parser.add_argument('--gen', default='gen.npz')
    parser.add_argument('--enc', default=None)
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# batchsize: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    gen = Generator()
    npz.load_npz(args.gen, gen)
    enc = Encoder()
    if args.enc is not None:
        npz.load_npz(args.enc, enc)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()
        enc.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0005, beta1=0.9):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001), 'hook_dec')
        return optimizer
    opt_gen = make_optimizer(gen)
    gen.disable_update()
    opt_enc = make_optimizer(enc)

    # Setup a dataset
    all_files = os.listdir(args.dataset)
    image_files = [f for f in all_files if ('png' in f or 'jpg' in f)]
    print('{} contains {} image files'.format(args.dataset, len(image_files)))
    train = CelebADataset(paths=image_files, root=args.dataset)

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)

    # Set up a trainer
    updater = EncUpdater(
        models=(gen, enc),
        iterator=train_iter,
        optimizer={'gen': opt_gen, 'enc': opt_enc},
        device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    trainer.extend(
        extensions.snapshot(filename='snapshot_enc_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.ExponentialShift(
        'alpha', 0.5, optimizer=opt_enc), trigger=(10, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        enc, 'enc_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval, log_name='train_enc.log'))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'enc/loss',
    ]), trigger=display_interval)
    trainer.extend(extensions.PlotReport(
        ['enc/loss'], trigger=display_interval, file_name='enc-loss.png'))
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
Example #29
def main():
    config = get_config()
    # print("configured as follows:")
    # print(yaml_dump(config))
    while True:
        s = input("ok? (y/n):")
        if s == 'y' or s == 'Y':
            log_config(config, "training start")
            break
        elif s == 'n' or s == 'N':
            destroy_config(config)
            exit(1)
    try:
        try:
            print("mask loading...")
            load_mask_module = import_module(
                config["additional information"]["mask"]["loader"]["module"],
                config["additional information"]["mask"]["loader"]["package"])
            load_mask = getattr(
                load_mask_module,
                config["additional information"]["mask"]["loader"]["function"])
            mask = load_mask(
                **config["additional information"]["mask"]["loader"]["params"])
            print("done.")
            print("mask.shape: {}".format(mask.shape))
        except FileNotFoundError as e:
            raise e

        model_module = import_module(config["model"]["module"],
                                     config["model"]["package"])
        Model = getattr(model_module, config["model"]["class"])
        model = Model(mask=mask, **config["model"]["params"])
        finetune_config = config["additional information"].get("finetune")
        if finetune_config is not None:
            load_npz(path.join(finetune_config["directory"],
                               finetune_config["file"]),
                     model,
                     strict=False)

        try:
            chainer.cuda.get_device_from_id(0).use()
            gpu = 0
            print("transferring model to GPU...")
            model.to_gpu(gpu)
            print("GPU enabled")
        except RuntimeError:
            gpu = -1
            print("GPU disabled")

        dataset_module = import_module(config["dataset"]["module"],
                                       config["dataset"]["package"])
        Dataset = getattr(dataset_module, config["dataset"]["class"])
        train_dataset = Dataset(**config["dataset"]["train"]["params"])
        valid_dataset = Dataset(**config["dataset"]["valid"]["params"])

        train_iterator = Iterator(train_dataset, config["batch"]["train"],
                                  True, True)
        valid_iterator = Iterator(valid_dataset, config["batch"]["valid"],
                                  False, False)

        Optimizer = getattr(chainer.optimizers, config["optimizer"]["class"])
        optimizer = Optimizer(**config["optimizer"]["params"])

        optimizer.setup(model)

        for hook_config in config["optimizer"]["hook"]:
            hook_module = import_module(hook_config["module"],
                                        hook_config["package"])
            Hook = getattr(hook_module, hook_config["class"])
            hook = Hook(**hook_config["params"])
            optimizer.add_hook(hook)

        updater = Updater(train_iterator, optimizer, device=gpu)

        trainer = Trainer(updater, **config["trainer"]["params"])
        trainer.extend(snapshot(),
                       trigger=config["trainer"]["snapshot_interval"])
        trainer.extend(snapshot_object(model,
                                       "model_iter_{.updater.iteration}"),
                       trigger=config["trainer"]["model_interval"])
        trainer.extend(observe_lr(), trigger=config["trainer"]["log_interval"])
        trainer.extend(
            LogReport(
                ["epoch", "iteration", "main/loss", "validation/main/loss"],
                trigger=config["trainer"]["log_interval"]))
        trainer.extend(Evaluator(valid_iterator, model, device=gpu),
                       trigger=config["trainer"]["eval_interval"])
        trainer.extend(PrintReport(
            ["epoch", "iteration", "main/loss", "validation/main/loss"]),
                       trigger=config["trainer"]["log_interval"])
        trainer.extend(ProgressBar(update_interval=1))

        if "schedule" in config["additional information"].keys():
            for i, interval_funcs in enumerate(
                    config["additional information"]["schedule"].items()):
                interval, funcs = interval_funcs
                f = lambda trainer, funcs=funcs: [
                    trainer.updater.get_optimizer('main').target.
                    __getattribute__(func["function"])(*func["params"])
                    for func in funcs
                ]
                trainer.extend(f,
                               name="schedule_{}".format(i),
                               trigger=ManualScheduleTrigger(*interval))
        trainer.run()
        log_config(config, "succeeded")

    except Exception as e:
        log_config(config, "unintentional termination")
        raise e
Example #30
    def __init__(self, pretrained_model='auto'):
        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            kwargs = {'initialW': constant.Zero()}
        else:
            # employ default initializers used in BVLC. For more detail, see
            # https://github.com/pfnet/chainer/pull/2424#discussion_r109642209
            kwargs = {'initialW': uniform.LeCunUniform(scale=1.0)}
        super(GoogLeNet, self).__init__(
            conv1=Convolution2D(3, 64, 7, stride=2, pad=3, **kwargs),
            conv2_reduce=Convolution2D(64, 64, 1, **kwargs),
            conv2=Convolution2D(64, 192, 3, stride=1, pad=1, **kwargs),
            inc3a=Inception(192, 64, 96, 128, 16, 32, 32),
            inc3b=Inception(256, 128, 128, 192, 32, 96, 64),
            inc4a=Inception(480, 192, 96, 208, 16, 48, 64),
            inc4b=Inception(512, 160, 112, 224, 24, 64, 64),
            inc4c=Inception(512, 128, 128, 256, 24, 64, 64),
            inc4d=Inception(512, 112, 144, 288, 32, 64, 64),
            inc4e=Inception(528, 256, 160, 320, 32, 128, 128),
            inc5a=Inception(832, 256, 160, 320, 32, 128, 128),
            inc5b=Inception(832, 384, 192, 384, 48, 128, 128),
            loss3_fc=Linear(1024, 1000, **kwargs),

            loss1_conv=Convolution2D(512, 128, 1, **kwargs),
            loss1_fc1=Linear(2048, 1024, **kwargs),
            loss1_fc2=Linear(1024, 1000, **kwargs),

            loss2_conv=Convolution2D(528, 128, 1, **kwargs),
            loss2_fc1=Linear(2048, 1024, **kwargs),
            loss2_fc2=Linear(1024, 1000, **kwargs)
        )
        if pretrained_model == 'auto':
            _retrieve(
                'bvlc_googlenet.npz',
                'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel',
                self)
        elif pretrained_model:
            npz.load_npz(pretrained_model, self)
        self.functions = collections.OrderedDict([
            ('conv1', [self.conv1, relu]),
            ('pool1', [_max_pooling_2d, _local_response_normalization]),
            ('conv2_reduce', [self.conv2_reduce, relu]),
            ('conv2', [self.conv2, relu, _local_response_normalization]),
            ('pool2', [_max_pooling_2d]),
            ('inception_3a', [self.inc3a]),
            ('inception_3b', [self.inc3b]),
            ('pool3', [_max_pooling_2d]),
            ('inception_4a', [self.inc4a]),
            ('inception_4b', [self.inc4b]),
            ('inception_4c', [self.inc4c]),
            ('inception_4d', [self.inc4d]),
            ('inception_4e', [self.inc4e]),
            ('pool4', [_max_pooling_2d]),
            ('inception_5a', [self.inc5a]),
            ('inception_5b', [self.inc5b]),
            ('pool5', [_average_pooling_2d_k7]),
            ('loss3_fc', [_dropout, self.loss3_fc]),
            ('prob', [softmax]),
            # Since usually the following outputs are not used, they are put
            # after 'prob' to be skipped for efficiency.
            ('loss1_fc2', [_average_pooling_2d_k5, self.loss1_conv, relu,
                           self.loss1_fc1, relu, self.loss1_fc2]),
            ('loss2_fc2', [_average_pooling_2d_k5, self.loss2_conv, relu,
                           self.loss2_fc1, relu, self.loss2_fc2])
        ])
Example #31
            self.conv3 = L.Convolution2D(None, 384, 3, pad=1)
            self.conv4 = L.Convolution2D(None, 384, 3, pad=1)
            self.conv5 = L.Convolution2D(None, 256, 3, pad=1)
            self.fc6 = L.Linear(None, 4096)
            self.fc7 = L.Linear(None, 4096)
            self.fc8 = L.Linear(None, nb_class)

    def __call__(self, x, t):
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)
        return h


if __name__ == "__main__":
    caffemodel = CaffeFunction("bvlc_alexnet.caffemodel")
    npz.save_npz("alexnet.npz", caffemodel, compression=False)
    alexnet = Alex()
    npz.load_npz("alexnet.npz", alexnet)
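Example #32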
def main():
    attr_columns = [
        '5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes',
        'Bald', 'Bangs', 'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair',
        'Blurry', 'Brown_Hair', 'Bushy_Eyebrows', 'Chubby', 'Double_Chin',
        'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones',
        'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard',
        'Oval_Face', 'Pale_Skin', 'Pointy_Nose', 'Receding_Hairline',
        'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair',
        'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick',
        'Wearing_Necklace', 'Wearing_Necktie', 'Young'
    ]

    parser = argparse.ArgumentParser(description='Get Attribute Vector')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=512,
                        help='Number of images in each mini-batch')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dataset',
                        '-i',
                        default='data/celebA/',
                        help='Directory of image files.')
    parser.add_argument('--attr_list',
                        '-a',
                        default='data/list_attr_celeba.txt')
    parser.add_argument('--get_attr',
                        default='all',
                        nargs='+',
                        choices=attr_columns + ['all'])
    parser.add_argument('--outfile', '-o', default='attr_vec.json')
    parser.add_argument('--enc', default='pre-trained/enc_iter_310000.npz')
    args = parser.parse_args()

    enc = Encoder()
    npz.load_npz(args.enc, enc)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        enc.to_gpu()

    all_files = os.listdir(args.dataset)
    image_files = [f for f in all_files if ('png' in f or 'jpg' in f)]

    vectors = {}
    attr_df = pd.read_csv(args.attr_list, delim_whitespace=True, header=1)
    if args.get_attr == 'all' or args.get_attr == ['all']:
        args.get_attr = attr_columns
    for attr_name in tqdm(list(set(args.get_attr) & set(attr_df.columns))):
        with_attr_files = attr_df[attr_df[attr_name] == 1].index.tolist()
        with_attr_files = list(set(with_attr_files) & set(image_files))
        with_attr_vec = get_vector(enc, with_attr_files, args)

        without_attr_files = attr_df[attr_df[attr_name] != 1].index.tolist()
        without_attr_files = list(set(without_attr_files) & set(image_files))
        without_attr_vec = get_vector(enc, without_attr_files, args)

        vectors[attr_name] = (with_attr_vec - without_attr_vec).tolist()

    with open(args.outfile, 'w') as f:
        f.write(
            json.dumps(vectors,
                       indent=4,
                       sort_keys=True,
                       separators=(',', ': ')))
Example #33
def main(id):
    model_path = "/efs/fMRI_AE/SimpleFCAE_E32D32/model/model_iter_108858"

    gpu = 0
    get_device_from_id(gpu).use()
    """NibDataset
    def __init__(self, directory: str, crop: list):
    """
    crop = [[9, 81], [11, 99], [0, 80]]
    test_dataset = NibDataset("/data/test", crop=crop)

    mask = load_mask_nib("/data/mask/average_optthr.nii", crop)
    """SimpleFCAE_E32D32
    def __init__(self, mask, r: int, in_mask: str, out_mask: str):
    """
    model = Model(mask, 2, "mask", "mask")
    load_npz(model_path, model)
    model.to_gpu()

    # feature_idx = 0
    # feature_idx = (0, 4, 5, 5) # == [0, 9/2, 11/2, 10/2]
    # feature_idx = (0, 1, 1, 1)
    feature_idx = (0, 2, 7, 4)
    resample_size = 100
    batch_size = 10
    noise_level = 0.2

    for i in range(len(test_dataset)):
        if i % 8 != id:
            continue
        print("{:4}/{:4}".format(i, len(test_dataset)))
        subject = test_dataset.get_subject(i)
        frame = test_dataset.get_frame(i)
        test_img = xp.asarray(test_dataset[i])

        resample_remain = resample_size
        resample_processed = 0
        ret = xp.zeros(test_img.shape)
        while resample_remain > 0:
            batch_size_this_loop = min(batch_size, resample_remain)
            resample_remain -= batch_size_this_loop

            batch = xp.broadcast_to(
                test_img, chain((batch_size_this_loop, ), test_img.shape))
            sigma = noise_level / (xp.max(test_img) - xp.min(test_img))
            # broadcast_to returns a (read-only) view, so add out-of-place.
            batch = batch + sigma * xp.random.randn(*batch.shape)

            x = Variable(batch)

            feature = model.extract(x)
            assert feature.shape == (batch_size_this_loop, 1, 9, 11, 10)
            feature = F.sum(feature, axis=0)
            assert feature.shape == (1, 9, 11, 10)
            feature = F.get_item(feature, feature_idx)
            feature.backward()
            grad = xp.mean(x.grad, axis=0)
            ret = (ret * resample_processed + grad * batch_size_this_loop) / (
                resample_processed + batch_size_this_loop)
            model.cleargrads()

        xp.save(
            "/efs/fMRI_AE/SimpleFCAE_E32D32/grad/sensitivity_map_feature_{}_{}_{}_subject{:03d}_frame{:03d}"
            .format(feature_idx[1], feature_idx[2], feature_idx[3], subject,
                    frame), ret)
Example #34
    def __init__(self, pretrained_model='auto'):
        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            init = constant.Zero()
            kwargs = {'initialW': init, 'initial_bias': init}
        else:
            # employ default initializers used in the original paper
            kwargs = {
                'initialW': normal.Normal(0.01),
                'initial_bias': constant.Zero(),
            }
        super(VGG16Layers, self).__init__()

        with self.init_scope():
            self.conv1_1 = Convolution2D(3, 64, 3, 1, 1, **kwargs)
            self.conv1_2 = Convolution2D(64, 64, 3, 1, 1, **kwargs)
            self.conv2_1 = Convolution2D(64, 128, 3, 1, 1, **kwargs)
            self.conv2_2 = Convolution2D(128, 128, 3, 1, 1, **kwargs)
            self.conv3_1 = Convolution2D(128, 256, 3, 1, 1, **kwargs)
            self.conv3_2 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv3_3 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv4_1 = Convolution2D(256, 512, 3, 1, 1, **kwargs)
            self.conv4_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv4_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_1 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.fc6 = Linear(512 * 7 * 7, 4096, **kwargs)
            self.fc7 = Linear(4096, 4096, **kwargs)
            self.fc8 = Linear(4096, 1000, **kwargs)

        if pretrained_model == 'auto':
            _retrieve(
                'VGG_ILSVRC_16_layers.npz',
                'http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
                'caffe/VGG_ILSVRC_16_layers.caffemodel', self)
        elif pretrained_model:
            npz.load_npz(pretrained_model, self)

        self.functions = collections.OrderedDict([
            ('conv1_1', [self.conv1_1, relu]),
            ('conv1_2', [self.conv1_2, relu]),
            ('pool1', [_max_pooling_2d]),
            ('conv2_1', [self.conv2_1, relu]),
            ('conv2_2', [self.conv2_2, relu]),
            ('pool2', [_max_pooling_2d]),
            ('conv3_1', [self.conv3_1, relu]),
            ('conv3_2', [self.conv3_2, relu]),
            ('conv3_3', [self.conv3_3, relu]),
            ('pool3', [_max_pooling_2d]),
            ('conv4_1', [self.conv4_1, relu]),
            ('conv4_2', [self.conv4_2, relu]),
            ('conv4_3', [self.conv4_3, relu]),
            ('pool4', [_max_pooling_2d]),
            ('conv5_1', [self.conv5_1, relu]),
            ('conv5_2', [self.conv5_2, relu]),
            ('conv5_3', [self.conv5_3, relu]),
            ('pool5', [_max_pooling_2d]),
            ('fc6', [self.fc6, relu, dropout]),
            ('fc7', [self.fc7, relu, dropout]),
            ('fc8', [self.fc8]),
            ('prob', [softmax]),
        ])
Example #35
 def test_load_with_path(self):
     target = link.Chain(child_linear=links.Linear(2, 3))
     npz.load_npz(self.temp_file_path, target, 'child/')
     numpy.testing.assert_array_equal(
         self.source_child.child_linear.W.data, target.child_linear.W.data)
Example #36
    def __init__(self,
                 pretrained_model='auto',
                 n_channels=3,
                 n_outputs=101,
                 mean_path='datasets/models/mean2.npz'):
        super(C3DVersion1UCF101, self).__init__()
        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            init = constant.Zero()
            conv_kwargs = {'initialW': init, 'initial_bias': init}
            fc_kwargs = conv_kwargs
        else:
            # employ default initializers used in the original paper
            conv_kwargs = {
                'initialW': normal.Normal(0.01),
                'initial_bias': constant.Zero(),
            }
            fc_kwargs = {
                'initialW': normal.Normal(0.005),
                'initial_bias': constant.One(),
            }
        with self.init_scope():
            self.conv1a = ConvolutionND(3, n_channels, 64, 3, 1, 1,
                                        **conv_kwargs)
            self.conv2a = ConvolutionND(3, 64, 128, 3, 1, 1, **conv_kwargs)
            self.conv3a = ConvolutionND(3, 128, 256, 3, 1, 1, **conv_kwargs)
            self.conv3b = ConvolutionND(3, 256, 256, 3, 1, 1, **conv_kwargs)
            self.conv4a = ConvolutionND(3, 256, 512, 3, 1, 1, **conv_kwargs)
            self.conv4b = ConvolutionND(3, 512, 512, 3, 1, 1, **conv_kwargs)
            self.conv5a = ConvolutionND(3, 512, 512, 3, 1, 1, **conv_kwargs)
            self.conv5b = ConvolutionND(3, 512, 512, 3, 1, 1, **conv_kwargs)
            self.fc6 = Linear(512 * 4 * 4, 4096, **fc_kwargs)
            self.fc7 = Linear(4096, 4096, **fc_kwargs)
            self.fc8 = Linear(4096, n_outputs, **fc_kwargs)
        if pretrained_model == 'auto':
            _retrieve(
                'conv3d_deepnetA_ucf.npz', 'http://vlg.cs.dartmouth.edu/c3d/'
                'c3d_ucf101_finetune_whole_iter_20000', self)
        elif pretrained_model:
            npz.load_npz(pretrained_model, self)

        self.pre = ConvolutionND(3, n_channels, n_channels, 1, 1, 0,
                                 nobias=True, **conv_kwargs)
        self.pre.W.data[:] = 0
        self.pre.W.data[[0, 1, 2], [2, 1, 0]] = 128
        # self.pre.b.data[:] = 128 - numpy.array([90.25164795, 97.65701294, 101.4083252])
        self.mean = Bias(shape=(3, 16, 112, 112))
        mean = numpy.load(mean_path)['mean']
        self.mean.b.data[:] = 128 - mean[:, :, 8:8 + 112, 8:8 + 112]
        self.functions = collections.OrderedDict([
            ('pre', [self.pre, _resize, self.mean]),
            ('conv1a', [self.conv1a, relu]),
            ('pool1', [_max_pooling_2d]),
            ('conv2a', [self.conv2a, relu]),
            ('pool2', [_max_pooling_3d]),
            ('conv3a', [self.conv3a, relu]),
            ('conv3b', [self.conv3b, relu]),
            ('pool3', [_max_pooling_3d]),
            ('conv4a', [self.conv4a, relu]),
            ('conv4b', [self.conv4b, relu]),
            ('pool4', [_max_pooling_3d]),
            ('conv5a', [self.conv5a, relu]),
            ('conv5b', [self.conv5b, relu]),
            ('pool5', [_max_pooling_3d, dropout]),
            ('fc6', [self.fc6, relu, dropout]),
            ('fc7', [self.fc7, relu, dropout]),
            ('fc8', [self.fc8]),
            ('prob', [softmax]),
        ])
Example #37
def _retrieve(name, url, model):
    root = download.get_dataset_directory('pfnet/chainer/models/')
    path = os.path.join(root, name)
    return download.cache_or_load_file(
        path, lambda path: _make_npz(path, url, model),
        lambda path: npz.load_npz(path, model))
Example #38
def main():
    experiment_name = "Stacked_16_16_16_16"
    snapshot_name = "snapshot_iter_27215"
    config_path = "/efs/fMRI_AE/{}/log/config.yml".format(experiment_name)
    config = load_config(config_path)
    config["additional information"]["mask"]["loader"]["params"][
        "mask_path"] = path.join(
            config["additional information"]["mask"]["directory"],
            config["additional information"]["mask"]["file"])
    config["additional information"]["mask"]["loader"]["params"][
        "crop"] = config["additional information"]["crop"]
    snapshot_path = "/efs/fMRI_AE/{}/model/{}".format(experiment_name,
                                                      snapshot_name)
    # print("configured as follows:")
    # print(yaml_dump(config))
    while True:
        s = input("ok? (y/n):")
        if s == 'y' or s == 'Y':
            log_config(config, "training start")
            break
        elif s == 'n' or s == 'N':
            destroy_config(config)
            exit(1)
    try:
        try:
            print("mask loading...")
            load_mask_module = import_module(
                config["additional information"]["mask"]["loader"]["module"],
                config["additional information"]["mask"]["loader"]["package"])
            load_mask = getattr(
                load_mask_module,
                config["additional information"]["mask"]["loader"]["function"])
            mask = load_mask(
                **config["additional information"]["mask"]["loader"]["params"])
            print("done.")
            print("mask.shape: {}".format(mask.shape))
        except FileNotFoundError as e:
            raise e

        model_module = import_module(config["model"]["module"],
                                     config["model"]["package"])
        Model = getattr(model_module, config["model"]["class"])
        model = Model(mask=mask, **config["model"]["params"])
        finetune_config = config["additional information"]["finetune"]
        if finetune_config is not None:
            load_npz(path.join(finetune_config["directory"],
                               finetune_config["file"]),
                     model,
                     strict=False)

        try:
            chainer.cuda.get_device_from_id(0).use()
            gpu = 0
            print("transferring model to GPU...")
            model.to_gpu(gpu)
            print("GPU enabled")
        except RuntimeError:
            gpu = -1
            print("GPU disabled")

        dataset_module = import_module(config["dataset"]["module"],
                                       config["dataset"]["package"])
        Dataset = getattr(dataset_module, config["dataset"]["class"])
        train_dataset = Dataset(**config["dataset"]["train"]["params"])
        valid_dataset = Dataset(**config["dataset"]["valid"]["params"])

        train_iterator = Iterator(train_dataset, config["batch"]["train"],
                                  True, True)
        valid_iterator = Iterator(valid_dataset, config["batch"]["valid"],
                                  False, False)

        Optimizer = getattr(chainer.optimizers, config["optimizer"]["class"])
        optimizer = Optimizer(**config["optimizer"]["params"])

        optimizer.setup(model)

        for hook_config in config["optimizer"]["hook"]:
            hook_module = import_module(hook_config["module"],
                                        hook_config["package"])
            Hook = getattr(hook_module, hook_config["class"])
            hook = Hook(**hook_config["params"])
            optimizer.add_hook(hook)

        updater = Updater(train_iterator, optimizer, device=gpu)

        trainer = Trainer(updater, **config["trainer"]["params"])
        trainer.extend(snapshot(),
                       trigger=config["trainer"]["snapshot_interval"])
        trainer.extend(snapshot_object(model,
                                       "model_iter_{.updater.iteration}"),
                       trigger=config["trainer"]["model_interval"])
        trainer.extend(observe_lr(), trigger=config["trainer"]["log_interval"])
        trainer.extend(
            LogReport([
                "epoch", "iteration", "main/loss", "main/pca_loss",
                "main/reconstruction_loss", "validation/main/loss"
            ],
                      trigger=config["trainer"]["log_interval"]))
        trainer.extend(Evaluator(valid_iterator, model, device=gpu),
                       trigger=config["trainer"]["eval_interval"])
        trainer.extend(PrintReport([
            "epoch", "iteration", "main/loss", "main/pca_loss",
            "main/reconstruction_loss", "validation/main/loss"
        ]),
                       trigger=config["trainer"]["log_interval"])
        trainer.extend(ProgressBar(update_interval=1))

        if "schedule" in config["additional information"].keys():
            for i, interval_funcs in enumerate(
                    config["additional information"]["schedule"].items()):
                interval, funcs = interval_funcs
                f = lambda trainer, funcs=funcs: [
                    trainer.updater.get_optimizer('main').target.
                    __getattribute__(func["function"])(*func["params"])
                    for func in funcs
                ]
                trainer.extend(f,
                               name="schedule_{}".format(i),
                               trigger=ManualScheduleTrigger(*interval))

        load_npz(snapshot_path, trainer)
        target = trainer.updater.get_optimizer("main").target
        target.reset_pca()
        target.attach_pca()
        ipca_param = np.load(
            "/efs/fMRI_AE/Stacked_8_8_8_8_feature/ipca_mean_7920_components_990_7920.npz"
        )
        target.pca.W = chainer.Parameter(ipca_param["components"])
        target.pca.bias = chainer.Parameter(ipca_param["mean"])
        target.pca.disable_update()
        target.pca.to_gpu(gpu)
        target.detach_pca_loss()
        target.attach_reconstruction_loss()
        target.release_decoder()
        target.freeze_encoder()

        trainer.run()
        log_config(config, "succeeded")

    except Exception as e:
        log_config(config, "unintentional termination")
        raise e