Example #1
    def load_model(self):
        # model_file = osp.join(self.data_dir, 'max_miou.npz')
        model_file = osp.join(self.data_dir, 'max_depth_acc.npz')
        with open(osp.join(self.data_dir, 'model.txt'), 'r') as f:
            model_name = f.readline().rstrip()
        with open(osp.join(self.data_dir, 'n_class.txt'), 'r') as f:
            n_class = int(f.readline().rstrip())
        self.n_class = n_class
        with open(osp.join(self.data_dir, 'num_view.txt'), 'r') as f:
            num_view = int(f.readline().rstrip())
        self.num_view = num_view

        if model_name == 'FCN8sAtOnceInputRGBD':
            self.model = FCN8sAtOnceInputRGBD(n_class=n_class,
                                              masking=True,
                                              concat=True,
                                              no_bp_before_rgb_pool5=False)
        elif model_name == 'FCN8sAtOnceConcatAtOnce':
            self.model = FCN8sAtOnceConcatAtOnce(n_class=n_class,
                                                 masking=True,
                                                 no_bp_before_rgb_pool5=False)

        print('\nLoading trained model:          {0}'.format(model_file))
        S.load_npz(model_file, self.model)
        print('Finished loading trained model: {0}\n'.format(model_file))

        if self.gpu >= 0:
            cuda.get_device_from_id(self.gpu).use()
            self.model.to_gpu()
        if LooseVersion(chainer.__version__) < LooseVersion('2.0.0'):
            self.model.train = False
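
Nearly every snippet on this page repeats the same device-selection idiom. The following is a minimal, self-contained sketch of that pattern, not any particular project's code; the Linear link and the gpu value are placeholders, and the GPU branch assumes CuPy is installed.

import numpy as np
import chainer.links as L
from chainer import cuda

model = L.Linear(10, 2)  # stand-in for any chainer.Chain

gpu = 0  # -1 to stay on CPU
if gpu >= 0:
    cuda.get_device_from_id(gpu).use()  # make this GPU the current device
    model.to_gpu()                      # copy the parameters to the current device
xp = model.xp                           # numpy on CPU, cupy on GPU
x = xp.zeros((1, 10), dtype=np.float32)
y = model(x)
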
    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 image - pythonspot.com'
        self.left = 300
        self.top = 300
        self.width = 900
        self.height = 600
        self.gen = network.Generator(depth=depth)
        serializers.load_npz(gen_path, self.gen)
        # CSV data (random placeholder values)
        self.csv_file = [[random.random() for j in range(csv_len)]]
        self.vec2rand_model = load_model(vec2rand_model_path)
        # use the GPU if one is available
        if gpu >= 0:
            cuda.get_device_from_id(0).use()
            self.gen.to_gpu()
        self.xp = self.gen.xp
        self.initUI()
        # generate an image
        z = self.xp.random.randn(1, 512, 1, 1).astype('f')
        x = self.gen(z, alpha=1.0)
        x = chainer.cuda.to_cpu(x.data)
        img = x[0].copy()
        utils.save_image(img, 'temp.jpg')
        _img = Image.open('temp.jpg')
        self.img = np.asarray(_img)
        self.initFigure()
        self.show()
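
A hedged sketch of how a QWidget subclass like the one above is typically launched; the class name App is hypothetical and stands for whatever class defines this __init__.

import sys
from PyQt5.QtWidgets import QApplication

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()  # hypothetical name for the widget class above
    sys.exit(app.exec_())
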
Example #3
def dump_bilm_embeddings(vocab_file,
                         dataset_file,
                         options_file,
                         weight_file,
                         outfile,
                         gpu=-1,
                         batchsize=32):
    with open(options_file, 'r') as fin:
        options = json.load(fin)
    max_word_length = options['char_cnn']['max_characters_per_token']

    vocab = UnicodeCharsVocabulary(vocab_file, max_word_length)
    batcher = Batcher(vocab_file, max_word_length)

    model = Elmo(options_file,
                 weight_file,
                 num_output_representations=1,
                 requires_grad=False,
                 do_layer_norm=False,
                 dropout=0.)
    if gpu >= 0:
        cuda.get_device_from_id(gpu).use()
        model.to_gpu()

    # (batch_size, timesteps, 50)
    # TODO(sosk): preencoding token embedding for acceleration
    with chainer.using_config("train", False), \
            chainer.no_backprop_mode():
        sentence_id = 0
        n_lines = sum([1 for _ in open(dataset_file, 'r')])
        with open(dataset_file, 'r') as fin, h5py.File(outfile, 'w') as fout:
            for minibatch in minibatch_iterator(tqdm.tqdm(fin, total=n_lines),
                                                batchsize):
                sentences = [line.strip().split() for line in minibatch]
                char_ids = batcher.batch_sentences(sentences,
                                                   add_bos_eos=False)
                char_ids = model.xp.asarray(char_ids)
                mb_outs = model.forward(char_ids)
                mb_embedding_layers = mb_outs['elmo_layers']
                # [(batch_size, max_sequence_length, embedding_dim), ..., x n_layers]
                # Note that the embedding layers have already dropped bos & eos,
                # but they still contain padding
                mb_mask = mb_outs['mask']
                mb_concat_embedding_layers = cuda.to_cpu(
                    model.xp.stack(
                        [mb_emb.array for mb_emb in mb_embedding_layers],
                        axis=1))
                # (batch_size, n_layers=3, max_sequence_length, embedding_dim)
                for mask, concat_embedding_layers in zip(
                        mb_mask, mb_concat_embedding_layers):
                    # remove pads
                    length = int(mask.sum())
                    concat_embedding_layers = concat_embedding_layers[:, :
                                                                      length]
                    # (n_layers=3, sequence_length, embedding_dim)
                    ds = fout.create_dataset('{}'.format(sentence_id),
                                             concat_embedding_layers.shape,
                                             dtype='float32',
                                             data=concat_embedding_layers)
                    sentence_id += 1
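
A hedged usage sketch for the function above; every path is hypothetical, and the options/weight files are assumed to be in the format the Elmo wrapper expects. Each input sentence ends up in the HDF5 file under its line index as a (n_layers=3, sequence_length, embedding_dim) float32 dataset.

dump_bilm_embeddings(
    vocab_file='vocab.txt',
    dataset_file='sentences.txt',
    options_file='elmo_options.json',
    weight_file='elmo_weights.hdf5',
    outfile='sentence_embeddings.hdf5',
    gpu=0,
    batchsize=32)
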
Example #4
    def __call__(self, x, test=False, finetune=False):
        """Invokes the forward propagation of BatchNormalization.

        BatchNormalization accepts additional arguments, which control three
        different running modes.

        Args:
            x (Variable): Input variable.
            test (bool): If ``True``, BatchNormalization runs in testing mode;
                it normalizes the input using pre-computed statistics.
            finetune (bool): If ``finetune`` is ``True`` and ``test`` is
                ``False``, BatchNormalization runs in fine-tuning mode; it
                accumulates the input array to compute population statistics
                for normalization, and normalizes the input using batch
                statistics.

        If ``test`` is ``False``, then BatchNormalization runs in training
        mode; it computes moving averages of mean and variance for evaluation
        during training, and normalizes the input using batch statistics.

        """
        if hasattr(self, 'gamma'):
            gamma = self.gamma
        else:
            with cuda.get_device_from_id(self._device_id):
                gamma = variable.Variable(self.xp.ones(
                    self.avg_mean.shape, dtype=x.dtype), volatile='auto')
        if hasattr(self, 'beta'):
            beta = self.beta
        else:
            with cuda.get_device_from_id(self._device_id):
                beta = variable.Variable(self.xp.zeros(
                    self.avg_mean.shape, dtype=x.dtype), volatile='auto')

        # Var is always ones
        with cuda.get_device_from_id(self._device_id):
            self.one_var = self.xp.ones(self.avg_mean.shape, dtype=x.dtype)

        if not test:
            if finetune:
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            func = batch_normalization.BatchNormalizationFunction(
                self.eps, self.avg_mean, self.avg_var, True, decay,
                self.use_cudnn)
            ret = func(x, gamma, beta)

            self.avg_mean[:] = func.running_mean
            self.avg_var[:] = func.running_var
        else:
            # Use running average statistics or fine-tuned statistics.
            mean = variable.Variable(self.avg_mean, volatile='auto')
            #var = variable.Variable(self.avg_var, volatile='auto')
            var = variable.Variable(self.one_var, volatile='auto')
            ret = batch_normalization.fixed_batch_normalization(
                x, gamma, beta, mean, var, self.eps, self.use_cudnn)
        return ret
    def load_model(self):
        n_class = len(self.train_dataset.class_names)
        if self.model_name == 'fcn32s':
            self.model = fcn.models.FCN32s(n_class=n_class)
            vgg = fcn.models.VGG16()
            vgg_path = vgg.download()
            S.load_npz(vgg_path, vgg)
            self.model.init_from_vgg16(vgg)
        elif self.model_name == 'fcn16s':
            self.model = fcn.models.FCN16s(n_class=n_class)
            fcn32s = fcn.models.FCN32s()
            fcn32s_path = fcn32s.download()
            S.load_npz(fcn32s_path, fcn32s)
            self.model.init_from_fcn32s(fcn32s_path, fcn32s)
        elif self.model_name == 'fcn8s':
            self.model = fcn.models.FCN8s(n_class=n_class)
            fcn16s = fcn.models.FCN16s()
            fcn16s_path = fcn16s.download()
            S.load_npz(fcn16s_path, fcn16s)
            self.model.init_from_fcn16s(fcn16s_path, fcn16s)
        elif self.model_name == 'fcn8s_at_once':
            self.model = fcn.models.FCN8sAtOnce(n_class=n_class)
            vgg = fcn.models.VGG16()
            vgg_path = vgg.download()
            S.load_npz(vgg_path, vgg)
            self.model.init_from_vgg16(vgg)
        else:
            raise ValueError(
                'Unsupported model_name: {}'.format(self.model_name))

        if self.gpu >= 0:
            cuda.get_device_from_id(self.gpu).use()
            self.model.to_gpu()
Example #6
def generate():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--gen', type=str, default=None)
    parser.add_argument('--depth', '-d', type=int, default=0)
    parser.add_argument('--out', '-o', type=str, default='img/')
    parser.add_argument('--num', '-n', type=int, default=10)
    args = parser.parse_args()

    gen = network.Generator(depth=args.depth)
    print('loading generator model from ' + args.gen)
    serializers.load_npz(args.gen, gen)

    if args.gpu >= 0:
        cuda.get_device_from_id(0).use()
        gen.to_gpu()

    xp = gen.xp

    z1 = gen.z(1)
    z2 = gen.z(1)

    for i in range(args.num):
        print(i)
        p = i / (args.num - 1)
        z = z1 * p + z2 * (1 - p)
        x = gen(z, alpha=1.0)
        x = chainer.cuda.to_cpu(x.data)

        img = x[0].copy()
        filename = os.path.join(args.out, 'gen_%04d.png' % i)
        utils.save_image(img, filename)
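
The loop above walks a straight line between the two latents z1 and z2. As a standalone NumPy sketch (the latent shape is a placeholder), the interpolation schedule it uses is:

import numpy as np

num = 10
z1 = np.random.randn(1, 512).astype(np.float32)  # hypothetical latent shape
z2 = np.random.randn(1, 512).astype(np.float32)
for i in range(num):
    p = i / (num - 1)          # 0.0 ... 1.0 inclusive
    z = z1 * p + z2 * (1 - p)  # starts at z2 (p=0) and ends at z1 (p=1)
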
Example #7
def test():

    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--model', '-m', type=str, default=None)
    parser.add_argument('--id', '-i', type=int, default=0)
    parser.add_argument('--inf', type=int, default=10)
    parser.add_argument('--outf', type=int, default=3)
    args = parser.parse_args()

    test = dataset.UCSDped1Dataset(0, 200, args.inf, args.outf,
                                   "./ucsd_ped1_test.npy")

    model = convlstm.Model(n_input=2, size=[128, 64, 64])

    if args.model is not None:
        print("loading model from " + args.model)
        serializers.load_npz(args.model, model)

    x, t = test[args.id]

    x = np.expand_dims(x, 0)
    t = np.expand_dims(t, 0)

    if args.gpu >= 0:
        cuda.get_device_from_id(0).use()
        model.to_gpu()
        x = cuda.cupy.array(x)
        t = cuda.cupy.array(t)

    print(x.shape)
    print(t.shape)

    res = model(Variable(x), Variable(t))
    print(res)
Example #8
    def __call__(self, c, h, x):
        """Returns new cell state and updated output of LSTM.

        Args:
            c (~chainer.Variable): Cell states of LSTM units.
            h (~chainer.Variable): Output at the previous time step.
            x (~chainer.Variable): A new batch from the input sequence.

        Returns:
            tuple of ~chainer.Variable: Returns ``(c_new, h_new)``, where
            ``c_new`` represents new cell state, and ``h_new`` is updated
            output of LSTM units.

        """
        if self.upward.W.data is None:
            in_size = x.size // x.shape[0]
            with cuda.get_device_from_id(self._device_id):
                self.upward._initialize_params(in_size)
                self._initialize_params()

        lstm_in = self.upward(x)
        if h is not None:
            lstm_in += self.lateral(h)
        if c is None:
            xp = self.xp
            with cuda.get_device_from_id(self._device_id):
                c = variable.Variable(
                    xp.zeros((x.shape[0], self.state_size), dtype=x.dtype))
        return lstm.lstm(c, lstm_in)
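
This forward pass matches the interface of Chainer's stateless LSTM link, where the caller carries the cell state c and the output h between time steps. A minimal sketch, assuming the standard chainer.links.StatelessLSTM:

import numpy as np
import chainer.links as L

lstm = L.StatelessLSTM(3, 4)   # in_size=3, out_size=4
c = h = None                   # the link creates a zero cell state on the first step
xs = [np.random.randn(2, 3).astype(np.float32) for _ in range(5)]
for x_t in xs:
    c, h = lstm(c, h, x_t)     # state is passed back in explicitly at every step
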
Example #9
    def __init__(self,
                 arch=None,
                 weights_file=None,
                 model=None,
                 device=-1,
                 precise=False):

        self.arch = arch
        self.precise = precise
        if model is not None:
            self.model = model
        else:
            print('[INFO] [DetectHuman]: LOADING POSE MODEL', flush=True)
            self.model = params['archs'][arch]()

            if weights_file:
                serializers.load_npz(weights_file, self.model)

        self.device = device
        if self.device >= 0:
            cuda.get_device_from_id(device).use()
            self.model.to_gpu()

            # create gaussian filter
            self.gaussian_kernel = self.create_gaussian_kernel(
                params['gaussian_sigma'], params['ksize'])[None, None]
            self.gaussian_kernel = cuda.to_gpu(self.gaussian_kernel)
    def set_cuda(self, gpu_id=0, deterministic=False, benchmark=True):
        """Set model to the GPU version.
        Args:
            gpu_id (int)
            deterministic (bool, optional):
            benchmark (bool, optional): not used
        """
        if self.use_cuda:
            chainer.config.type_check = False

            if deterministic:
                chainer.config.cudnn_deterministic = True
                logger.info('GPU deterministic mode (no cudnn)')
            else:
                chainer.config.cudnn_deterministic = False
                logger.info('GPU mode')
            cuda.get_device_from_id(gpu_id).use()
            self.to_gpu()

            if not chainer.cuda.available:
                raise ValueError('cuda is not available')
            if not chainer.cuda.cudnn_enabled:
                raise ValueError('cudnn is not available')
        else:
            logger.info('CPU mode')
Example #11
    def __call__(self, orig_img):
        orig_img = orig_img.copy()
        if self.precise:
            return self.detect_precise(orig_img)
        orig_img_h, orig_img_w, _ = orig_img.shape

        input_w, input_h = self.compute_optimal_size(orig_img, params['inference_img_size'])
        map_w, map_h = self.compute_optimal_size(orig_img, params['heatmap_size'])

        resized_image = cv2.resize(orig_img, (input_w, input_h))
        x_data = self.preprocess(resized_image)

        if self.device >= 0:
            x_data = cuda.to_gpu(x_data)

        h1s, h2s = self.model(x_data)

        pafs = F.resize_images(h1s[-1], (map_h, map_w)).data[0]
        heatmaps = F.resize_images(h2s[-1], (map_h, map_w)).data[0]

        if self.device >= 0:
            pafs = pafs.get()
            cuda.get_device_from_id(self.device).synchronize()

        all_peaks = self.compute_peaks_from_heatmaps(heatmaps)
        if len(all_peaks) == 0:
            return np.empty((0, len(JointType), 3)), np.empty(0)
        all_connections = self.compute_connections(pafs, all_peaks, map_w, params)
        subsets = self.grouping_key_points(all_connections, all_peaks, params)
        all_peaks[:, 1] *= orig_img_w / map_w
        all_peaks[:, 2] *= orig_img_h / map_h
        poses = self.subsets_to_pose_array(subsets, all_peaks)
        scores = subsets[:, -2]
        return poses, scores
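
A hedged usage sketch for a detector exposing this __call__; the variable detector stands for an already-constructed instance, and the image path is hypothetical.

import cv2

img = cv2.imread('person.jpg')   # image loaded with OpenCV, as the code above assumes
poses, scores = detector(img)    # poses: (n_people, n_joints, 3), scores: (n_people,)
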
Example #12
    def __call__(self, x, gamma_=None, beta_=None):
        if hasattr(self, 'gamma'):
            gamma = self.gamma
        elif gamma_ is not None:
            gamma = gamma_
        else:
            with cuda.get_device_from_id(self._device_id):
                gamma = variable.Variable(
                    self.xp.ones(self.avg_mean.shape, dtype=x.dtype))
        if hasattr(self, 'beta'):
            beta = self.beta
        elif beta_ is not None:
            beta = beta_
        else:
            with cuda.get_device_from_id(self._device_id):
                beta = variable.Variable(
                    self.xp.zeros(self.avg_mean.shape, dtype=x.dtype))

        decay = self.decay
        if (not configuration.config.train) and self.valid_test:
            mean = variable.Variable(self.avg_mean)
            var = variable.Variable(self.avg_var)
            ret = fixed_instance_normalization(x, gamma, beta, mean, var,
                                               self.eps)
        else:
            func = InstanceNormalizationFunction(self.eps, self.avg_mean,
                                                 self.avg_var, decay)
            ret = func(x, gamma, beta)
            self.avg_mean = func.running_mean
            self.avg_var = func.running_var

        return ret
Example #13
    def load_model(self):
        # model_file = osp.join(self.data_dir, 'max_miou.npz')
        model_file = osp.join(self.data_dir, 'max_depth_acc.npz')
        with open(osp.join(self.data_dir, 'model.txt'), 'r') as f:
            model_name = f.readline().rstrip()
        with open(osp.join(self.data_dir, 'n_class.txt'), 'r') as f:
            n_class = int(f.readline().rstrip())
        self.n_class = n_class
        with open(osp.join(self.data_dir, 'num_view.txt'), 'r') as f:
            num_view = int(f.readline().rstrip())
        self.num_view = num_view

        if model_name == 'FCN8sMultiViewMirrorSegmentationDepthEstimation':
            self.model = FCN8sMultiViewMirrorSegmentationDepthEstimation(
                n_class=n_class, num_view=num_view)

        print('\nLoading trained model:          {0}'.format(model_file))
        S.load_npz(model_file, self.model)
        print('Finished loading trained model: {0}\n'.format(model_file))

        if self.gpu >= 0:
            cuda.get_device_from_id(self.gpu).use()
            self.model.to_gpu()
        if LooseVersion(chainer.__version__) < LooseVersion('2.0.0'):
            self.model.train = False
    def load_model(self):
        n_class = len(self.train_dataset.class_names)
        if self.model_name == 'fcn32s':
            self.model = fcn.models.FCN32s(n_class=n_class)
            vgg = fcn.models.VGG16()
            vgg_path = vgg.download()
            S.load_npz(vgg_path, vgg)
            self.model.init_from_vgg16(vgg)
        elif self.model_name == 'fcn16s':
            self.model = fcn.models.FCN16s(n_class=n_class)
            fcn32s = fcn.models.FCN32s()
            fcn32s_path = fcn32s.download()
            S.load_npz(fcn32s_path, fcn32s)
            self.model.init_from_fcn32s(fcn32s_path, fcn32s)
        elif self.model_name == 'fcn8s':
            self.model = fcn.models.FCN8s(n_class=n_class)
            fcn16s = fcn.models.FCN16s()
            fcn16s_path = fcn16s.download()
            S.load_npz(fcn16s_path, fcn16s)
            self.model.init_from_fcn16s(fcn16s_path, fcn16s)
        elif self.model_name == 'fcn8s_at_once':
            self.model = fcn.models.FCN8sAtOnce(n_class=n_class)
            vgg = fcn.models.VGG16()
            vgg_path = vgg.download()
            S.load_npz(vgg_path, vgg)
            self.model.init_from_vgg16(vgg)
        else:
            raise ValueError(
                'Unsupported model_name: {}'.format(self.model_name))

        if self.gpu >= 0:
            cuda.get_device_from_id(self.gpu).use()
            self.model.to_gpu()
    def __init__(self):
        super(SoleAffordanceSegmentation, self).__init__()

        affordance = rospy.get_param('~affordance')

        pretrained_model, modal, out_channels = get_pretrained_model(
            affordance
        )
        self.out_channels = out_channels

        gpu = rospy.get_param('~gpu', 0)

        model = grasp_fusion.models.FCN8sVGG16Sigmoid(
            out_channels=out_channels,
            pretrained_model=pretrained_model,
            modal=modal,
        )
        if gpu >= 0:
            cuda.get_device_from_id(gpu).use()
            model.to_gpu()

        self.model = model

        self.pub_label = self.advertise('~output/label', Image, queue_size=1)
        self.pub_prob = self.advertise('~output/prob', Image, queue_size=1)
        self.pub_viz = self.advertise('~output/viz', Image, queue_size=1)
Example #16
def generate():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--sgen', type=str, default=None)
    parser.add_argument('--depth', '-d', type=int, default=5)
    parser.add_argument('--out', '-o', type=str, default='img/')
    parser.add_argument('--num', '-n', type=int, default=100)
    args = parser.parse_args()

    sgen = network.StyleBasedGenerator(depth=args.depth)
    print('loading generator model from ' + args.sgen)
    serializers.load_npz(args.sgen, sgen)

    if args.gpu >= 0:
        cuda.get_device_from_id(0).use()
        sgen.to_gpu()

    xp = sgen.xp

    for i in range(args.num):
        print(i)
        z = sgen.make_latent(1)
        x = sgen(z, alpha=1.0)
        x = chainer.cuda.to_cpu(x.data)

        img = x[0].copy()
        filename = os.path.join(args.out, '%d.png' % (i + 1))
        utils.save_image(img, filename)
    def __call__(self, orig_img):
        orig_img = orig_img.copy()
        if self.precise:
            return self.detect_precise(orig_img)
        orig_img_h, orig_img_w, _ = orig_img.shape

        input_w, input_h = self.compute_optimal_size(orig_img, params['inference_img_size'])
        map_w, map_h = self.compute_optimal_size(orig_img, params['heatmap_size'])

        resized_image = cv2.resize(orig_img, (input_w, input_h))
        x_data = self.preprocess(resized_image)

        if self.device >= 0:
            x_data = cuda.to_gpu(x_data)

        h1s, h2s = self.model(x_data)

        pafs = F.resize_images(h1s[-1], (map_h, map_w)).data[0]
        heatmaps = F.resize_images(h2s[-1], (map_h, map_w)).data[0]

        if self.device >= 0:
            pafs = pafs.get()
            cuda.get_device_from_id(self.device).synchronize()

        all_peaks = self.compute_peaks_from_heatmaps(heatmaps)
        if len(all_peaks) == 0:
            return np.empty((0, len(JointType), 3)), np.empty(0)
        all_connections = self.compute_connections(pafs, all_peaks, map_w, params)
        subsets = self.grouping_key_points(all_connections, all_peaks, params)
        all_peaks[:, 1] *= orig_img_w / map_w
        all_peaks[:, 2] *= orig_img_h / map_h
        poses = self.subsets_to_pose_array(subsets, all_peaks)
        scores = subsets[:, -2]
        return poses, scores
Example #18
    def __call__(self, x, finetune=False):
        if hasattr(self, 'gamma'):
            gamma = self.gamma
        else:
            with cuda.get_device_from_id(self._device_id):
                gamma = variable.Variable(
                    self.xp.ones(self.avg_mean.shape, dtype=x.dtype))
        if hasattr(self, 'beta'):
            beta = self.beta
        else:
            with cuda.get_device_from_id(self._device_id):
                beta = variable.Variable(
                    self.xp.zeros(self.avg_mean.shape, dtype=x.dtype))

        if chainer.configuration.config.train:
            if finetune:
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            func = MultiNodeBatchNormalizationFunction(self.comm, self.eps,
                                                       self.avg_mean,
                                                       self.avg_var, decay)
            ret = func(x, gamma, beta)

            self.avg_mean[:] = func.running_mean
            self.avg_var[:] = func.running_var
        else:
            # Use running average statistics or fine-tuned statistics.
            mean = variable.Variable(self.avg_mean)
            var = variable.Variable(self.avg_var)
            ret = batch_normalization.fixed_batch_normalization(
                x, gamma, beta, mean, var, self.eps)
        return ret
Example #19
    def __call__(self, x, finetune=False):
        if hasattr(self, 'gamma'):
            gamma = self.gamma
        else:
            with cuda.get_device_from_id(self._device_id):
                gamma = variable.Variable(self.xp.ones(
                    self.avg_mean.shape, dtype=x.dtype))
        if hasattr(self, 'beta'):
            beta = self.beta
        else:
            with cuda.get_device_from_id(self._device_id):
                beta = variable.Variable(self.xp.zeros(
                    self.avg_mean.shape, dtype=x.dtype))

        if chainer.configuration.config.train:
            if finetune:
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            func = MultiNodeBatchNormalizationFunction(
                self.comm, self.eps, self.avg_mean, self.avg_var, decay)
            ret = func(x, gamma, beta)

            self.avg_mean[:] = func.running_mean
            self.avg_var[:] = func.running_var
        else:
            # Use running average statistics or fine-tuned statistics.
            mean = variable.Variable(self.avg_mean)
            var = variable.Variable(self.avg_var)
            ret = batch_normalization.fixed_batch_normalization(
                x, gamma, beta, mean, var, self.eps)
        return ret
Example #20
    def __init__(self, arch=None, weights_file=None, model=None, device=-1):
        # test
        # self.model = params['archs']['nn1']()
        # serializers.load_npz('result/nn1/model_iter_50000', self.model)
        print('Loading PoseNet...')
        self.model = params['archs']['posenet']()
        serializers.load_npz('models/coco_posenet.npz', self.model)

        # if model is not None:
        #     self.model = model
        # else:
        #     # load model
        #     print('Loading PoseNet...')
        #     self.model = params['archs'][arch]()
        #     if weights_file:
        #         serializers.load_npz(weights_file, self.model)

        self.device = device
        if self.device >= 0:
            cuda.get_device_from_id(device).use()
            self.model.to_gpu()

            # create gaussian filter
            ksize = params['ksize']
            kernel = cuda.to_gpu(
                self.create_gaussian_kernel(sigma=params['gaussian_sigma'],
                                            ksize=ksize))
            self.gaussian_kernel = kernel
Example #21
    def __call__(self, c, h, x):
        """Returns new cell state and updated output of LSTM.

        Args:
            c (~chainer.Variable): Cell states of LSTM units.
            h (~chainer.Variable): Output at the previous time step.
            x (~chainer.Variable): A new batch from the input sequence.

        Returns:
            tuple of ~chainer.Variable: Returns ``(c_new, h_new)``, where
            ``c_new`` represents new cell state, and ``h_new`` is updated
            output of LSTM units.

        """
        if self.upward.W.data is None:
            in_size = x.size // x.shape[0]
            with cuda.get_device_from_id(self._device_id):
                self.upward._initialize_params(in_size)
                self._initialize_params()

        lstm_in = self.upward(x)
        if h is not None:
            lstm_in += self.lateral(h)
        if c is None:
            xp = self.xp
            with cuda.get_device_from_id(self._device_id):
                c = variable.Variable(
                    xp.zeros((x.shape[0], self.state_size), dtype=x.dtype))
        return lstm.lstm(c, lstm_in)
Example #22
def main():
    default_model_file = osp.join(here, 'data/G_horse2zebra_from_pytorch.npz')
    default_out_file = osp.join(here, 'logs/create_horse2zebra.gif')

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('video_file', help='Video file of horse.')
    parser.add_argument('-g', '--gpu', type=int, default=0, help='GPU id.')
    parser.add_argument('-m',
                        '--model-file',
                        default=default_model_file,
                        help='Model file.')
    parser.add_argument('-o',
                        '--out-file',
                        default=default_out_file,
                        help='Output video file.')
    args = parser.parse_args()

    print('GPU id: {:d}'.format(args.gpu))
    print('Model file: {:s}'.format(args.model_file))
    print('Video file: {:s}'.format(args.video_file))
    print('Output file: {:s}'.format(args.out_file))

    chainer.global_config.train = False
    chainer.global_config.enable_backprop = False

    model = chainer_cyclegan.models.ResnetGenerator()
    chainer.serializers.load_npz(args.model_file, model)

    if args.gpu >= 0:
        cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    batch_size = 1

    video = imageio.get_reader(args.video_file)
    writer = imageio.get_writer(args.out_file)
    for img in tqdm.tqdm(video):
        img_org = img.copy()
        img = cv2.resize(img, (256, 256))

        xi = img.astype(np.float32)
        xi = (xi / 255) * 2 - 1
        xi = xi.transpose(2, 0, 1)
        x = np.repeat(xi[None, :, :, :], batch_size, axis=0)
        if args.gpu >= 0:
            x = cuda.to_gpu(x)

        y = model(x)

        yi = y[0].array
        yi = cuda.to_cpu(yi)
        yi = yi.transpose(1, 2, 0)
        yi = (yi + 1) / 2 * 255
        out = yi.astype(np.uint8)
        out = cv2.resize(out, (img_org.shape[1], img_org.shape[0]))

        writer.append_data(np.hstack([img_org, out]))

    print('Wrote video: {:s}'.format(args.out_file))
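
The value mapping used in the loop above, pulled out as standalone helpers (a sketch; only the scaling and layout conversion shown in the code):

import numpy as np

def to_model_range(img_uint8):
    # HWC uint8 in [0, 255] -> CHW float32 in [-1, 1]
    x = img_uint8.astype(np.float32) / 255 * 2 - 1
    return x.transpose(2, 0, 1)

def to_image_range(y_chw):
    # CHW float32 in [-1, 1] -> HWC uint8 in [0, 255]
    y = (y_chw.transpose(1, 2, 0) + 1) / 2 * 255
    return y.astype(np.uint8)
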
Example #23
    def update_core(self):
        train_iter = self.get_iterator('main')
        optimizer = self.get_optimizer('main')

        batch = train_iter.next()
        t_data = self.converter(batch, self.device)
        B = t_data.shape[0]
        y_data = self.func(B)
        B, C, H, W = t_data.shape[:4]
        y_data = F.broadcast_to(y_data, (B, C, H, W))
        loss = compute_loss(y_data, t_data)

        reporter.report({
            'main/loss': loss,
            'pos/light_x': self.model.data[0],
            'pos/light_y': self.model.data[1],
            'pos/light_z': self.model.data[2]
        })

        y_data = y_data.data
        if self.device >= 0:
            y_data = y_data.get()
            cuda.get_device_from_id(self.device).synchronize()

        img = y_data[0]
        img = np.transpose(img, (1, 2, 0))
        img = np.clip(img, 0, 1)
        img = (img * 255).astype(np.uint8)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        save_progress_image(self.odir, self.count, img)

        optimizer.target.cleargrads()
        loss.backward()
        optimizer.update()
        self.count += 1
Example #24
def generate():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--model', '-m', type=str, default=None)
    parser.add_argument('--id', '-i', type=int, default=0)
    parser.add_argument('--inf', type=int, default=10)
    parser.add_argument('--outf', type=int, default=10)
    args = parser.parse_args()

    test = dataset.MovingMnistDataset(0, 10000, args.inf, args.outf)

    model = network.MovingMnistNetwork(sz=[128, 64, 64], n=2, directory="img/")

    if args.model is not None:
        print("loading model from " + args.model)
        serializers.load_npz(args.model, model)

    x, t = test[args.id]

    x = np.expand_dims(x, 0)
    t = np.expand_dims(t, 0)

    if args.gpu >= 0:
        cuda.get_device_from_id(0).use()
        model.to_gpu()
        x = cuda.cupy.array(x)
        t = cuda.cupy.array(t)

    res = model(Variable(x), Variable(t))
    def __init__(self, arch=None, weights_file=None, model=None, device=-1):
        # test
        # self.model = params['archs']['nn1']()
        # serializers.load_npz('result/nn1/model_iter_50000', self.model)
        print('Loading PoseNet...')
        self.model = params['archs']['posenet']()
        serializers.load_npz('models/coco_posenet.npz', self.model)

        # if model is not None:
        #     self.model = model
        # else:
        #     # load model
        #     print('Loading PoseNet...')
        #     self.model = params['archs'][arch]()
        #     if weights_file:
        #         serializers.load_npz(weights_file, self.model)

        self.device = device
        if self.device >= 0:
            cuda.get_device_from_id(device).use()
            self.model.to_gpu()

            # create gaussian filter
            ksize = params['ksize']
            kernel = cuda.to_gpu(self.create_gaussian_kernel(sigma=params['gaussian_sigma'], ksize=ksize))
            self.gaussian_kernel = kernel
Example #26
    def __init__(self, ds_path, spec_file_name, layer_name, box_type,
                 window_size, model_param_file_path, plog_path, gpu_id):
        """ Constructor
            Args:
             ds_path (str): a dataset path
             spec_file_name (str): a dataset spec file name
             layer_name (str): a layer name
             box_type (str): a type of boxes - 'tbox' or 'ebox'
             window_size (int): a window size
             model_param_file_path (str): a model parameter file path
             plog_path (str): a prediction log file path
             gpu_id (int): GPU ID (-1 for CPU)
        """
        # set dataset
        self.ds = TripDataset(ds_path, spec_file_name, layer_name, box_type)
        # check dataset
        self.ds_length = self.ds.get_length()
        # window size
        self.window_size = window_size
        # set gpu
        self.gpu_id = gpu_id
        if self.gpu_id >= 0:
            cuda.get_device_from_id(self.gpu_id).use()
        # set model
        self.set_model(model_param_file_path)
        # prediction log file path
        self.plog_path = plog_path
        self.plogf = None
Example #27
    def __init__(self):
        super(AffordanceSegmentation, self).__init__()

        affordance = rospy.get_param('~affordance')

        pretrained_model, modal, out_channels = get_pretrained_model(
            affordance)

        gpu = rospy.get_param('~gpu', 0)

        self.lock = Lock()

        # rospy.set_param('~always_subscribe', True)

        model = grasp_fusion.models.FCN8sVGG16Sigmoid(
            out_channels=out_channels,
            pretrained_model=pretrained_model,
            modal=modal,
        )
        if gpu >= 0:
            cuda.get_device_from_id(gpu).use()
            model.to_gpu()

        self.model = model

        self.pub_label = self.advertise('~output/label', Image, queue_size=1)
        self.pub_prob = self.advertise('~output/prob', Image, queue_size=1)
        self.pub_viz = self.advertise('~output/viz', Image, queue_size=1)

        self._prob = None
        self.srv_reset = rospy.Service('~reset', Empty, self.reset_callback)
Example #28
    def __init__(self, model, optimizer, obs_normalizer=None,
                 update_interval=3072, minibatch_size=3072, epochs=1,
                 entropy_coef=1e-3, loss_decay=0.99, entropy_decay=0.99,
                 discriminator_value_offset=1e-8, noisy_label=False,
                 noisy_label_range=0.3, gpu=None):
        self.model = model
        self.optimizer = optimizer
        self.obs_normalizer = obs_normalizer

        if gpu is not None and gpu >= 0:
            cuda.get_device_from_id(gpu).use()
            self.model.to_gpu(device=gpu)
            if obs_normalizer is not None:
                self.obs_normalizer.to_gpu(device=gpu)
        self.xp = self.model.xp

        self.epochs = epochs
        self.update_interval = update_interval
        self.minibatch_size = minibatch_size
        self.epochs = epochs
        self.entropy_coef = entropy_coef
        self.loss_decay = loss_decay
        self.entropy_decay = entropy_decay
        self.average_loss = 0.
        self.accuracy_gen = 0.
        self.accuracy_exp = 0.
        self.average_entropy = 0.
        self.discriminator_value_offset = discriminator_value_offset
        self.noisy_label = noisy_label
        self.noisy_label_range = noisy_label_range
        self._reset_trajectories()
Example #29
def main():
    args = parse_args()

    model = VGG16(pretrained_model='imagenet')
    image = cv2.imread(args.image_path)
    image_size = (224, 224)
    image = cv2.resize(image, image_size)

    if args.gpu >= 0:
        cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
        xp = cuda.cupy
    else:
        xp = np

    x = np.float32(image)
    x = x.transpose((2, 0, 1))[np.newaxis, ::-1, :, :]
    x -= model.mean
    x = xp.asarray(x)

    y_grad = xp.zeros((1, 1000), dtype=np.float32)
    y_grad[0, args.label] = 1.0
    gcam = gradcam.gradcam(model,
                           x, [model.conv5_3.conv, F.relu],
                           y_grad=y_grad)
    gcam = cuda.to_cpu(gcam[0])

    heatmap_image = gradcam.heatmap(gcam, image_size)
    cv2.imwrite(args.heatmap_path, heatmap_image)

    overlay_image = gradcam.overlay(image, gcam)
    cv2.imwrite(args.overlay_path, overlay_image)
Example #30
    def __call__(self, x, **kwargs):
        argument.check_unexpected_kwargs(
            kwargs, test='test argument is not supported anymore. '
            'Use chainer.using_config')
        finetune, = argument.parse_kwargs(kwargs, ('finetune', False))

        if hasattr(self, 'gamma'):
            gamma = self.gamma
        else:
            with cuda.get_device_from_id(self._device_id):
                gamma = variable.Variable(self.xp.ones(
                    self.avg_mean.shape, dtype=x.dtype))
        if hasattr(self, 'beta'):
            beta = self.beta
        else:
            with cuda.get_device_from_id(self._device_id):
                beta = variable.Variable(self.xp.zeros(
                    self.avg_mean.shape, dtype=x.dtype))
        if configuration.config.train:
            if finetune:
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            ret = batch_normalization(
                x, gamma, beta, eps=self.eps, running_mean=self.avg_mean,
                running_var=self.avg_var, decay=decay)
        else:
            # Use running average statistics or fine-tuned statistics.
            mean = variable.Variable(self.avg_mean)
            var = variable.Variable(self.avg_var)
            ret = fixed_batch_normalization(
                x, gamma, beta, mean, var, self.eps)
        return ret
    def __init__(self,
                 arch=None,
                 weights_file=None,
                 model=None,
                 device=-1,
                 precise=False):
        self.arch = arch
        self.precise = precise
        if model is not None:
            self.model = model
        else:
            print('Loading the model...')
            self.model = params['archs'][arch]()

            if weights_file:
                serializers.load_npz(weights_file, self.model)

        self.device = device
        if self.device >= 0:
            cuda.get_device_from_id(device).use()
            self.model.to_gpu()

            self.gaussian_kernel = self.create_gaussian_kernel(
                params['gaussian_sigma'], params['ksize'])[None, None]
            self.gaussian_kernel = cuda.to_gpu(self.gaussian_kernel)
    def __call__(self, img):
        edit_img = img.copy()
        img_h, img_w, _ = edit_img.shape
        # Optimize the image and heatmap sizes (make them multiples of stride=8)
        input_w, input_h = self.compute_optimal_size(edit_img, constants['img_size'])
        map_w, map_h = self.compute_optimal_size(edit_img, constants['heatmap_size'])
        # Resize the image and preprocess it for the network
        resized_image = cv2.resize(edit_img, (input_w, input_h))
        x_data = self.preprocess(resized_image)
        # Move the input to the GPU if one is used
        if self.device >= 0:
            x_data = cuda.to_gpu(x_data)
        # Network outputs (from all stages)
        Ss, Ls = self.model(x_data)
        # Keep only the outputs of the final stage
        heatmaps = F.resize_images(Ss[-1], (map_h, map_w)).data[0]
        pafs = F.resize_images(Ls[-1], (map_h, map_w)).data[0]

        if self.device >= 0:
            pafs = pafs.get()
            cuda.get_device_from_id(self.device).synchronize()
        # Compute peaks from the heatmaps
        all_peaks = self.compute_peaks_from_heatmaps(heatmaps)
        if len(all_peaks) == 0:
            return np.empty((0, len(JointType), 3)), np.empty(0)
        # Compute connections from the peaks and PAFs
        all_connections = self.compute_connections(pafs, all_peaks, map_w, constants)
        # Group keypoints into subsets (one per person)
        subsets = self.grouping_key_points(all_connections, all_peaks, constants)
        all_peaks[:, 1] *= img_w / map_w
        all_peaks[:, 2] *= img_h / map_h
        # Convert subsets to pose arrays
        poses = self.subsets_to_pose_array(subsets, all_peaks)
        return poses
Example #33
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--model', '-m', type=str, default="./results/model")
    parser.add_argument('--begin', '-b', type=int, default=0)
    args = parser.parse_args()

    # Set up a neural network to train.
    _, test_x = get_cifar10(withlabel=False, ndim=3)

    test = LoadDataset(test_x)

    model = network.CAE(3, 3, return_out=True)

    if args.model is not None:
        print("loading model from " + args.model)
        serializers.load_npz(args.model, model)

    # Show 64 images
    fig = plt.figure(figsize=(6,6))
    plt.title("Original images: first rows,\n Predicted images: second rows")
    plt.axis('off')
    plt.tight_layout()
    
    pbar = tqdm(total=32)
    for i in range(4):
        for j in range(8):
            ax = fig.add_subplot(8, 8, i*16+j+1, xticks=[], yticks=[])
            x, t = test[i*8+j]
            xT = x.transpose(1, 2, 0)
            ax.imshow(xT, cmap=plt.cm.bone, interpolation='nearest')
            
            x = np.expand_dims(x, 0)
            t = np.expand_dims(t, 0)
    
            if args.gpu >= 0:
                cuda.get_device_from_id(0).use()
                model.to_gpu()
                x = cuda.cupy.array(x)
                t = cuda.cupy.array(t)
            
            predicted, loss = model(Variable(x), Variable(t))
            #print(predicted.shape)
            #print(loss)   
            
            predicted = F.transpose(predicted[0], (1, 2, 0))
            predicted = cuda.to_cpu(predicted.data) #Variable to numpy
            predicted = predicted * 255
            predicted = predicted.astype(np.uint8) 
            ax = fig.add_subplot(8, 8, i*16+j+9, xticks=[], yticks=[])
            ax.imshow(predicted, cmap=plt.cm.bone, interpolation='nearest')

            pbar.update(1)
            
    pbar.close()
   
    plt.savefig("result.png")
    plt.show()
    plt.close()
Example #34
    def __call__(self, batch, device=None, padding=None):
        """Concatenate data and transfer them to GPU asynchronously.

        See also :func:`chainer.dataset.concat_examples`.

        Args:
            batch (list): A list of examples.
            device (int): Device ID to which each array is sent.
            padding: Scalar value for extra elements.

        Returns:
            Array, a tuple of arrays, or a dictionary of arrays.
            The type depends on the type of each example in the batch.
        """
        if len(batch) == 0:
            raise ValueError('batch is empty')
        first_elem = batch[0]

        if len(self._conveyor) == 0:
            self._device = device  # device is set at first call
            if device is not None and device >= 0 and self._stream is None:
                with cuda.get_device_from_id(device):
                    self._stream = cuda.Stream(non_blocking=True)
        if device is not self._device:
            raise ValueError('device is different')

        with cuda.get_device_from_id(device):
            if isinstance(first_elem, tuple):
                result = []
                if not isinstance(padding, tuple):
                    padding = [padding] * len(first_elem)

                for i in six.moves.range(len(first_elem)):
                    self._conveyor[i].put(
                        _concat_arrays([example[i] for example in batch],
                                       padding[i]))

                for i in six.moves.range(len(first_elem)):
                    result.append(self._conveyor[i].get())

                return tuple(result)

            elif isinstance(first_elem, dict):
                result = {}
                if not isinstance(padding, dict):
                    padding = {key: padding for key in first_elem}

                for key in first_elem:
                    self._conveyor[key].put(
                        _concat_arrays([example[key] for example in batch],
                                       padding[key]))

                for key in first_elem:
                    result[key] = self._conveyor[key].get()

                return result

            else:
                return to_device(device, _concat_arrays(batch, padding))
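
This converter has the same interface as Chainer's ConcatWithAsyncTransfer. A hedged sketch of how such a converter is typically plugged into a training loop; train_iter and optimizer are assumed to exist already:

from chainer import training
from chainer.dataset.convert import ConcatWithAsyncTransfer  # assuming this is Chainer's built-in class

updater = training.StandardUpdater(
    train_iter, optimizer,
    converter=ConcatWithAsyncTransfer(),  # overlaps host-to-GPU copies with computation
    device=0)
trainer = training.Trainer(updater, (10, 'epoch'), out='result')
trainer.run()
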
Example #35
def _loss(X_, Y_, verbose=1, beta=1, weights=None, cpu=False, **kwargs):
    """CUDA ready: implementation based on NumPy/CuPy"""
    if cuda.available and not cpu:  # if CUDA is installed on the machine, use it.
        gpu = True
        if cuda.get_array_module(X_).__name__ == 'cupy':  # if X_ is on GPU
            # make sure it is on the same device as Y_
            assert X_.data.device.id == Y_.data.device.id
            # use that Device as main device
            cuda.get_device_from_id(X_.data.device.id).use()
            # set pointers
            X = X_
            Y = Y_
            print(X.device.id)
        else:  # X_ is a CPU array
            print('Copying to GPU.')
            X = cuda.to_gpu(X_, device=2)
            Y = cuda.to_gpu(Y_, device=2)
            cuda.get_device_from_id(2).use()
    else:  # CUDA is not installed, run on CPU
        gpu = False
        # set pointers
        X = X_
        Y = Y_
    xp = cuda.get_array_module(X)

    # Compute objective value and grad at Z.
    X = X.reshape(Y.shape[:-1])
    d1, d2, d3, m = X.shape
    nb = Y.shape[-1]
    if weights is None:
        weights = np.ones(nb)
    G = xp.zeros((d1, d2, d3, m))
    d = 0
    for i in range(nb):
        if verbose > 1:
            print('computing distances and gradients for shape %d' % (i + 1))
        M_gpu, G_gpu = sinkhorn_dist_grad(X,
                                          Y[:, :, :, :, i],
                                          verbose=verbose,
                                          **kwargs)
        if gpu:
            if verbose > 1:
                print('_ copying to cpu')
            M_cpu = cuda.to_cpu(M_gpu)
        else:
            M_cpu = M_gpu
        d_cpu, D_cpu = soft_dtw(M_cpu, beta)
        D_bar_cpu = soft_dtw_grad(D_cpu, beta)
        if gpu:
            if verbose > 1:
                print('_ copying to gpu')
            D_bar_gpu = cuda.to_gpu(D_bar_cpu, device=G_gpu.device.id)
        else:
            D_bar_gpu = D_bar_cpu

        final_G_gpu = chain_rule(D_bar_gpu, G_gpu)
        G += weights[i] * final_G_gpu
        d += weights[i] * d_cpu
    return d, G
    def __call__(self, x, **kwargs):
        """__call__(self, x, finetune=False)

        Invokes the forward propagation of BatchNormalization.

        In training mode, the BatchNormalization computes moving averages of
        mean and variance for evaluation during training, and normalizes the
        input using batch statistics.

        .. warning::

           ``test`` argument is not supported anymore since v2.
           Instead, use ``chainer.using_config('train', False)``.
           See :func:`chainer.using_config`.

        Args:
            x (Variable): Input variable.
            finetune (bool): If it is in the training mode and ``finetune`` is
                ``True``, BatchNormalization runs in fine-tuning mode; it
                accumulates the input array to compute population statistics
                for normalization, and normalizes the input using batch
                statistics.

        """
        argument.check_unexpected_kwargs(
            kwargs, test='test argument is not supported anymore. '
            'Use chainer.using_config')
        finetune, = argument.parse_kwargs(kwargs, ('finetune', False))

        if hasattr(self, 'gamma'):
            gamma = self.gamma
        else:
            with cuda.get_device_from_id(self._device_id):
                gamma = variable.Variable(self.xp.ones(
                    self.avg_mean.shape, dtype=x.dtype))
        if hasattr(self, 'beta'):
            beta = self.beta
        else:
            with cuda.get_device_from_id(self._device_id):
                beta = variable.Variable(self.xp.zeros(
                    self.avg_mean.shape, dtype=x.dtype))

        if configuration.config.train:
            if finetune:
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            ret = functions.batch_normalization(
                x, gamma, beta, eps=self.eps, running_mean=self.avg_mean,
                running_var=self.avg_var, decay=decay)
        else:
            # Use running average statistics or fine-tuned statistics.
            mean = variable.Variable(self.avg_mean)
            var = variable.Variable(self.avg_var)
            ret = functions.fixed_batch_normalization(
                x, gamma, beta, mean, var, self.eps)
        return ret
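
A minimal runnable sketch of the train/eval switch this method implements, using the standard chainer.links.BatchNormalization, which exposes the same interface:

import numpy as np
import chainer
import chainer.links as L

bn = L.BatchNormalization(3)
x = np.random.randn(8, 3).astype(np.float32)

y_train = bn(x)                        # training mode: updates avg_mean / avg_var
y_tuned = bn(x, finetune=True)         # fine-tuning mode: accumulates population statistics
with chainer.using_config('train', False):
    y_eval = bn(x)                     # evaluation mode: uses the stored statistics
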
Example #37
    def test_forward_gpu_multi(self):
        with cuda.get_device_from_id(0):
            self.link.to_gpu()
            x1 = cuda.to_gpu(self.x1)
            x2 = cuda.to_gpu(self.x2)
            x3 = cuda.to_gpu(self.x3)
        with cuda.get_device_from_id(1):
            self.check_forward(x1, x2, x3)
Example #38
    def test_copydata_gpu_to_another_gpu(self):
        cp = cuda.cupy
        with cuda.get_device_from_id(0):
            data1 = cp.zeros(3, dtype=np.float32)
            expect = cp.ones(3, dtype=np.float32)
        with cuda.get_device_from_id(1):
            data2 = cp.ones(3, dtype=np.float32)
        self.check_copydata(data1, data2, expect)
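
These tests rely on get_device_from_id working as a context manager that makes a given GPU current for allocations. A minimal sketch of that behaviour (requires CuPy and two GPUs):

import numpy as np
from chainer import cuda

with cuda.get_device_from_id(0):
    a = cuda.cupy.zeros(3, dtype=np.float32)   # allocated on GPU 0
with cuda.get_device_from_id(1):
    b = cuda.cupy.ones(3, dtype=np.float32)    # allocated on GPU 1
print(a.device, b.device)
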
Example #39
    def test_addgrad_gpu_to_another_gpu(self):
        cp = cuda.cupy
        with cuda.get_device_from_id(1):
            a = cp.full(3, 10, dtype=np.float32)
        with cuda.get_device_from_id(0):
            b = cp.full(3, 20, dtype=np.float32)
            c = cp.full(3, 30, dtype=np.float32)
        self.check_addgrad(a, b, c)
    def test_forward_gpu_multi(self):
        with cuda.get_device_from_id(0):
            self.link.to_gpu()
            c = cuda.to_gpu(self.c)
            h = cuda.to_gpu(self.h)
            x = cuda.to_gpu(self.x)
        with cuda.get_device_from_id(1):
            self.check_forward(c, h, x)
Example #41
    def test_addgrad_gpu_to_another_gpu_none_dst_dev1(self):
        cp = cuda.cupy
        with cuda.get_device_from_id(1):
            a = cp.full(3, 20, dtype=np.float32)
        with cuda.get_device_from_id(0):
            b = cp.full(3, 10, dtype=np.float32)
            c = cp.full(3, 20, dtype=np.float32)
        with cuda.get_device_from_id(1):
            self.check_addgrad(a, b, c, clear_dst_grad=True)
Example #42
    def test_zerograds_fill_multi_gpu(self):
        cupy = cuda.cupy
        with cuda.get_device_from_id(1):
            a = chainer.Variable(cupy.empty(3, dtype=np.float32))
            a.grad = cupy.empty_like(a.data)
        a.zerograd()
        self.assertEqual(int(a.grad.device), 1)
        with cuda.get_device_from_id(1):
            g_expect = cupy.zeros_like(a.data)
            cupy.testing.assert_array_equal(a.grad, g_expect)
Example #43
def dump_bilm_embeddings(vocab_file, dataset_file, options_file,
                         weight_file, outfile, gpu=-1,
                         batchsize=32):
    with open(options_file, 'r') as fin:
        options = json.load(fin)
    max_word_length = options['char_cnn']['max_characters_per_token']

    vocab = UnicodeCharsVocabulary(vocab_file, max_word_length)
    batcher = Batcher(vocab_file, max_word_length)

    model = Elmo(
        options_file,
        weight_file,
        num_output_representations=1,
        requires_grad=False,
        do_layer_norm=False,
        dropout=0.)
    if gpu >= 0:
        cuda.get_device_from_id(gpu).use()
        model.to_gpu()

    # (batch_size, timesteps, 50)
    # TODO(sosk): preencoding token embedding for acceleration
    with chainer.using_config("train", False), \
            chainer.no_backprop_mode():
        sentence_id = 0
        n_lines = sum([1 for _ in open(dataset_file, 'r')])
        with open(dataset_file, 'r') as fin, h5py.File(outfile, 'w') as fout:
            for minibatch in minibatch_iterator(tqdm.tqdm(fin, total=n_lines),
                                                batchsize):
                sentences = [line.strip().split() for line in minibatch]
                char_ids = batcher.batch_sentences(
                    sentences, add_bos_eos=False)
                char_ids = model.xp.asarray(char_ids)
                mb_outs = model.forward(char_ids)
                mb_embedding_layers = mb_outs['elmo_layers']
                # [(batch_size, max_sequence_length, embedding_dim), ..., x n_layers]
                # Note that the embedding layers have already dropped bos & eos,
                # but they still contain padding
                mb_mask = mb_outs['mask']
                mb_concat_embedding_layers = cuda.to_cpu(
                    model.xp.stack([mb_emb.array for mb_emb in mb_embedding_layers], axis=1))
                # (batch_size, n_layers=3, max_sequence_length, embedding_dim)
                for mask, concat_embedding_layers in zip(mb_mask, mb_concat_embedding_layers):
                    # remove pads
                    length = int(mask.sum())
                    concat_embedding_layers = concat_embedding_layers[:, :length]
                    # (n_layers=3, sequence_length, embedding_dim)
                    ds = fout.create_dataset(
                        '{}'.format(sentence_id),
                        concat_embedding_layers.shape,
                        dtype='float32',
                        data=concat_embedding_layers
                    )
                    sentence_id += 1
Example #44
    def test_zerograds_multi_gpu(self):
        cupy = cuda.cupy
        with cuda.get_device_from_id(1):
            a = chainer.Variable(cupy.empty(3, dtype=np.float32))
        with testing.assert_warns(DeprecationWarning):
            a.zerograd()
        self.assertIsNot(a.grad, None)
        self.assertEqual(int(a.grad.device), 1)
        with cuda.get_device_from_id(1):
            g_expect = cupy.zeros_like(a.data)
            cupy.testing.assert_array_equal(a.grad, g_expect)
Example #45
def dump_token_embeddings(vocab_file, options_file, weight_file, outfile,
                          gpu=-1, batchsize=128):
    '''
    Given an input vocabulary file, dump all the token embeddings to the
    outfile.  The result can be used as the embedding_weight_file when
    constructing a BidirectionalLanguageModel.
    '''
    with open(options_file, 'r') as fin:
        options = json.load(fin)
    max_word_length = options['char_cnn']['max_characters_per_token']

    vocab = UnicodeCharsVocabulary(vocab_file, max_word_length)
    batcher = Batcher(vocab_file, max_word_length)

    model = Elmo(
        options_file,
        weight_file,
        num_output_representations=1,
        requires_grad=False,
        do_layer_norm=False,
        dropout=0.)

    tokens = [vocab.id_to_word(i) for i in range(vocab.size)]
    n_tokens = len(tokens)

    # (batch_size, timesteps, 50)
    if gpu >= 0:
        cuda.get_device_from_id(gpu).use()
        model.to_gpu()

    all_embeddings = []
    with chainer.using_config("train", False), \
            chainer.no_backprop_mode():
        for minibatch in minibatch_iterator(tqdm.tqdm(tokens, total=n_tokens),
                                            batchsize):
            char_ids = batcher.batch_sentences([minibatch], add_bos_eos=False)
            char_ids = model.xp.asarray(char_ids)  # to gpu
            embeddings = model._elmo_lstm._token_embedder\
                                         .forward(char_ids)['token_embedding']
            # (batch_size, sequence_length + 2, embedding_dim)
            embeddings = embeddings[:, 1:-1]  # del bos and eos
            embeddings = embeddings[0]
            embeddings = cuda.to_cpu(embeddings.array)
            all_embeddings.append(embeddings)

    all_embeddings = numpy.concatenate(all_embeddings, axis=0)
    with h5py.File(outfile, 'w') as fout:
        ds = fout.create_dataset(
            'embedding',
            all_embeddings.shape,
            dtype='float32',
            data=all_embeddings)
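A minimal usage sketch for dump_token_embeddings; every path below is a placeholder for illustration, not taken from the original project:

dump_token_embeddings(
    vocab_file='vocab.txt',                 # one token per line
    options_file='elmo_options.json',
    weight_file='elmo_weights.hdf5',
    outfile='token_embeddings.hdf5',        # readable later under the 'embedding' key
    gpu=-1,                                 # CPU; pass a device id to use the GPU
    batchsize=128,
)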
Exemple #46
0
    def __init__(self, arch=None, weights_file=None, model=None, device=-1):
        print('Loading FaceNet...')
        self.model = params['archs'][arch]()
        serializers.load_npz(weights_file, self.model)

        self.device = device
        if self.device >= 0:
            cuda.get_device_from_id(device).use()
            self.model.to_gpu()

            # create gaussian filter
            ksize = params['ksize']
            kernel = cuda.to_gpu(self.create_gaussian_kernel(sigma=params['gaussian_sigma'], ksize=ksize))
            self.gaussian_kernel = kernel
Exemple #47
0
    def __call__(self, x):
        """Updates the internal state and returns the LSTM outputs.

        Args:
            x (~chainer.Variable): A new batch from the input sequence.

        Returns:
            ~chainer.Variable: Outputs of updated LSTM units.

        """
        if self.upward.has_uninitialized_params:
            with cuda.get_device_from_id(self._device_id):
                in_size = x.size // x.shape[0]
                self.upward._initialize_params(in_size)
                self._initialize_params()

        batch = x.shape[0]
        lstm_in = self.upward(x)
        h_rest = None
        if self.h is not None:
            h_size = self.h.shape[0]
            if batch == 0:
                h_rest = self.h
            elif h_size < batch:
                msg = ('The batch size of x must be equal to or less than '
                       'the size of the previous state h.')
                raise TypeError(msg)
            elif h_size > batch:
                h_update, h_rest = split_axis.split_axis(
                    self.h, [batch], axis=0)
                lstm_in += self.lateral(h_update)
            else:
                lstm_in += self.lateral(self.h)
        if self.c is None:
            xp = self.xp
            with cuda.get_device_from_id(self._device_id):
                self.c = variable.Variable(
                    xp.zeros((batch, self.state_size), dtype=x.dtype),
                    volatile='auto')
        self.c, y = lstm.lstm(self.c, lstm_in)

        if h_rest is None:
            self.h = y
        elif len(y.data) == 0:
            self.h = h_rest
        else:
            self.h = concat.concat([y, h_rest], axis=0)

        return y
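The h_rest branch above is what allows successive calls with shrinking batch sizes: rows beyond the current batch are parked and re-concatenated on output. A short sketch of the same behaviour with the standard chainer.links.LSTM (not the snippet's own class):

import numpy as np
import chainer.links as L

lstm = L.LSTM(in_size=4, out_size=3)
y1 = lstm(np.random.randn(5, 4).astype(np.float32))  # state now holds 5 rows
y2 = lstm(np.random.randn(3, 4).astype(np.float32))  # rows 3-4 of the state are kept aside
lstm.reset_state()                                    # clear h and c between sequences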
Exemple #48
0
    def _train_for_auxiliary_task(self, x_l0, x_l1, y_l, x_u0, x_u1):
        # Compute gradients
        y_pred0 = self.model(x_u0, self.model_params)
        y_pred1 = self.model(x_u1, self.model_params)
        loss_rc = self.rc_loss(y_pred0, y_pred1)
        self._cleargrads()
        loss_rc.backward(retain_grad=True)

        # Update optimizee parameters by meta-learner
        model_params = self.model_params
        for i, elm in enumerate(model_params.items()):
            name, w = elm
            meta_learner = self.meta_learners[i]
            ml_optimizer = self.ml_optimizers[i]
            shape = w.shape
            with cuda.get_device_from_id(self.device):
                xp = cuda.get_array_module(w.data)
                g_old = w.grad  # no need to deep-copy
                grad_data = xp.reshape(g_old, (np.prod(shape), 1))
            
                # refine grad, update w, and replace
                grad = Variable(grad_data)
                g = meta_learner(grad)  #TODO: use either h or c
                w -= F.reshape(g, shape)
            model_params[name] = w
                
        # Forward the primary task for training the meta-learners
        #TODO: use the same labeled data?
        y_pred = self.model(x_l0, self.model_params)
        self.loss_ml += F.softmax_cross_entropy(y_pred, y_l)
Exemple #49
0
    def accuracy_gpu(self, device=None):
        model = self.model
        optimizer = self.optimizer
        model.to_gpu(device=device)
        optimizer.setup(model)
        with cuda.get_device_from_id(device):
            return self._train_linear_classifier(model, optimizer, True)
Exemple #50
0
    def update_parameter_by_meta_learner(
            self, model_params, loss, 
            x_l0, x_l1, y_l):

        # Forward meta-learner
        namedparams = model_params
        for i, elm in enumerate(namedparams.items()):  # parameter-loop
            k, p = elm
            with cuda.get_device_from_id(self.device):
                shape = p.shape
                xp = cuda.get_array_module(p.data)

                x = p.grad
                grad = xp.reshape(x, (np.prod(shape), ))
                meta_learner = self.meta_learners[i]
                g = meta_learner(Variable(grad))  # forward
                w = p - F.reshape(g, shape)
                self.model_params[k] = w

        # Train meta-learner with main objective
        y_pred = self.model(x_l0, self.model_params)
        loss_ce = F.softmax_cross_entropy(y_pred, y_l)
        
        self.cleargrads()  # need to clear W's grad due to loss_rec.backward
        for meta_learner in self.meta_learners:
            meta_learner.cleargrads()
        loss_ce.backward(retain_grad=True)
        for opt in self.opt_meta_learners:
            opt.update()

        loss_ce.unchain_backward()  # TODO: is this the proper place to unchain?
Exemple #51
0
    def update_parameter_by_meta_learner(
            self, model_params, loss, 
            x_l0, x_l1, y_l):

        # Forward meta-learner
        namedparams = model_params
        for i, elm in enumerate(namedparams.items()):  # parameter-loop
            k, p = elm
            with cuda.get_device_from_id(self.device):
                shape = p.shape
                xp = cuda.get_array_module(p.data)

                # normalize grad
                x = p.grad
                p_val = 10
                grad0 = xp.where(xp.absolute(x) > xp.exp(-p_val), 
                                   xp.log(xp.absolute(x))/p_val, -1)
                grad1 = xp.where(xp.absolute(x) > xp.exp(-p_val), 
                                   xp.sign(x), xp.exp(p_val)*x)
                grad0 = xp.reshape(grad0, (np.prod(shape), ))
                grad1 = xp.reshape(grad1, (np.prod(shape), ))
                grad0 = xp.expand_dims(grad0, axis=1)
                grad1 = xp.expand_dims(grad1, axis=1)
                input_grad = xp.concatenate((grad0, grad1), axis=1)

                # normalize loss
                x = loss.data
                loss0 = xp.where(xp.absolute(x) > xp.exp(-p_val), 
                                   xp.log(xp.absolute(x))/p_val, -1)
                loss1 = xp.where(xp.absolute(x) > xp.exp(-p_val), 
                                   xp.sign(x), xp.exp(p_val)*x)
                loss0 = xp.expand_dims(loss0, axis=0)
                loss1 = xp.expand_dims(loss1, axis=0)
                input_loss = xp.concatenate((loss0, loss1))
                input_loss = xp.broadcast_to(input_loss, 
                                             (input_grad.shape[0], 2))

                # input
                input_ = xp.concatenate((input_grad, input_loss), axis=1)

                meta_learner = self.meta_learners[i]
                g = meta_learner(Variable(input_.astype(xp.float32)))  # forward
                w = p - F.reshape(g, shape) * 1e-3
                namedparams[k] = w
                
        # Train meta-learner with main objective
        y_pred = self.model(x_l0, self.model_params)
        loss_ce = F.softmax_cross_entropy(y_pred, y_l)
        
        self.cleargrads()  # need to clear W's grad due to loss_rec.backward
        for meta_learner in self.meta_learners:
            meta_learner.cleargrads()
        loss_ce.backward(retain_grad=True)  

        for opt in self.opt_meta_learners:
            opt.update()

        loss_ce.unchain_backward()
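The grad0/grad1 and loss0/loss1 pairs above apply the log-scale preprocessing commonly used for learned optimizers: large inputs become (log|x|/p, sign(x)) and tiny ones become (-1, e^p * x). A standalone NumPy sketch of that transform; the function name is mine, not from the source:

import numpy as np

def preprocess(x, p=10.0):
    # Mirrors the grad0/grad1 computation above for an arbitrary array.
    big = np.absolute(x) > np.exp(-p)
    first = np.where(big, np.log(np.absolute(x)) / p, -1.0)
    second = np.where(big, np.sign(x), np.exp(p) * x)
    return np.stack([first, second], axis=-1)   # two features per element

print(preprocess(np.array([1e-8, -0.5, 3.0])))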
Exemple #52
0
    def __call__(self, x):
        """Updates the internal state and returns the LSTM outputs.

        Args:
            x (~chainer.Variable): A new batch from the input sequence.

        Returns:
            ~chainer.Variable: Outputs of updated LSTM units.

        """
        lstm_in = self.upward(x)
        if self.h is not None:
            lstm_in += self.lateral(self.h)
        else:
            xp = self.xp
            with cuda.get_device_from_id(self._device_id):
                self.h = variable.Variable(
                    xp.zeros((len(x.data), self.state_size),
                             dtype=x.data.dtype),
                    volatile='auto')
        if self.c is None:
            xp = self.xp
            with cuda.get_device_from_id(self._device_id):
                self.c = variable.Variable(
                    xp.zeros((len(x.data), self.state_size),
                             dtype=x.data.dtype),
                    volatile='auto')

        lstm_in = reshape.reshape(lstm_in, (len(lstm_in.data),
                                            lstm_in.data.shape[1] // 4,
                                            4))

        a, i, f, o = split_axis.split_axis(lstm_in, 4, 2)
        a = reshape.reshape(a, (len(a.data), self.state_size))
        i = reshape.reshape(i, (len(i.data), self.state_size))
        f = reshape.reshape(f, (len(f.data), self.state_size))
        o = reshape.reshape(o, (len(o.data), self.state_size))

        c_tmp = tanh.tanh(a) * sigmoid.sigmoid(i) + sigmoid.sigmoid(f) * self.c
        self.c = zoneout.zoneout(self.c, c_tmp, self.c_ratio, self.train)
        self.h = zoneout.zoneout(self.h,
                                 sigmoid.sigmoid(o) * tanh.tanh(c_tmp),
                                 self.h_ratio, self.train)
        return self.h
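Zoneout regularizes the recurrence by letting each state element keep its previous value with some probability instead of taking the fresh update. A minimal NumPy illustration of the training-time rule applied above to both c and h (the helper is my own sketch, not Chainer's F.zoneout):

import numpy as np

def zoneout(prev, current, ratio):
    # With probability `ratio` an element keeps its previous value.
    keep = np.random.rand(*prev.shape) < ratio
    return np.where(keep, prev, current)

c_prev = np.zeros((2, 3), dtype=np.float32)
c_new = np.ones((2, 3), dtype=np.float32)
print(zoneout(c_prev, c_new, ratio=0.3))  # roughly 30% of entries stay 0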
Exemple #53
0
    def __init__(self, arch=None, weights_file=None, model=None, device=-1, precise=False):
        self.arch = arch
        self.precise = precise
        if model is not None:
            self.model = model
        else:
            print('Loading the model...')
            self.model = params['archs'][arch]()

            if weights_file:
                serializers.load_npz(weights_file, self.model)

        self.device = device
        if self.device >= 0:
            cuda.get_device_from_id(device).use()
            self.model.to_gpu()

            # create gaussian filter
            self.gaussian_kernel = self.create_gaussian_kernel(params['gaussian_sigma'], params['ksize'])[None, None]
            self.gaussian_kernel = cuda.to_gpu(self.gaussian_kernel)
Exemple #54
0
    def load_model(self):
        n_fg_class = len(self.train_dataset.fg_class_names)

        pooling_func = cmr.functions.roi_align_2d
        anchor_scales = (4, 8, 16, 32)
        roi_size = 14
        min_size = 600
        max_size = 1000
        mask_initialW = chainer.initializers.Normal(0.01)

        if self.model_name == 'vgg16':
            self.mask_rcnn = cmr.models.MaskRCNNVGG16(
                n_fg_class=n_fg_class,
                pretrained_model='imagenet',
                pooling_func=pooling_func,
                anchor_scales=anchor_scales,
                roi_size=roi_size,
                min_size=min_size,
                max_size=max_size,
                mask_initialW=mask_initialW,
            )
        elif self.model_name in ['resnet50', 'resnet101']:
            n_layers = int(self.model_name.lstrip('resnet'))
            self.mask_rcnn = cmr.models.MaskRCNNResNet(
                n_layers=n_layers,
                n_fg_class=n_fg_class,
                pooling_func=pooling_func,
                anchor_scales=anchor_scales,
                roi_size=roi_size,
                min_size=min_size,
                max_size=max_size,
                mask_initialW=mask_initialW,
            )
        else:
            raise ValueError(
                'Unsupported model_name: {}'.format(self.model_name))
        self.model = cmr.models.MaskRCNNTrainChain(self.mask_rcnn)

        if self.gpu >= 0:
            cuda.get_device_from_id(self.gpu).use()
            self.model.to_gpu()
Exemple #55
0
    def __call__(self, x):
        """Applies the linear layer.

        Args:
            x (~chainer.Variable): Batch of input vectors.

        Returns:
            ~chainer.Variable: Output of the linear layer.

        """
        if self.has_uninitialized_params:
            with cuda.get_device_from_id(self._device_id):
                self._initialize_params(x.size // x.shape[0])
        return linear.linear(x, self.W, self.b)
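The uninitialized-parameter branch above defers allocating W until the first input arrives, which is the same lazy initialization the public Chainer links expose when in_size is None. A short sketch:

import numpy as np
import chainer.links as L

layer = L.Linear(None, 10)                 # in_size is inferred on the first call
y = layer(np.zeros((2, 7), dtype=np.float32))
print(layer.W.shape)                       # -> (10, 7)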
Exemple #56
0
    def forward_meta_learners(self, ):
        # Forward of meta-learner, i.e., parameter update
        for i, name_param in enumerate(self.model_params.items()):
            k, p = name_param
            with cuda.get_device_from_id(self.device):
                shape = p.shape
                xp = cuda.get_array_module(p.data)

                w_data = p.data  # the meta-learner is a gated recurrent unit for W, not for G
                w_data = xp.reshape(w_data, (1, 1, np.prod(shape)))
                meta_learner = self.meta_learners[i]
                w_accum = meta_learner(Variable(w_data))  # forward
                w_accum = F.reshape(w_accum, shape)
                self.model_params[k] = w_accum
Exemple #57
0
    def __call__(self, x, W=None, b=None):
        if self.has_uninitialized_params:
            with cuda.get_device_from_id(self._device_id):
                self._initialize_params(x.shape[1])

        if W is not None:
            return deconvolution_2d.deconvolution_2d(
                x, W, b, self.stride, self.pad,
                self.outsize, self.use_cudnn,
                deterministic=self.deterministic)

        return deconvolution_2d.deconvolution_2d(
            x, self.W, self.b, self.stride, self.pad,
            self.outsize, self.use_cudnn,
            deterministic=self.deterministic)
Exemple #58
0
    def forward_meta_learners(self, ):
        # Forward of meta-learner, i.e., parameter update
        for i, name_param in enumerate(self.model_params.items()):
            k, p = name_param
            with cuda.get_device_from_id(self.device):
                shape = p.shape
                xp = cuda.get_array_module(p.data)

                x = p.grad
                #grad = xp.reshape(x, (np.prod(shape), ))
                grad = xp.reshape(x, (np.prod(shape), 1))
                meta_learner = self.meta_learners[i]
                g = meta_learner(Variable(grad))  # forward
                w = p - F.reshape(g, shape)                
                self.model_params[k] = w  # parameter update
Exemple #59
0
    def __call__(self, x):
        """Applies the convolution layer.

        Args:
            x (~chainer.Variable): Input image.

        Returns:
            ~chainer.Variable: Output of the convolution.

        """
        if self.has_uninitialized_params:
            with cuda.get_device_from_id(self._device_id):
                self._initialize_params(x.shape[1])
        return dilated_convolution_2d.dilated_convolution_2d(
            x, self.W, self.b, self.stride,
            self.pad, self.dilate, self.use_cudnn)