Example #1
def main():
    xp = np
    using_gpu = args.gpu_device >= 0
    if using_gpu:
        cuda.get_device(args.gpu_device).use()
        xp = cupy

    hyperparams = Hyperparameters(args.snapshot_path)
    hyperparams.print()

    num_bins_x = 2.0**hyperparams.num_bits_x

    encoder = Glow(hyperparams, hdf5_path=args.snapshot_path)
    if using_gpu:
        encoder.to_gpu()

    with chainer.no_backprop_mode(), encoder.reverse() as decoder:
        while True:
            z = xp.random.normal(0,
                                 args.temperature,
                                 size=(
                                     1,
                                     3,
                                 ) + hyperparams.image_size).astype("float32")

            x, _ = decoder.reverse_step(z)
            x_img = make_uint8(x.data[0], num_bins_x)
            plt.imshow(x_img, interpolation="none")
            plt.pause(.01)
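The `xp = np` / `xp = cupy` switch that opens this and several later examples is the standard Chainer idiom for backend-agnostic array code. Below is a minimal self-contained sketch of the same idiom; `select_xp` is a hypothetical helper name, and it uses the non-deprecated `get_device_from_id` (see Examples #6 and #8).

import numpy as np

try:
    import cupy
    from chainer import cuda
except ImportError:
    cupy = None

def select_xp(gpu_device):
    # Return the array module for the requested device, making that
    # device current when a GPU id (>= 0) is given.
    if gpu_device >= 0 and cupy is not None:
        cuda.get_device_from_id(gpu_device).use()
        return cupy
    return np

xp = select_xp(-1)  # CPU: xp is numpy
z = xp.random.normal(0, 1, size=(1, 3, 64, 64)).astype("float32")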
Example #2
    def setInitAllParameters(self,
                             optimizer,
                             init_type="default",
                             init_scale=0.1):
        sys.stdout.write("############ Current Parameters BEGIN\n")
        self.printAllParameters(optimizer)
        sys.stdout.write("############ Current Parameters END\n")

        if init_type == "uniform":
            sys.stdout.write("# initializer is [uniform] [%f]\n" % init_scale)
            t_initializer = chainer.initializers.Uniform(init_scale)
            named_params = sorted(optimizer.target.namedparams(),
                                  key=lambda x: x[0])
            for n, p in named_params:
                with cuda.get_device(p.data):
                    p.copydata(chainer.Parameter(t_initializer, p.data.shape))
        elif init_type == "normal":
            sys.stdout.write("# initializer is [normal] [%f]\n" % init_scale)
            t_initializer = chainer.initializers.Normal(init_scale)
            named_params = sorted(optimizer.target.namedparams(),
                                  key=lambda x: x[0])
            for n, p in named_params:
                with cuda.get_device(p.data):
                    p.copydata(chainer.Parameter(t_initializer, p.data.shape))
        else:  # "default"
            sys.stdout.write("# initializer is [defalit] [%f]\n" % init_scale)
            named_params = sorted(optimizer.target.namedparams(),
                                  key=lambda x: x[0])
            for n, p in named_params:
                with cuda.get_device(p.data):
                    p.data *= init_scale
        self.printAllParameters(optimizer, init_type, init_scale)
        return 0
Example #3
    def _create_new_model(self, steps_list, epoch_num=None, batch_size=2048):
        epoch_num = self.default_params[
            'create_new_model_epoch_num'] if epoch_num is None else epoch_num

        model = DualNet()
        model.load(self.model_filename)

        if self.gpu_device >= 0:
            cuda.get_device(self.gpu_device).use()
            model.to_gpu(self.gpu_device)

        optimizer = chainer.optimizers.Adam()
        optimizer.setup(model)
        for i in range(epoch_num):
            x_train, y_train_policy, y_train_value = self._get_train_batch(
                steps_list, batch_size)
            y_policy, y_value = model(x_train)
            model.cleargrads()
            loss = F.mean_squared_error(y_policy,
                                        y_train_policy) + F.mean_squared_error(
                                            y_value, y_train_value)
            loss.backward()
            optimizer.update()
            print("[new nodel] epoch: {} / {}, loss: {}".format(
                i + 1, epoch_num, loss))

        if self.gpu_device >= 0:
            model.to_cpu()

        return model
Example #4
def run_inference(model_dir: str, epoch: Optional[int], device: int, metric: str):
    chainer.config.train = False

    if device >= 0:
        cuda.get_device(device).use()

    set_seed()

    configs = json.load(open(os.path.join(model_dir, "args")))
    snapshot_file, prediction_path = select_snapshot(epoch, metric, model_dir)
    logger.debug(f"creat prediction into {prediction_path}")

    vocab = Vocabulary.prepare(configs)
    num_word_vocab = configs["num_word_vocab"]
    num_char_vocab = configs["num_char_vocab"]
    num_tag_vocab = configs["num_tag_vocab"]

    model = BiLSTM_CRF(configs, num_word_vocab, num_char_vocab, num_tag_vocab)

    model_path = os.path.join(model_dir, snapshot_file)
    chainer.serializers.load_npz(model_path, model)
    logger.debug(f"load {snapshot_file}")

    if device >= 0:
        model.to_gpu(device)

    transformer = DatasetTransformer(vocab)
    transform = transformer.transform
    test_iterator = create_iterator(
        vocab, configs, "test", transform, return_original_sentence=True
    )

    with open(prediction_path, "w", encoding="utf-8") as file:
        for batch in test_iterator:
            batch, original_sentences = list(zip(*batch))
            in_arrays, t_arrays = converter(batch, device)
            p_arrays = model.predict(in_arrays)

            word_sentences, t_tag_sentences = list(
                zip(*transformer.itransform(in_arrays[0], t_arrays))
            )
            _, p_tag_sentences = list(
                zip(*transformer.itransform(in_arrays[0], p_arrays))
            )

            sentence_gen = zip(
                word_sentences,
                t_tag_sentences,
                p_tag_sentences,
                original_sentences,
            )  # NOQA
            for ws, ts, ps, _os in sentence_gen:
                for w, t, p, o in zip(ws, ts, ps, _os):
                    w = w.replace(" ", "<WHITESPACE>")
                    o = o.replace(" ", "<WHITESPACE>")
                    if w != o:
                        w = f"{w}({o})"
                    print(f"{w} {t} {p}", file=file)
                print(file=file)
Example #5
def main():
    xp = np
    using_gpu = args.gpu_device >= 0
    if using_gpu:
        cuda.get_device(args.gpu_device).use()
        xp = cupy

    hyperparams = Hyperparameters(args.snapshot_path)
    hyperparams.print()

    num_bins_x = 2.0**hyperparams.num_bits_x
    image_size = (28, 28)

    images = chainer.datasets.mnist.get_mnist(withlabel=False)[0]
    images = 255.0 * np.asarray(images).reshape((-1, ) + image_size + (1, ))
    if hyperparams.num_image_channels != 1:
        images = np.broadcast_to(images, (images.shape[0], ) + image_size +
                                 (hyperparams.num_image_channels, ))
    images = preprocess(images, hyperparams.num_bits_x)

    dataset = glow.dataset.Dataset(images)
    iterator = glow.dataset.Iterator(dataset, batch_size=1)

    print(tabulate([["#image", len(dataset)]]))

    encoder = Glow(hyperparams, hdf5_path=args.snapshot_path)
    if using_gpu:
        encoder.to_gpu()

    fig = plt.figure(figsize=(8, 4))
    left = fig.add_subplot(1, 2, 1)
    right = fig.add_subplot(1, 2, 2)

    with chainer.no_backprop_mode(), encoder.reverse() as decoder:
        while True:
            for data_indices in iterator:
                x = to_gpu(dataset[data_indices])
                x += xp.random.uniform(0, 1.0 / num_bins_x, size=x.shape)
                factorized_z_distribution, _ = encoder.forward_step(x)

                factorized_z = []
                for (zi, mean, ln_var) in factorized_z_distribution:
                    factorized_z.append(zi)

                # for zi in factorized_z:
                #     noise = xp.random.normal(
                #         0, 0.2, size=zi.shape).astype("float32")
                #     zi.data += noise
                rev_x, _ = decoder.reverse_step(factorized_z)

                x_img = make_uint8(x[0], num_bins_x)
                rev_x_img = make_uint8(rev_x.data[0], num_bins_x)

                left.imshow(x_img, interpolation="none")
                right.imshow(rev_x_img, interpolation="none")

                plt.pause(.01)
Example #6
    def test_get_device_warning(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            cuda.get_device(cuda.cupy.array([1]))

        assert len(w) == 1
        assert w[0].category is DeprecationWarning
        assert ('get_device is deprecated. Please use get_device_from_id'
                ' or get_device_from_array instead.' in str(w[0].message))
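As the warning above spells out, `cuda.get_device` is deprecated in favor of two explicit variants. A minimal sketch of the replacements, assuming Chainer with a working CUDA setup:

from chainer import cuda

# Select a device by integer id:
cuda.get_device_from_id(0).use()

# Select the device on which an existing array lives:
x = cuda.cupy.array([1])
cuda.get_device_from_array(x).use()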
Example #8
    def test_get_device_warning(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            cuda.get_device(cuda.cupy.array([1]))

        self.assertEqual(len(w), 1)
        self.assertIs(w[0].category, DeprecationWarning)
        self.assertIn(
            'get_device is deprecated. Please use get_device_from_id'
            ' or get_device_from_array instead.', str(w[0].message))
Example #10
def main():
    xp = np
    using_gpu = args.gpu_device >= 0
    if using_gpu:
        cuda.get_device(args.gpu_device).use()
        xp = cupy

    hyperparams = Hyperparameters(args.snapshot_path)
    hyperparams.print()

    num_bins_x = 2.0**hyperparams.num_bits_x

    assert args.dataset_format in ["png", "npy"]

    files = Path(args.dataset_path).glob("*.{}".format(args.dataset_format))
    if args.dataset_format == "png":
        images = []
        for filepath in files:
            image = np.array(Image.open(filepath)).astype("float32")
            image = preprocess(image, hyperparams.num_bits_x)
            images.append(image)
        assert len(images) > 0
        images = np.asanyarray(images)
    elif args.dataset_format == "npy":
        images = []
        for filepath in files:
            array = np.load(filepath).astype("float32")
            array = preprocess(array, hyperparams.num_bits_x)
            images.append(array)
        assert len(images) > 0
        num_files = len(images)
        images = np.asanyarray(images)
        images = images.reshape((num_files * images.shape[1], ) +
                                images.shape[2:])
    else:
        raise NotImplementedError

    dataset = glow.dataset.Dataset(images)
    iterator = glow.dataset.Iterator(dataset, batch_size=1)

    print(tabulate([["#image", len(dataset)]]))

    encoder = Glow(hyperparams, hdf5_path=args.snapshot_path)
    if using_gpu:
        encoder.to_gpu()

    with chainer.no_backprop_mode(), encoder.reverse() as decoder:
        for data_indices in iterator:
            print("data:", data_indices)
            x = to_gpu(dataset[data_indices])
            x += xp.random.uniform(0, 1.0 / num_bins_x, size=x.shape)
            factorized_z_distribution, _ = encoder.forward_step(x)

            for (_, mean, ln_var) in factorized_z_distribution:
                print(xp.mean(mean.data), xp.mean(xp.exp(ln_var.data)))
Example #11
def main():
    xp = np
    using_gpu = args.gpu_device >= 0
    if using_gpu:
        cuda.get_device(args.gpu_device).use()
        xp = cupy

    model1 = get_model(args.snapshot_path_1, using_gpu)
    model2 = get_model(args.snapshot_path_2, using_gpu)
    model3 = get_model(args.snapshot_path_3, using_gpu)

    num_bins_x, hyperparams = model1[1:]

    fig = plt.figure(figsize=(12, 4))
    left = fig.add_subplot(1, 3, 1)
    center = fig.add_subplot(1, 3, 2)
    right = fig.add_subplot(1, 3, 3)

    while True:
        z = xp.random.normal(0,
                             args.temperature,
                             size=(
                                 1,
                                 3,
                             ) + hyperparams.image_size).astype("float32")

        with chainer.no_backprop_mode():
            encoder = model1[0]
            with encoder.reverse() as decoder:
                hyperparams = model1[2]
                x, _ = decoder.reverse_step(z)
                x_img = make_uint8(x.data[0], num_bins_x)
                left.imshow(x_img, interpolation="none")
                left.set_title("#channels = {}".format(
                    hyperparams.nn_hidden_channels))

            encoder = model2[0]
            with encoder.reverse() as decoder:
                hyperparams = model2[2]
                x, _ = decoder.reverse_step(z)
                x_img = make_uint8(x.data[0], num_bins_x)
                center.imshow(x_img, interpolation="none")
                center.set_title("#channels = {}".format(
                    hyperparams.nn_hidden_channels))

            encoder = model3[0]
            with encoder.reverse() as decoder:
                hyperparams = model3[2]
                x, _ = decoder.reverse_step(z)
                x_img = make_uint8(x.data[0], num_bins_x)
                right.imshow(x_img, interpolation="none")
                right.set_title("#channels = {}".format(
                    hyperparams.nn_hidden_channels))

            plt.pause(.01)
Example #12
def load_models(args: argparse.Namespace) -> dict[str, chainer.Chain]:
    """Load models using a args config

	Args:
		args (argparse.Namespace): argparse namespace containing config such as the arch and color

	Returns:
		dict[str, chainer.Chain]: Mapping of model names to chainer.Chain models
	"""
    ch = 3 if args.color == "rgb" else 1
    if args.model_dir is None:
        model_dir = THISDIR + f"/models/{args.arch.lower()}"
    else:
        model_dir = args.model_dir

    models = {}
    flag = False
    if args.method == "noise_scale":
        model_name = f"anime_style_noise{args.noise_level}_scale_{args.color}.npz"
        model_path = os.path.join(model_dir, model_name)
        if os.path.exists(model_path):
            models["noise_scale"] = srcnn.archs[args.arch](ch)
            load_npz(model_path, models["noise_scale"])
            alpha_model_name = f"anime_style_scale_{args.color}.npz"
            alpha_model_path = os.path.join(model_dir, alpha_model_name)
            models["alpha"] = srcnn.archs[args.arch](ch)
            load_npz(alpha_model_path, models["alpha"])
        else:
            flag = True
    if args.method == "scale" or flag:
        model_name = f"anime_style_scale_{args.color}.npz"
        model_path = os.path.join(model_dir, model_name)
        models["scale"] = srcnn.archs[args.arch](ch)
        load_npz(model_path, models["scale"])
    if args.method == "noise" or flag:
        model_name = f"anime_style_noise{args.noise_level}_{args.color}.npz"
        model_path = os.path.join(model_dir, model_name)
        if not os.path.exists(model_path):
            model_name = f"anime_style_noise{args.noise_level}_scale_{args.color}.npz"
            model_path = os.path.join(model_dir, model_name)
        models["noise"] = srcnn.archs[args.arch](ch)
        load_npz(model_path, models["noise"])

    if args.gpu >= 0:
        cuda.check_cuda_available()
        cuda.get_device(args.gpu).use()
        for _, model in models.items():
            model.to_gpu()
    return models
Example #13
def main():
    os.makedirs(args.log_directory)
    os.makedirs(args.model_directory)

    # GPU usage
    gp = np
    gpu_device = args.gpu_device
    assigned_gpu = gpu_device >= 0
    if assigned_gpu:
        cuda.get_device(gpu_device).use()
        gp = cp

    # Dataset
    data_train = read_dataset(args.train_dataset_directory)
    if args.test_dataset_directory is not None:
        data_test = read_dataset(args.test_dataset_directory)
    
    # # Logging
    # csv = data_format
    # csv.load(args.logdirectory)

    # Optimizer
    optimizer = chainer.optimizers.Adam(alpha=0.001, 
                                        beta1=0.9, 
                                        beta2=0.999, 
                                        eps=1e-08, 
                                        eta=1.0, 
                                        weight_decay_rate=0, 
                                        amsgrad=False, 
                                        adabound=False, 
                                        final_lr=0.1, 
                                        gamma=0.001)
    print(optimizer)    

    # Training
    dataset_size = len(data_train)

    for epoch in range(args.epochs):
        print("Epoch: "+str(epoch))

        for subset_index, subset in enumerate(data_train):
            batches = sampler(subset, batch_size=args.batch_size)

            for batch_index, data in enumerate(batches):

                images, uncertainties = subset[data]

                z = 
Example #14
 def lossfun_multi_leam(self, z, t):
     with cuda.get_device(z):
         z_class = z[-self.n_class:]
         z = z[:-self.n_class]
     t_class = Variable(cp.array(range(self.n_class)))
     loss = (-(F.mean(t * F.log(z) + (1 - t) * F.log1p(-z))) / z.shape[0]
             + F.softmax_cross_entropy(z_class, t_class))
     return loss
Example #15
 def print_strings(self, train_mode, epoch, cMBSize, encLen, decLen,
                   start_time, args):
     with cuda.get_device(self.lossVal):
         msg0 = 'Epoch: %3d | LL: %9.6f PPL: %10.4f' % (
             epoch, float(self.lossVal / max(1, self.procTot)),
             math.exp(min(10, float(self.lossVal / max(1, self.procTot)))))
         msg1 = '| gN: %8.4f %8.4f %8.4f' % (self.gnorm, self.gnormLimit,
                                             self.pnorm)
         dt = self.corTot + self.incorTot
         msg2 = '| acc: %6.2f %8d %8d ' % (float(
             100.0 * self.corTot / max(1, dt)), self.corTot, self.incorTot)
         msg3 = '| tot: %8d proc: %8d | num: %8d %6d %6d ' % (
             self.trainsizeTot, self.procTot, self.instanceNum,
             self.encMaxLen, self.decMaxLen)
         msg4 = '| MB: %4d %6d %4d %4d | Time: %10.4f' % (
             cMBSize, self.batchCount, encLen, decLen,
             time.time() - start_time)
         # Always evaluate on dev.data; for training data, follow the option
         if train_mode == 0:
             msgA = '%s %s %s %s' % (msg0, msg2, msg3, msg4)
         # elif args.doEvalAcc > 0:
         msgA = '%s %s %s %s %s' % (msg0, msg1, msg2, msg3, msg4)
         # else:
         #     msgA = '%s %s %s %s' % (msg0, msg1, msg3, msg4)
         return msgA
Example #16
def objective(trial, train, test, dir):
    # Get hyperparameters from the trial
    n_unit6 = trial.suggest_int('n_unit6', 4, 2048)
    n_unit7 = trial.suggest_int('n_unit7', 4, 2048)
    batch_size = trial.suggest_int('batch_size', 2, 128)
    class_labels = 12
    n_epoch = 20
    gpu = 0

    # Define the model
    model = VGG(n_units6=n_unit6, n_units7=n_unit7, class_labels=class_labels)
    optimizer = create_optimizer(trial, model)

    cuda.get_device(0).use()
    model.to_gpu(0)

    # Trainer-related settings
    train_iter = iterators.SerialIterator(train, batch_size)
    test_iter = iterators.SerialIterator(test, 1, repeat=False, shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer, device=gpu)
    trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=dir)

    trainer.extend(integrator)
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.observe_lr())
    trainer.extend(
        extensions.PrintReport([
            'epoch',
            'main/loss',
            'main/accuracy',
            'test/main/loss',
            'test/main/accuracy',
            'elapsed_time',
            'lr',
        ]))
    trainer.extend(extensions.ProgressBar(update_interval=1))
    trainer.extend(extensions.Evaluator(test_iter, model, device=gpu),
                   name='test')

    # Run training
    trainer.run()

    # Use accuracy as the evaluation metric
    return 1 - trainer.observation['test/main/accuracy']
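An objective like the one above is driven by Optuna's study loop. A minimal sketch of the caller, assuming `train`, `test`, and an output directory `out_dir` already exist:

import optuna

# create_study() minimizes by default, which matches returning 1 - accuracy.
study = optuna.create_study()
study.optimize(lambda trial: objective(trial, train, test, out_dir), n_trials=100)
print(study.best_params)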
Example #17
def main():
    xp = np
    using_gpu = args.gpu_device >= 0
    if using_gpu:
        cuda.get_device(args.gpu_device).use()
        xp = cupy

    hyperparams = Hyperparameters(args.snapshot_path)
    hyperparams.print()

    num_bins_x = 2.0**hyperparams.num_bits_x

    encoder = Glow(hyperparams, hdf5_path=args.snapshot_path)
    if using_gpu:
        encoder.to_gpu()

    temperatures = [0.0, 0.25, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    total = len(temperatures)
    fig = plt.figure(figsize=(total * 4, 4))
    subplots = []
    for n in range(total):
        subplot = fig.add_subplot(1, total, n + 1)
        subplots.append(subplot)

    with chainer.no_backprop_mode(), encoder.reverse() as decoder:
        while True:
            z_batch = []
            for temperature in temperatures:
                z = np.random.normal(0,
                                     temperature,
                                     size=(3, ) +
                                     hyperparams.image_size).astype("float32")
                z_batch.append(z)
            z_batch = np.asanyarray(z_batch)
            if using_gpu:
                z_batch = cuda.to_gpu(z_batch)
            x, _ = decoder.reverse_step(z_batch)
            for n, (temperature,
                    subplot) in enumerate(zip(temperatures, subplots)):
                x_img = make_uint8(x.data[n], num_bins_x)
                # x_img = np.broadcast_to(x_img, (28, 28, 3))
                subplot.imshow(x_img, interpolation="none")
                subplot.set_title("temperature={}".format(temperature))
            plt.pause(.01)
Example #18
    def __init__(self,
                 gpuid=-1,
                 alpha=0.5,
                 gamma=0.95,
                 train=True,
                 backprop=True):
        self.timing = brica.Timing(5, 1, 0)
        self.agent = self._set_agent(gpuid=gpuid)
        self.reward = 0
        self.time = 0
        if gpuid < 0:
            self.xp = numpy
        else:
            print("Use GPU")
            cuda.get_device(gpuid).use()
            self.xp = cuda.cupy

        chainer.config.train = train
        chainer.config.enable_backprop = backprop
Example #19
def objective(trial):

    # Create model instance
    model = TripletClassifier(
        MyNeuralNetwork(n_mid_units=mid_size, n_out=out_size))
    optimizer = create_optimizer(trial, model)
    batchsize = trial.suggest_int('batchsize', 10, len(y))
    epoch = trial.suggest_int('epoch', 10, 50)

    # Assign GPU or CPU to the model
    if gpu_id >= 0:
        cuda.get_device(gpu_id).use()
        model.to_gpu(gpu_id)

    # Define Iterator
    train_set = datasets.TupleDataset(train_triplet, train_label)
    test_set = datasets.TupleDataset(test_triplet, test_label)
    train_iter = iterators.SerialIterator(train_set, batchsize)
    test_iter = iterators.SerialIterator(test_set,
                                         batchsize,
                                         repeat=False,
                                         shuffle=False)

    # Define Trainer
    updater = chainer.training.StandardUpdater(train_iter, optimizer)
    trainer = chainer.training.Trainer(updater, (epoch, 'epoch'))
    trainer.extend(chainer.training.extensions.Evaluator(test_iter, model))
    log_report_extension = chainer.training.extensions.LogReport(log_name=None)
    trainer.extend(
        chainer.training.extensions.PrintReport([
            'epoch', 'main/squared_error', 'validation/main/squared_error',
            'main/abs_error', 'validation/main/abs_error', 'elapsed_time'
        ]))
    trainer.extend(log_report_extension)

    trainer.run()

    log_last = log_report_extension.log[-1]
    for key, value in log_last.items():
        trial.set_user_attr(key, value)

    val_err = log_report_extension.log[-1]['validation/main/squared_error']
    return val_err
Example #20
    def __call__(self, x, finetune=False):
        if self.gamma is not None:
            gamma = self.gamma
        else:
            with cuda.get_device(self._device_id):
                gamma = variable.Variable(
                    self.xp.ones(self.avg_mean.shape, dtype=x.dtype))

        if self.beta is not None:
            beta = self.beta
        else:
            with cuda.get_device(self._device_id):
                beta = variable.Variable(
                    self.xp.zeros(self.avg_mean.shape, dtype=x.dtype))

        if configuration.config.train:
            if finetune:
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            func = batch_renormalization.BatchRenormalizationFunction(
                self.eps, self.avg_mean, self.avg_var, decay, self.rmax,
                self.dmax, self.freeze_running_statistics)
            if self.freeze_running_statistics:
                func.r = self.r
                func.d = self.d
            ret = func(x, gamma, beta)
            if self.freeze_running_statistics and self.r is None:
                self.r = func.r
                self.d = func.d

            self.avg_mean[:] = func.running_mean
            self.avg_var[:] = func.running_var
        else:
            # Use running average statistics or fine-tuned statistics.
            mean = variable.Variable(self.avg_mean)
            var = variable.Variable(self.avg_var)
            ret = batch_renormalization.fixed_batch_renormalization(
                x, gamma, beta, mean, var, self.eps)
        return ret
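For context, the `__call__` above is the forward pass of a batch-renormalization link. A minimal usage sketch with Chainer's built-in `chainer.links.BatchRenormalization` (input shape is illustrative):

import numpy as np
import chainer
import chainer.links as L

brn = L.BatchRenormalization(3)  # one set of statistics per channel
x = np.random.randn(8, 3, 32, 32).astype(np.float32)
with chainer.using_config("train", True):
    y = brn(x)
print(y.shape)  # (8, 3, 32, 32)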
Example #21
    def __call__(self, x, finetune=False):
        if self.gamma is not None:
            gamma = self.gamma
        else:
            with cuda.get_device(self._device_id):
                gamma = variable.Variable(self.xp.ones(
                    self.avg_mean.shape, dtype=x.dtype))

        if self.beta is not None:
            beta = self.beta
        else:
            with cuda.get_device(self._device_id):
                beta = variable.Variable(self.xp.zeros(
                    self.avg_mean.shape, dtype=x.dtype))

        if configuration.config.train:
            if finetune:
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            func = batch_renormalization.BatchRenormalizationFunction(
                self.eps, self.avg_mean, self.avg_var, decay,
                self.rmax, self.dmax, self.freeze_running_statistics)
            if self.freeze_running_statistics:
                func.r = self.r
                func.d = self.d
            ret = func(x, gamma, beta)
            if self.freeze_running_statistics and self.r is None:
                self.r = func.r
                self.d = func.d

            self.avg_mean[:] = func.running_mean
            self.avg_var[:] = func.running_var
        else:
            # Use running average statistics or fine-tuned statistics.
            mean = variable.Variable(self.avg_mean)
            var = variable.Variable(self.avg_var)
            ret = batch_renormalization.fixed_batch_renormalization(
                x, gamma, beta, mean, var, self.eps)
        return ret
Example #22
 def auc_fun_leam(self, z, t):
     aucs = []
     with cuda.get_device(z):
         z = z[:-self.n_class]
     t = np.array(chainer.cuda.to_cpu(t))
     z = chainer.cuda.to_cpu(z.data)
     for i in range(6):
         fpr, tpr, thresholds = metrics.roc_curve(t[:, i], z[:, i])
         aucs.append(metrics.auc(fpr, tpr))
     print(aucs)
     return Variable(np.array(sum(aucs) / len(aucs)))
Example #23
    def auc_fun_cnn(z, t):
        aucs = []
        with cuda.get_device(z):
            z = z
        t = chainer.cuda.to_cpu(t)
        z = chainer.cuda.to_cpu(z.data)

        for i in range(6):
            fpr, tpr, thresholds = metrics.roc_curve(t[:, i], z[:, i])
            aucs.append(metrics.auc(fpr, tpr))

        return Variable(np.array(sum(aucs)/len(aucs)))
Example #24
    def update_core(self):
        # the get_optimizer method retrieves the registered optimizer
        gen_optimizer = self.get_optimizer("gen")
        critic_optimizer = self.get_optimizer("critic")

        # obtain batch data
        # get_iterator("main") is SerialIterator so next() returns next minibatch
        batch = self.get_iterator("main").next()
        batch_size = len(batch)

        # start with the critic at optimum even in the first iterations.
        if self.iteration < 25 or self.iteration % 500 == 0:
            n_critic = 100
        else:
            n_critic = self.n_critic

        # optimize Critic
        for _ in range(n_critic):
            # prepare the minibatch real data
            x_real = self.converter(
                batch, self.device
            )  # self.converter() is concat_examples(); self.device sends the data to the GPU
            x_real = Variable(x_real)
            x_real = (x_real - 127.5) / 127.5  # normalize image data

            # prepare the minibatch fake data
            z = Variable(self.xp.asarray(
                self.gen.make_hidden(batch_size)))  # generate a random vector z

            # inference
            y_real = self.critic(x_real)  # estimation result for the real images
            y_fake = self.critic(self.gen(z))

            # backward
            critic_optimizer.update(self.loss_critic, y_fake, y_real)

            # clip w
            for param in self.critic.params():
                if param.data is None:
                    continue
                with cuda.get_device(param.data):
                    xp = cuda.get_array_module(param.data)
                    param.data = xp.clip(param.data, self.clip_lower,
                                         self.clip_upper)

        # optimize generator
        z = Variable(self.xp.asarray(
            self.gen.make_hidden(batch_size)))  # generate a random vector z
        y_fake = self.critic(self.gen(z))  # estimation result for the fake images

        # backward
        gen_optimizer.update(self.loss_gen, y_fake)
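The parameter-clipping loop in `update_core` above enforces the WGAN weight constraint. Pulled out as a standalone helper it might read as follows; `clip_weights` is a hypothetical name, and the sketch uses the non-deprecated `get_device_from_array`:

from chainer import cuda

def clip_weights(chain, lower=-0.01, upper=0.01):
    # Clamp every parameter of `chain` in place, on whichever device
    # the parameter currently lives.
    for param in chain.params():
        if param.data is None:
            continue
        with cuda.get_device_from_array(param.data):
            xp = cuda.get_array_module(param.data)
            param.data = xp.clip(param.data, lower, upper)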
Example #25
 def printAllParameters(self, optimizer, init_type="***", init_scale=1.0):
     total_norm = 0
     total_param = 0
     named_params = sorted(optimizer.target.namedparams(),
                           key=lambda x: x[0])
     for n, p in named_params:
         t_norm = chainer.optimizer_hooks.gradient_clipping._sum_sqnorm(
             p.data)
         sys.stdout.write('### {} {} {} {} {}\n'.format(
             p.name, p.data.ndim, p.data.shape, p.data.size, t_norm))
         total_norm += t_norm
         total_param += p.data.size
     with cuda.get_device(total_norm):
         sys.stdout.write(
             '# param size= [{}] norm = [{}] scale=[{}, {}]\n'.format(
                 total_param, self.model.xp.sqrt(total_norm), init_type,
                 init_scale))
Example #26
def lossfun(x, t):
    with cuda.get_device(t):
        soft20_x = x[:, :20]
        soft100_x = x[:, 20:120]
        soft200_x = x[:, 120:320]
        soft20_label = t[:, 1:21]
        soft100_label = t[:, 21:121]
        soft200_label = t[:, 121:321]

    soft20_loss = -(F.sum(
        soft20_label * F.log_softmax(soft20_x))) / soft20_x.shape[0]
    soft100_loss = -(F.sum(
        soft100_label * F.log_softmax(soft100_x))) / soft100_x.shape[0]
    soft200_loss = -(F.sum(
        soft200_label * F.log_softmax(soft200_x))) / soft200_x.shape[0]
    loss = soft20_loss + soft100_loss + soft200_loss
    return loss
Example #27
def main():
    os.makedirs(args.figure_directory, exist_ok=True)

    #==============================================================================
    # Utilities
    #==============================================================================
    def read_files(directory):
        filenames = []
        files = os.listdir(directory)
        # ipdb.set_trace()
        for filename in files:
            if filename.endswith(".h5"):
                filenames.append(filename)
        filenames.sort()

        dataset_images = []
        dataset_viewpoints = []
        for i in range(len(filenames)):
            F = h5py.File(os.path.join(directory, filenames[i]), "r")
            tmp_images = list(F["images"])
            tmp_viewpoints = list(F["viewpoints"])

            dataset_images.extend(tmp_images)
            dataset_viewpoints.extend(tmp_viewpoints)

        # for i in range(len(filenames)):
        #     images_npy_path = os.path.join(directory, "images", filenames[i])
        #     viewpoints_npy_path = os.path.join(directory, "viewpoints", filenames[i])
        #     tmp_images = np.load(images_npy_path)
        #     tmp_viewpoints = np.load(viewpoints_npy_path)

        #     assert tmp_images.shape[0] == tmp_viewpoints.shape[0]

        #     dataset_images.extend(tmp_images)
        #     dataset_viewpoints.extend(tmp_viewpoints)
        dataset_images = np.array(dataset_images)
        dataset_viewpoints = np.array(dataset_viewpoints)

        dataset = list()
        for i in range(len(dataset_images)):
            item = {
                'image': dataset_images[i],
                'viewpoint': dataset_viewpoints[i]
            }
            dataset.append(item)

        return dataset

    def to_device(array):
        # if using_gpu:
        array = cuda.to_gpu(array)
        return array

    def fill_observations_axis(observation_images):
        axis_observations_image = np.full(
            (3, image_shape[1], total_observations_per_scene * image_shape[2]),
            black_color,
            dtype=np.float32)
        num_current_obs = len(observation_images)
        total_obs = total_observations_per_scene
        width = image_shape[2]
        x_start = width * (total_obs - num_current_obs) // 2
        for obs_image in observation_images:
            x_end = x_start + width
            axis_observations_image[:, :, x_start:x_end] = obs_image
            x_start += width
        return axis_observations_image

    def compute_camera_angle_at_frame(t):
        return t * 2 * math.pi / (fps * 2)

    def rotate_query_viewpoint(horizontal_angle_rad, camera_distance,
                               camera_position_y):
        camera_position = np.array([
            camera_distance * math.sin(horizontal_angle_rad),  # x
            camera_position_y,
            camera_distance * math.cos(horizontal_angle_rad),  # z
        ])
        center = np.array((0, camera_position_y, 0))
        camera_direction = camera_position - center
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        query_viewpoints = xp.array(
            (
                camera_position[0],
                camera_position[1],
                camera_position[2],
                math.cos(yaw),
                math.sin(yaw),
                math.cos(pitch),
                math.sin(pitch),
            ),
            dtype=np.float32,
        )
        query_viewpoints = xp.broadcast_to(query_viewpoints,
                                           (1, ) + query_viewpoints.shape)

        return query_viewpoints

    def render(representation,
               camera_distance,
               camera_position_y,
               total_frames,
               animation_frame_array,
               rotate_camera=True):

        # viewpoint_file = open('viewpoints.txt','w')
        for t in range(0, total_frames):
            artist_array = [
                axis_observations.imshow(cv2.cvtColor(
                    make_uint8(axis_observations_image), cv2.COLOR_BGR2RGB),
                                         interpolation="none",
                                         animated=True)
            ]

            horizontal_angle_rad = compute_camera_angle_at_frame(t)
            if not rotate_camera:
                horizontal_angle_rad = compute_camera_angle_at_frame(0)

            query_viewpoints = rotate_query_viewpoint(horizontal_angle_rad,
                                                      camera_distance,
                                                      camera_position_y)

            generated_images = model.generate_image(query_viewpoints,
                                                    representation)[0]
            generated_images = chainer.backends.cuda.to_cpu(generated_images)
            generated_images = make_uint8(generated_images)
            generated_images = cv2.cvtColor(generated_images,
                                            cv2.COLOR_BGR2RGB)

            artist_array.append(
                axis_generation.imshow(generated_images,
                                       interpolation="none",
                                       animated=True))

            animation_frame_array.append(artist_array)

    def render_wVar(representation,
                    camera_distance,
                    camera_position_y,
                    total_frames,
                    animation_frame_array,
                    no_of_samples,
                    rotate_camera=True,
                    wVariance=True):

        # highest_var = 0.0
        # with open("queries.txt",'w') as file_wviews, open("variance.txt",'w') as file_wvar:
        for t in range(0, total_frames):
            artist_array = [
                axis_observations.imshow(cv2.cvtColor(
                    make_uint8(axis_observations_image), cv2.COLOR_BGR2RGB),
                                         interpolation="none",
                                         animated=True)
            ]

            horizontal_angle_rad = compute_camera_angle_at_frame(t)
            if not rotate_camera:
                horizontal_angle_rad = compute_camera_angle_at_frame(0)

            query_viewpoints = rotate_query_viewpoint(horizontal_angle_rad,
                                                      camera_distance,
                                                      camera_position_y)

            # q_x, q_y, q_z, _, _, _, _ = query_viewpoints[0]

            # file_wviews.writelines("".join(str(q_x))+", "+
            #                         "".join(str(q_y))+", "+
            #                         "".join(str(q_z))+"\n")

            generated_images = cp.squeeze(
                cp.array(
                    model.generate_images(query_viewpoints, representation,
                                          no_of_samples)))
            # ipdb.set_trace()
            var_image = cp.var(generated_images, axis=0)
            mean_image = cp.mean(generated_images, axis=0)
            mean_image = make_uint8(
                np.squeeze(chainer.backends.cuda.to_cpu(mean_image)))
            mean_image_rgb = cv2.cvtColor(mean_image, cv2.COLOR_BGR2RGB)

            var_image = chainer.backends.cuda.to_cpu(var_image)

            # grayscale
            r, g, b = var_image
            gray_var_image = 0.2989 * r + 0.5870 * g + 0.1140 * b
            # thresholding Otsu's method
            # thresh = threshold_otsu(gray_var_image)
            # var_binary = gray_var_image > thresh

            ## hill-climbing algorithm for finding the highest variance
            # cur_var = np.mean(gray_var_image)
            # if cur_var>highest_var:
            #     highest_var = cur_var

            #     if wVariance==True:
            #         print('highest variance: '+str(highest_var)+', viewpoint: '+str(query_viewpoints[0]))
            #         highest_var_vp = query_viewpoints[0]
            #         file_wvar.writelines('highest variance: '+str(highest_var)+', viewpoint: '+str(highest_var_vp)+'\n')
            #     else:
            #         pass

            artist_array.append(
                axis_generation_var.imshow(gray_var_image,
                                           cmap=plt.cm.gray,
                                           interpolation="none",
                                           animated=True))

            artist_array.append(
                axis_generation_mean.imshow(mean_image_rgb,
                                            interpolation="none",
                                            animated=True))

            animation_frame_array.append(artist_array)

            # if wVariance==True:
            #     print('final highest variance: '+str(highest_var)+', viewpoint: '+str(highest_var_vp))
            #     file_wvar.writelines('final highest variance: '+str(highest_var)+', viewpoint: '+str(highest_var_vp)+'\n')
            # else:
            #     pass

        # file_wviews.close()
        # file_wvar.close()

    # loading dataset & model
    cuda.get_device(args.gpu_device).use()
    xp = cp

    hyperparams = HyperParameters()
    assert hyperparams.load(args.snapshot_directory)

    model = Model(hyperparams)
    chainer.serializers.load_hdf5(args.snapshot_file, model)
    model.to_gpu()

    total_observations_per_scene = 4
    fps = 30

    black_color = -0.5
    image_shape = (3, ) + hyperparams.image_size
    axis_observations_image = np.zeros(
        (3, image_shape[1], total_observations_per_scene * image_shape[2]),
        dtype=np.float32)

    #==============================================================================
    # Visualization
    #==============================================================================
    plt.style.use("dark_background")
    fig = plt.figure(figsize=(6, 7))
    plt.subplots_adjust(left=0.1, right=0.95, bottom=0.1, top=0.95)
    # fig.suptitle("GQN")
    axis_observations = fig.add_subplot(2, 1, 1)
    axis_observations.axis("off")
    axis_observations.set_title("observations")
    axis_generation = fig.add_subplot(2, 1, 2)
    axis_generation.axis("off")
    axis_generation.set_title("Rendered Predictions")
    axis_generation_var = fig.add_subplot(2, 2, 3)
    axis_generation_var.axis("off")
    axis_generation_var.set_title("Variance Render")
    axis_generation_mean = fig.add_subplot(2, 2, 4)
    axis_generation_mean.axis("off")
    axis_generation_mean.set_title("Mean Render")

    # iterator
    dataset = read_files(args.dataset_directory)
    file_number = 1
    with chainer.no_backprop_mode():

        iterator = chainer.iterators.SerialIterator(dataset, batch_size=1)
        # ipdb.set_trace()
        for i in tqdm(range(len(iterator.dataset))):
            animation_frame_array = []
            images, viewpoints = np.array([
                iterator.dataset[i]["image"]
            ]), np.array([iterator.dataset[i]["viewpoint"]])

            camera_distance = np.mean(
                np.linalg.norm(viewpoints[:, :, :3], axis=2))
            camera_position_y = np.mean(viewpoints[:, :, 1])

            images = images.transpose((0, 1, 4, 2, 3)).astype(np.float32)
            images = preprocess_images(images)

            batch_index = 0

            total_views = images.shape[1]
            random_observation_view_indices = list(range(total_views))
            random.shuffle(random_observation_view_indices)
            random_observation_view_indices = \
                random_observation_view_indices[:total_observations_per_scene]
            observed_images = images[batch_index,
                                     random_observation_view_indices]
            observed_viewpoints = viewpoints[batch_index,
                                             random_observation_view_indices]

            observed_images = to_device(observed_images)
            observed_viewpoints = to_device(observed_viewpoints)

            # Scene encoder
            representation = model.compute_observation_representation(
                observed_images[None, :1], observed_viewpoints[None, :1])

            # Update figure
            observation_index = random_observation_view_indices[0]
            observed_image = images[batch_index, observation_index]
            axis_observations_image = fill_observations_axis([observed_image])

            # Neural rendering
            # render(representation, camera_distance, camera_position_y,
            #         fps * 2, animation_frame_array)
            render_wVar(representation, camera_distance, camera_position_y,
                        fps * 2, animation_frame_array, 100)

            for n in range(total_observations_per_scene):
                observation_indices = random_observation_view_indices[:n + 1]
                axis_observations_image = fill_observations_axis(
                    images[batch_index, observation_indices])

                # Scene encoder
                representation = model.compute_observation_representation(
                    observed_images[None, :n + 1],
                    observed_viewpoints[None, :n + 1])
                # Neural rendering
                # render(representation, camera_distance, camera_position_y,
                #     fps // 2, animation_frame_array,rotate_camera=False)
                render_wVar(representation,
                            camera_distance,
                            camera_position_y,
                            fps // 2,
                            animation_frame_array,
                            100,
                            rotate_camera=False,
                            wVariance=False)

            # Scene encoder with all given observations
            representation = model.compute_observation_representation(
                observed_images[None, :total_observations_per_scene + 1],
                observed_viewpoints[None, :total_observations_per_scene + 1])

            # Neural rendering
            # render(representation, camera_distance, camera_position_y,
            #         fps * 6, animation_frame_array)
            render_wVar(representation, camera_distance, camera_position_y,
                        fps * 6, animation_frame_array, 100)

            anim = animation.ArtistAnimation(
                fig,
                animation_frame_array,
                interval=1 / fps,  # originally 1/fps
                blit=True,
                repeat_delay=0)

            anim.save("{}/observations_{}.gif".format(args.figure_directory,
                                                      file_number),
                      writer="imagemagick",
                      fps=10)
            # ipdb.set_trace()
            # anim.save(
            #     "{}/rooms_ring_camera_observations_{}.mp4".format(
            #         args.figure_directory, file_number),
            #     writer='ffmpeg',
            #     fps=10)

            file_number += 1
Example #28
        paths = []
        pathsAndLabels = []
        for j in range(0, G):
            pathsAndLabels.append(np.asarray([os.path.join(os.path.join(train_dir, 'image3'), 'stage'+str(j)+'\\'), j]))

        for j in range(0, G):
            paths.append(np.asarray([os.path.join(os.path.join(test_dir, 'image2'), 'stage'+str(j)+'\\'), j]))


        (x_train, t_train), correct_train_label = image2Train(pathsAndLabels)
        (x_test, t_test), correct_test_label = image2Test(paths)


        model = CNN()  # model selection

        # GPU setup
        cuda.get_device(0).use()
        model.to_gpu(0)

        optimizer = optimizers.Adam()  # optimization function
        optimizer.setup(model)

        # total misclassifications per label
        miss_train_total = np.zeros(G)
        miss_test_total = np.zeros(G)

        # total correct classifications per label
        correct_train_total = np.zeros(G)
        correct_test_total = np.zeros(G)

        # 2-D array recording what was classified as what
        miss_train_judge = np.zeros([G,G])
Example #29
def main():
    os.makedirs(args.snapshot_directory, exist_ok=True)

    np.random.seed(0)

    xp = np
    device_gpu = args.gpu_device
    device_cpu = -1
    using_gpu = device_gpu >= 0
    if using_gpu:
        cuda.get_device(args.gpu_device).use()
        xp = cupy

    dataset = gqn.data.Dataset(args.dataset_directory)

    hyperparams = HyperParameters()
    hyperparams.generator_share_core = args.generator_share_core
    hyperparams.generator_share_prior = args.generator_share_prior
    hyperparams.generator_generation_steps = args.generation_steps
    hyperparams.generator_share_upsampler = args.generator_share_upsampler
    hyperparams.inference_share_core = args.inference_share_core
    hyperparams.inference_share_posterior = args.inference_share_posterior
    hyperparams.h_channels = args.h_channels
    hyperparams.z_channels = args.z_channels
    hyperparams.u_channels = args.u_channels
    hyperparams.image_size = (args.image_size, args.image_size)
    hyperparams.representation_channels = args.representation_channels
    hyperparams.representation_architecture = args.representation_architecture
    hyperparams.pixel_n = args.pixel_n
    hyperparams.pixel_sigma_i = args.initial_pixel_variance
    hyperparams.pixel_sigma_f = args.final_pixel_variance
    hyperparams.save(args.snapshot_directory)
    print(hyperparams)

    model = Model(hyperparams,
                  snapshot_directory=args.snapshot_directory,
                  optimized=args.optimized)
    if using_gpu:
        model.to_gpu()

    scheduler = Scheduler(sigma_start=args.initial_pixel_variance,
                          sigma_end=args.final_pixel_variance,
                          final_num_updates=args.pixel_n,
                          snapshot_directory=args.snapshot_directory)
    print(scheduler)

    optimizer = AdamOptimizer(model.parameters,
                              mu_i=args.initial_lr,
                              mu_f=args.final_lr,
                              initial_training_step=scheduler.num_updates)
    print(optimizer)

    pixel_var = xp.full((args.batch_size, 3) + hyperparams.image_size,
                        scheduler.pixel_variance**2,
                        dtype="float32")
    pixel_ln_var = xp.full((args.batch_size, 3) + hyperparams.image_size,
                           math.log(scheduler.pixel_variance**2),
                           dtype="float32")

    representation_shape = (args.batch_size,
                            hyperparams.representation_channels,
                            args.image_size // 4, args.image_size // 4)

    fig = plt.figure(figsize=(9, 3))
    axis_data = fig.add_subplot(1, 3, 1)
    axis_data.set_title("Data")
    axis_data.axis("off")
    axis_reconstruction = fig.add_subplot(1, 3, 2)
    axis_reconstruction.set_title("Reconstruction")
    axis_reconstruction.axis("off")
    axis_generation = fig.add_subplot(1, 3, 3)
    axis_generation.set_title("Generation")
    axis_generation.axis("off")

    current_training_step = 0
    for iteration in range(args.training_iterations):
        mean_kld = 0
        mean_nll = 0
        mean_mse = 0
        mean_elbo = 0
        total_num_batch = 0
        start_time = time.time()

        for subset_index, subset in enumerate(dataset):
            iterator = gqn.data.Iterator(subset, batch_size=args.batch_size)

            for batch_index, data_indices in enumerate(iterator):
                # shape: (batch, views, height, width, channels)
                # range: [-1, 1]
                images, viewpoints = subset[data_indices]

                # (batch, views, height, width, channels) -> (batch, views, channels, height, width)
                images = images.transpose((0, 1, 4, 2, 3)).astype(np.float32)

                total_views = images.shape[1]

                # Sample number of views
                num_views = random.choice(range(1, total_views + 1))
                observation_view_indices = list(range(total_views))
                random.shuffle(observation_view_indices)
                observation_view_indices = observation_view_indices[:num_views]
                query_index = random.choice(range(total_views))

                if num_views > 0:
                    observation_images = preprocess_images(
                        images[:, observation_view_indices])
                    observation_query = viewpoints[:, observation_view_indices]
                    representation = model.compute_observation_representation(
                        observation_images, observation_query)
                else:
                    representation = xp.zeros(representation_shape,
                                              dtype="float32")
                    representation = chainer.Variable(representation)

                # Sample query
                query_index = random.choice(range(total_views))
                query_images = preprocess_images(images[:, query_index])
                query_viewpoints = viewpoints[:, query_index]

                # Transfer to gpu if necessary
                query_images = to_device(query_images, device_gpu)
                query_viewpoints = to_device(query_viewpoints, device_gpu)

                z_t_param_array, mean_x = model.sample_z_and_x_params_from_posterior(
                    query_images, query_viewpoints, representation)

                # Compute loss
                ## KL Divergence
                loss_kld = 0
                for params in z_t_param_array:
                    mean_z_q, ln_var_z_q, mean_z_p, ln_var_z_p = params
                    kld = gqn.functions.gaussian_kl_divergence(
                        mean_z_q, ln_var_z_q, mean_z_p, ln_var_z_p)
                    loss_kld += cf.sum(kld)

                ## Negative log-likelihood of generated image
                loss_nll = cf.sum(
                    gqn.functions.gaussian_negative_log_likelihood(
                        query_images, mean_x, pixel_var, pixel_ln_var))

                # Calculate the average loss value
                loss_nll = loss_nll / args.batch_size
                loss_kld = loss_kld / args.batch_size

                loss = loss_nll / scheduler.pixel_variance + loss_kld

                model.cleargrads()
                loss.backward()
                optimizer.update(current_training_step)

                loss_nll = float(loss_nll.data) + math.log(256.0)
                loss_kld = float(loss_kld.data)

                elbo = -(loss_nll + loss_kld)

                loss_mse = float(
                    cf.mean_squared_error(query_images, mean_x).data)

                printr(
                    "Iteration {}: Subset {} / {}: Batch {} / {} - elbo: {:.2f} - loss: nll: {:.2f} mse: {:.6e} kld: {:.5f} - lr: {:.4e} - pixel_variance: {:.5f} - step: {}  "
                    .format(iteration + 1,
                            subset_index + 1, len(dataset), batch_index + 1,
                            len(iterator), elbo, loss_nll, loss_mse, loss_kld,
                            optimizer.learning_rate, scheduler.pixel_variance,
                            current_training_step))

                scheduler.step(iteration, current_training_step)
                pixel_var[...] = scheduler.pixel_variance**2
                pixel_ln_var[...] = math.log(scheduler.pixel_variance**2)

                total_num_batch += 1
                current_training_step += 1
                mean_kld += loss_kld
                mean_nll += loss_nll
                mean_mse += loss_mse
                mean_elbo += elbo

            model.serialize(args.snapshot_directory)

            # Visualize
            if args.with_visualization:
                axis_data.imshow(make_uint8(query_images[0]),
                                 interpolation="none")
                axis_reconstruction.imshow(make_uint8(mean_x.data[0]),
                                           interpolation="none")

                with chainer.no_backprop_mode():
                    generated_x = model.generate_image(
                        query_viewpoints[None, 0], representation[None, 0])
                    axis_generation.imshow(make_uint8(generated_x[0]),
                                           interpolation="none")
                plt.pause(1e-8)

        elapsed_time = time.time() - start_time
        print(
            "\033[2KIteration {} - elbo: {:.2f} - loss: nll: {:.2f} mse: {:.6e} kld: {:.5f} - lr: {:.4e} - pixel_variance: {:.5f} - step: {} - time: {:.3f} min"
            .format(iteration + 1, mean_elbo / total_num_batch,
                    mean_nll / total_num_batch, mean_mse / total_num_batch,
                    mean_kld / total_num_batch, optimizer.learning_rate,
                    scheduler.pixel_variance, current_training_step,
                    elapsed_time / 60))
        model.serialize(args.snapshot_directory)
Example #30
File: train.py  Project: tatHi/pyner
def run_training(config: str, device: int, seed: int):
    configs = ConfigParser.parse(config)
    params = yaml.load(open(config, encoding="utf-8"), Loader=yaml.SafeLoader)

    if device >= 0:
        cuda.get_device(device).use()

    set_seed(seed, device)

    vocab = Vocabulary.prepare(configs)
    num_word_vocab = max(vocab.dictionaries["word2idx"].values()) + 1
    num_char_vocab = max(vocab.dictionaries["char2idx"].values()) + 1
    num_tag_vocab = max(vocab.dictionaries["tag2idx"].values()) + 1

    model = BiLSTM_CRF(configs, num_word_vocab, num_char_vocab, num_tag_vocab)

    transformer = DatasetTransformer(vocab)
    transform = transformer.transform

    external_configs = configs["external"]
    if "word_vector" in external_configs:
        syn0 = model.embed_word.W.data
        _, word_dim = syn0.shape
        pre_word_dim = vocab.gensim_model.vector_size
        if word_dim != pre_word_dim:
            msg = "Mismatch vector size between model and pre-trained word vectors"  # NOQA
            msg += f"(model: \x1b[31m{word_dim}\x1b[0m"
            msg += f", pre-trained word vector: \x1b[31m{pre_word_dim}\x1b[0m"
            raise Exception(msg)

        word2idx = vocab.dictionaries["word2idx"]
        syn0 = prepare_pretrained_word_vector(word2idx, vocab.gensim_model,
                                              syn0, num_word_vocab)
        model.set_pretrained_word_vectors(syn0)

    train_iterator = create_iterator(vocab, configs, "train", transform)
    valid_iterator = create_iterator(vocab, configs, "valid", transform)
    test_iterator = create_iterator(vocab, configs, "test", transform)

    if device >= 0:
        model.to_gpu(device)

    optimizer = create_optimizer(configs)
    optimizer.setup(model)
    optimizer = add_hooks(optimizer, configs)

    updater = T.StandardUpdater(train_iterator,
                                optimizer,
                                converter=converter,
                                device=device)

    params = configs.export()
    params["num_word_vocab"] = num_word_vocab
    params["num_char_vocab"] = num_char_vocab
    params["num_tag_vocab"] = num_tag_vocab

    epoch = configs["iteration"]["epoch"]
    trigger = (epoch, "epoch")

    model_path = configs["output"]
    timestamp = datetime.datetime.now()
    timestamp_str = timestamp.isoformat()
    output_path = Path(f"{model_path}.{timestamp_str}")

    trainer = T.Trainer(updater, trigger, out=output_path)
    save_args(params, output_path)
    msg = f"Create \x1b[31m{output_path}\x1b[0m for saving model snapshots"
    logging.debug(msg)

    entries = ["epoch", "iteration", "elapsed_time", "lr", "main/loss"]
    entries += ["validation/main/loss", "validation/main/fscore"]
    entries += ["validation_1/main/loss", "validation_1/main/fscore"]

    valid_evaluator = NamedEntityEvaluator(valid_iterator,
                                           model,
                                           transformer.itransform,
                                           converter,
                                           device=device)

    test_evaluator = NamedEntityEvaluator(test_iterator,
                                          model,
                                          transformer.itransform,
                                          converter,
                                          device=device)

    epoch_trigger = (1, "epoch")
    snapshot_filename = "snapshot_epoch_{.updater.epoch:04d}"
    trainer.extend(valid_evaluator, trigger=epoch_trigger)
    trainer.extend(test_evaluator, trigger=epoch_trigger)
    trainer.extend(E.observe_lr(), trigger=epoch_trigger)
    trainer.extend(E.LogReport(trigger=epoch_trigger))
    trainer.extend(E.PrintReport(entries=entries), trigger=epoch_trigger)
    trainer.extend(E.ProgressBar(update_interval=20))
    trainer.extend(E.snapshot_object(model, filename=snapshot_filename),
                   trigger=(1, "epoch"))

    if "learning_rate_decay" in params:
        logging.debug("Enabling learning rate decay")
        trainer.extend(
            LearningRateDecay("lr", params["learning_rate"],
                              params["learning_rate_decay"]),
            trigger=epoch_trigger,
        )

    trainer.run()
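
prepare_pretrained_word_vector is pyner's own helper and is not shown in this snippet. As a rough sketch of the usual pattern it implements, copying a gensim vector into the embedding matrix for every in-vocabulary word and keeping the random initialization for the rest, something like the following would do; the function name and the print format are illustrative, not pyner's API.

def copy_pretrained_vectors(word2idx, keyed_vectors, syn0):
    # syn0: (num_word_vocab, word_dim) embedding matrix, filled in place
    assert syn0.shape[1] == keyed_vectors.vector_size
    num_hit = 0
    for word, idx in word2idx.items():
        if word in keyed_vectors:  # gensim KeyedVectors supports membership tests
            syn0[idx] = keyed_vectors[word]
            num_hit += 1
    print(f"{num_hit}/{len(word2idx)} words initialized from pre-trained vectors")
    return syn0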
Example #31
0
def main():
    xp = np
    using_gpu = args.gpu_device >= 0
    if using_gpu:
        cuda.get_device(args.gpu_device).use()
        xp = cupy

    hyperparams = Hyperparameters(args.snapshot_path)
    hyperparams.print()

    num_bins_x = 2.0**hyperparams.num_bits_x

    encoder = Glow(hyperparams, hdf5_path=args.snapshot_path)
    if using_gpu:
        encoder.to_gpu()

    total = hyperparams.levels + 1
    fig = plt.figure(figsize=(4 * total, 4))
    subplots = []
    for n in range(total):
        subplot = fig.add_subplot(1, total, n + 1)
        subplots.append(subplot)

    def reverse_step(z, sampling=True):
        if isinstance(z, list):
            factorized_z = z
        else:
            factorized_z = encoder.factor_z(z)

        assert len(factorized_z) == len(encoder.blocks)

        out = None
        sum_logdet = 0

        for block, zi in zip(encoder.blocks[::-1], factorized_z[::-1]):
            out, logdet = block.reverse_step(
                out,
                gaussian_eps=zi,
                squeeze_factor=encoder.hyperparams.squeeze_factor,
                sampling=sampling)
            sum_logdet += logdet

        return out, sum_logdet

    with chainer.no_backprop_mode(), encoder.reverse() as decoder:  # comma chains both contexts; "and" would silently drop no_backprop_mode()
        while True:
            # numpy.random.normal has no dtype kwarg, so cast after sampling
            base_z = xp.random.normal(
                0,
                args.temperature,
                size=(
                    1,
                    3,
                ) + hyperparams.image_size).astype("float32")
            factorized_z = encoder.factor_z(base_z)

            rev_x, _ = decoder.reverse_step(factorized_z)
            rev_x_img = make_uint8(rev_x.data[0], num_bins_x)
            subplots[0].imshow(rev_x_img, interpolation="none")

            z = xp.copy(base_z)
            factorized_z = encoder.factor_z(z)
            for n in range(hyperparams.levels - 1):
                factorized_z[n] = xp.random.normal(
                    0,
                    args.temperature,
                    size=factorized_z[n].shape).astype("float32")
            rev_x, _ = decoder.reverse_step(factorized_z)
            rev_x_img = make_uint8(rev_x.data[0], num_bins_x)
            subplots[1].imshow(rev_x_img, interpolation="none")

            # for n in range(hyperparams.levels):
            #     z = xp.copy(base_z)
            #     factorized_z = encoder.factor_z(z)
            #     for m in range(n + 1):
            #         factorized_z[m] = xp.random.normal(
            #             0,
            #             args.temperature,
            #             size=factorized_z[m].shape,
            #             dtype="float32")
            #         # factorized_z[m] = xp.zeros_like(factorized_z[m])
            #     out = None
            #     for k, (block, zi) in enumerate(
            #             zip(encoder.blocks[::-1], factorized_z[::-1])):
            #         sampling = False
            #         out, _ = block.reverse_step(
            #             out,
            #             gaussian_eps=zi,
            #             squeeze_factor=encoder.hyperparams.squeeze_factor,
            #             sampling=sampling)
            #     rev_x = out

            #     rev_x_img = make_uint8(rev_x.data[0], num_bins_x)
            #     subplots[n + 1].imshow(rev_x_img, interpolation="none")

            for n in range(hyperparams.levels):
                z = xp.copy(base_z)
                factorized_z = encoder.factor_z(z)
                # zero out level n (the random-resampling variant of this
                # experiment is kept in the commented-out block above)
                factorized_z[n] = xp.zeros_like(factorized_z[n])
                out = None
                for k, (block, zi) in enumerate(
                        zip(encoder.blocks[::-1], factorized_z[::-1])):
                    sampling = k != hyperparams.levels - n - 1  # skip sampling only for the replaced level
                    out, _ = block.reverse_step(
                        out,
                        gaussian_eps=zi,
                        squeeze_factor=encoder.hyperparams.squeeze_factor,
                        sampling=sampling)
                rev_x = out

                rev_x_img = make_uint8(rev_x.data[0], num_bins_x)
                subplots[n + 1].imshow(rev_x_img, interpolation="none")
            plt.pause(.01)
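
A portability note on the sampling calls above: cupy.random.normal accepts a dtype argument, but numpy.random.normal does not, which is why the draws are cast with .astype("float32"). Factored into a helper (a sketch, assuming nothing beyond NumPy/CuPy themselves):

def sample_normal_f32(xp, scale, shape):
    # works with both numpy and cupy: sample in float64, then cast,
    # because numpy.random.normal has no dtype keyword
    return xp.random.normal(0, scale, size=shape).astype("float32")

base_z above would then be sample_normal_f32(xp, args.temperature, (1, 3) + hyperparams.image_size).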
Example #32
0
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    xp = np
    using_gpu = args.gpu_device >= 0
    if using_gpu:
        cuda.get_device(args.gpu_device).use()
        xp = cupy

    dataset = gqn.data.Dataset(args.dataset_path)

    models = []
    snapshot_dirs = os.listdir(args.snapshots_dir_path)
    for snapshot_dir in snapshot_dirs:
        snapshot_path = os.path.join(args.snapshots_dir_path, snapshot_dir)
        hyperparams = HyperParameters(snapshot_directory=snapshot_path)
        models.append(Model(hyperparams, snapshot_directory=snapshot_path))

    if using_gpu:
        for model in models:
            model.to_gpu()

    plt.style.use("dark_background")
    fig = plt.figure(figsize=(10, 5))

    num_views_per_scene = 4
    num_generation = 2  # lowered from 4 to 2 (the remaining 2 slots show the original output)
    num_original = 2
    total_frames_per_rotation = 24

    image_shape = (3, ) + hyperparams.image_size  # last snapshot's hyperparams; assumes all models share image_size
    blank_image = make_uint8(np.full(image_shape, 0))
    file_number = 1

    with chainer.no_backprop_mode():
        for subset in dataset:
            iterator = gqn.data.Iterator(subset, batch_size=1)

            for data_indices in iterator:
                snapshot_array = []

                observed_image_array = xp.zeros(
                    (num_views_per_scene, ) + image_shape, dtype=np.float32)
                observed_viewpoint_array = xp.zeros((num_views_per_scene, 7),
                                                    dtype=np.float32)

                # shape: (batch, views, height, width, channels)
                # range: [-1, 1]
                images, viewpoints, original_images = subset[data_indices]

                # (batch, views, height, width, channels) -> (batch, views, channels, height, width)
                images = images.transpose((0, 1, 4, 2, 3)).astype(np.float32)
                images = images / 255.0
                images += np.random.uniform(
                    0, 1.0 / 256.0, size=images.shape).astype(np.float32)

                # (batch, views, height, width, channels) -> (batch, views, channels, height, width)
                original_images = original_images.transpose(
                    (0, 1, 4, 2, 3)).astype(np.float32)
                original_images = original_images / 255.0
                original_images += np.random.uniform(
                    0, 1.0 / 256.0,
                    size=original_images.shape).astype(np.float32)

                batch_index = 0

                # Generate images without observations
                r = xp.zeros((
                    num_generation,
                    hyperparams.representation_channels,
                ) + hyperparams.chrz_size,
                             dtype=np.float32)

                angle_rad = 0
                current_scene_original_images_cpu = original_images[
                    batch_index]
                current_scene_original_images = to_gpu(
                    current_scene_original_images_cpu)

                for i in range(10):
                    gqn.animator.Snapshot.make_graph(
                        id='kl_div_graph_' + str(i),
                        pos=5 + i,
                        graph_type='plot',
                        frame_in_rotation=total_frames_per_rotation,
                        num_of_data_per_graph=num_views_per_scene + 1,
                        trivial_settings={
                            'colors':
                            ['red', 'blue', 'green', 'orange', 'white'],
                            'markers': ['o', 'o', 'o', 'o', 'o'],
                            'noXTicks': True,
                            'noYTicks': True,
                        })

                for t in range(total_frames_per_rotation):
                    grid_master = GridSpec(nrows=4,
                                           ncols=5,
                                           height_ratios=[1, 1, 1, 1])
                    snapshot = gqn.animator.Snapshot(
                        layout_settings={
                            'subplot_count':
                            14,
                            'grid_master':
                            grid_master,
                            'subplots': [{
                                'subplot_id':
                                i + 1,
                                'subplot':
                                GridSpecFromSubplotSpec(
                                    nrows=2,
                                    ncols=2,
                                    subplot_spec=grid_master[i * 2:i * 2 + 2,
                                                             0:1])
                            } for i in range(2)] + [{
                                'subplot_id':
                                i + 3,
                                'subplot':
                                GridSpecFromSubplotSpec(
                                    nrows=1,
                                    ncols=1,
                                    subplot_spec=grid_master[i, 2])
                            } for i in range(2)] + [{
                                'subplot_id':
                                i + 5,
                                'subplot':
                                GridSpecFromSubplotSpec(
                                    nrows=1,
                                    ncols=1,
                                    subplot_spec=grid_master[i // 2,
                                                             3 + i % 2])
                            } for i in range(4)] + [{
                                'subplot_id':
                                i + 9,
                                'subplot':
                                GridSpecFromSubplotSpec(
                                    nrows=1,
                                    ncols=1,
                                    subplot_spec=grid_master[2 + i // 3,
                                                             2 + i % 3])
                            } for i in range(6)]
                        })

                    query_viewpoints = rotate_query_viewpoint(
                        angle_rad, num_generation, xp)
                    generated_images_list = []
                    for model in models:
                        generated_images_list.append(
                            model.generate_image(query_viewpoints, r, xp))

                    sq_d_list = []
                    for generated_images in generated_images_list:
                        sq_d_list.append(
                            gqn.math.get_squared_distance(
                                to_cpu(current_scene_original_images[t]),
                                to_cpu(generated_images[0]))[0])

                    snapshot.add_media(media_position=1,
                                       media_type='image',
                                       media_data=make_uint8(
                                           generated_images[0]))
                    snapshot.add_title(target_media_pos=1, text='Generated')

                    snapshot.add_media(media_position=2,
                                       media_type='image',
                                       media_data=make_uint8(
                                           current_scene_original_images[t]))
                    snapshot.add_title(target_media_pos=2, text='Original')

                    # snapshot.add_media(media_position=3  , media_type='image', media_data=make_uint8(current_scene_original_images[t]))
                    # snapshot.add_title(target_media_pos=3, text='Original')

                    snapshot.add_media(media_position=4,
                                       media_type='image',
                                       media_data=make_uint8(blank_image))
                    snapshot.add_title(target_media_pos=4, text='Observed')

                    for i in range(10):  # assumes exactly 10 models were loaded; fewer would raise IndexError on sq_d_list
                        gqn.animator.Snapshot.add_graph_data(
                            graph_id='kl_div_graph_' + str(i),
                            data_id='kl_div_data_0',
                            new_data=sq_d_list[i],
                            frame_num=t,
                        )
                    print('snap')
                    snapshot_array.append(snapshot)

                    angle_rad += 2 * math.pi / total_frames_per_rotation

                # Generate images with observations
                for m in range(num_views_per_scene):
                    kl_div_sum = 0
                    observed_image = images[batch_index, m]
                    observed_viewpoint = viewpoints[batch_index, m]

                    observed_image_array[m] = to_gpu(observed_image)
                    observed_viewpoint_array[m] = to_gpu(observed_viewpoint)

                    r_list = []
                    for i, model in enumerate(models):
                        r_list.append(
                            model.compute_observation_representation(
                                observed_image_array[None, :m + 1],
                                observed_viewpoint_array[None, :m + 1]))

                        r_list[i] = cf.broadcast_to(r_list[i],
                                                    (num_generation, ) +
                                                    r_list[i].shape[1:])

                    angle_rad = 0
                    for t in range(total_frames_per_rotation):
                        grid_master = GridSpec(nrows=4,
                                               ncols=5,
                                               height_ratios=[1, 1, 1, 1])
                        snapshot = gqn.animator.Snapshot(
                            layout_settings={
                                'subplot_count':
                                14,
                                'grid_master':
                                grid_master,
                                'subplots': [{
                                    'subplot_id':
                                    i + 1,
                                    'subplot':
                                    GridSpecFromSubplotSpec(
                                        nrows=2,
                                        ncols=2,
                                        subplot_spec=grid_master[i * 2:i * 2 +
                                                                 2, 0:1])
                                } for i in range(2)] + [{
                                    'subplot_id':
                                    i + 3,
                                    'subplot':
                                    GridSpecFromSubplotSpec(
                                        nrows=1,
                                        ncols=1,
                                        subplot_spec=grid_master[i, 2])
                                } for i in range(2)] + [{
                                    'subplot_id':
                                    i + 5,
                                    'subplot':
                                    GridSpecFromSubplotSpec(
                                        nrows=1,
                                        ncols=1,
                                        subplot_spec=grid_master[i // 2,
                                                                 3 + i % 2])
                                } for i in range(4)] + [{
                                    'subplot_id':
                                    i + 9,
                                    'subplot':
                                    GridSpecFromSubplotSpec(
                                        nrows=1,
                                        ncols=1,
                                        subplot_spec=grid_master[2 + i // 3,
                                                                 2 + i % 3])
                                } for i in range(6)]
                            })

                        query_viewpoints = rotate_query_viewpoint(
                            angle_rad, num_generation, xp)
                        generated_images_list = []
                        for model, r in zip(models, r_list):
                            generated_images_list.append(
                                model.generate_image(query_viewpoints, r, xp))

                        sq_d_list = []
                        for i, generated_images in enumerate(
                                generated_images_list):
                            sq_d_list.append(
                                gqn.math.get_squared_distance(
                                    to_cpu(current_scene_original_images[t]),
                                    to_cpu(generated_images[0]))[0])

                        snapshot.add_media(media_position=1,
                                           media_type='image',
                                           media_data=make_uint8(
                                               generated_images[0]))
                        snapshot.add_title(target_media_pos=1,
                                           text='Generated')

                        snapshot.add_media(
                            media_position=2,
                            media_type='image',
                            media_data=make_uint8(
                                current_scene_original_images[t]))
                        snapshot.add_title(target_media_pos=2, text='Original')

                        # snapshot.add_media(media_position=3  , media_type='image', media_data=make_uint8(current_scene_original_images[t]))
                        # snapshot.add_title(target_media_pos=3, text='Original')

                        snapshot.add_media(media_position=4,
                                           media_type='image',
                                           media_data=make_uint8(
                                               observed_image_array[m]))
                        snapshot.add_title(target_media_pos=4, text='Observed')

                        # for i, kl_div in enumerate(kl_div_list):
                        for i in range(10):  # same assumption: len(models) == 10
                            gqn.animator.Snapshot.add_graph_data(
                                graph_id='kl_div_graph_' + str(i),
                                data_id='kl_div_data_' + str(m + 1),
                                new_data=sq_d_list[i],
                                frame_num=t,
                            )

                        angle_rad += 2 * math.pi / total_frames_per_rotation
                        # plt.pause(1e-8)

                        print('snap')
                        snapshot_array.append(snapshot)

                plt.subplots_adjust(left=None,
                                    bottom=None,
                                    right=None,
                                    top=None,
                                    wspace=0,
                                    hspace=0)

                anim = animation.FuncAnimation(
                    fig,
                    func_anim_upate,
                    fargs=(fig, [snapshot_array]),
                    interval=1 / 24,  # interval is in ms and only affects live preview; the saved file uses fps below
                    frames=(num_views_per_scene + 1) *
                    total_frames_per_rotation)

                anim.save("{}/shepard_matzler_{}.mp4".format(
                    args.output_directory, file_number),
                          writer="ffmpeg",
                          fps=12)
                file_number += 1
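
rotate_query_viewpoint is defined elsewhere in this project. In the GQN datasets a viewpoint is a 7-vector: camera position plus the cosine/sine of yaw and pitch. A plausible sketch that orbits the camera around the origin at a fixed radius is given below; the radius, height, and exact angle conventions are assumptions for illustration, not the project's verified code.

import math

def rotate_query_viewpoint(angle_rad, num_generation, xp, radius=3.0, height=1.0):
    # camera on a horizontal circle, facing the origin
    eye_x = radius * math.sin(angle_rad)
    eye_z = radius * math.cos(angle_rad)
    yaw = math.atan2(eye_x, eye_z) + math.pi
    pitch = -math.atan2(height, radius)
    v = xp.array((eye_x, height, eye_z,
                  math.cos(yaw), math.sin(yaw),
                  math.cos(pitch), math.sin(pitch)),
                 dtype="float32")
    # the same viewpoint repeated for every image in the generation batch
    return xp.broadcast_to(v, (num_generation,) + v.shape)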
Example #33
0
 def test_get_device_for_int(self):
     with testing.assert_warns(DeprecationWarning):
         device = cuda.get_device(0)
     assert device == cuda.Device(0)
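
As this test shows, cuda.get_device is deprecated in Chainer and emits a DeprecationWarning. The non-deprecated equivalents are cuda.get_device_from_id (for integer IDs) and cuda.get_device_from_array (for arrays); a minimal sketch of the migration:

from chainer import cuda

def use_gpu(gpu_id):
    # current replacement for the deprecated cuda.get_device(gpu_id).use()
    cuda.get_device_from_id(gpu_id).use()

def device_of(array):
    # returns the device an array lives on; CPU arrays map to DummyDevice
    return cuda.get_device_from_array(array)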
Example #34
0
def main():
    os.makedirs(args.snapshot_path, exist_ok=True)

    comm = chainermn.create_communicator()
    device = comm.intra_rank
    print("device", device, "/", comm.size)
    cuda.get_device(device).use()
    xp = cupy

    dataset = gqn.data.Dataset(args.dataset_path)

    hyperparams = HyperParameters()
    hyperparams.generator_share_core = args.generator_share_core
    hyperparams.generator_share_prior = args.generator_share_prior
    hyperparams.generator_generation_steps = args.generation_steps
    hyperparams.inference_share_core = args.inference_share_core
    hyperparams.inference_share_posterior = args.inference_share_posterior
    hyperparams.channels_chz = args.channels_chz
    hyperparams.generator_channels_u = args.channels_u
    hyperparams.inference_channels_map_x = args.channels_map_x
    hyperparams.pixel_n = args.pixel_n
    hyperparams.pixel_sigma_i = args.initial_pixel_sigma
    hyperparams.pixel_sigma_f = args.final_pixel_sigma
    if comm.rank == 0:
        hyperparams.save(args.snapshot_path)
        hyperparams.print()

    model = Model(hyperparams, snapshot_directory=args.snapshot_path)
    model.to_gpu()

    optimizer = Optimizer(
        model.parameters,
        communicator=comm,
        mu_i=args.initial_lr,
        mu_f=args.final_lr)
    if comm.rank == 0:
        optimizer.print()

    dataset_mean, dataset_std = dataset.load_mean_and_std()

    if comm.rank == 0:
        np.save(os.path.join(args.snapshot_path, "mean.npy"), dataset_mean)
        np.save(os.path.join(args.snapshot_path, "std.npy"), dataset_std)

    # avoid division by zero
    dataset_std += 1e-12

    sigma_t = hyperparams.pixel_sigma_i
    pixel_var = xp.full(
        (args.batch_size, 3) + hyperparams.image_size,
        sigma_t**2,
        dtype="float32")
    pixel_ln_var = xp.full(
        (args.batch_size, 3) + hyperparams.image_size,
        math.log(sigma_t**2),
        dtype="float32")

    random.seed(0)
    subset_indices = list(range(len(dataset.subset_filenames)))

    current_training_step = 0
    for iteration in range(args.training_iterations):
        mean_kld = 0
        mean_nll = 0
        total_batch = 0
        subset_size_per_gpu = len(subset_indices) // comm.size
        start_time = time.time()

        for subset_loop in range(subset_size_per_gpu):
            random.shuffle(subset_indices)
            subset_index = subset_indices[comm.rank]
            subset = dataset.read(subset_index)
            iterator = gqn.data.Iterator(subset, batch_size=args.batch_size)

            for batch_index, data_indices in enumerate(iterator):
                # shape: (batch, views, height, width, channels)
                # range: [-1, 1]
                images, viewpoints = subset[data_indices]

                # preprocessing
                images = (images - dataset_mean) / dataset_std

                # (batch, views, height, width, channels) ->  (batch, views, channels, height, width)
                images = images.transpose((0, 1, 4, 2, 3))

                total_views = images.shape[1]

                # sample number of views
                num_views = random.choice(range(total_views))
                query_index = random.choice(range(total_views))

                if current_training_step == 0 and num_views == 0:
                    num_views = 1  # avoid OpenMPI error

                if num_views > 0:
                    r = model.compute_observation_representation(
                        images[:, :num_views], viewpoints[:, :num_views])
                else:
                    r = xp.zeros(
                        (args.batch_size, hyperparams.channels_r) +
                        hyperparams.chrz_size,
                        dtype="float32")
                    r = chainer.Variable(r)

                query_images = images[:, query_index]
                query_viewpoints = viewpoints[:, query_index]
                # transfer to gpu
                query_images = to_gpu(query_images)
                query_viewpoints = to_gpu(query_viewpoints)

                h0_gen, c0_gen, u_0, h0_enc, c0_enc = model.generate_initial_state(
                    args.batch_size, xp)

                loss_kld = 0

                hl_enc = h0_enc
                cl_enc = c0_enc
                hl_gen = h0_gen
                cl_gen = c0_gen
                ul_enc = u_0

                xq = model.inference_downsampler.downsample(query_images)

                for l in range(model.generation_steps):
                    inference_core = model.get_inference_core(l)
                    inference_posterior = model.get_inference_posterior(l)
                    generation_core = model.get_generation_core(l)
                    generation_prior = model.get_generation_prior(l)

                    h_next_enc, c_next_enc = inference_core.forward_onestep(
                        hl_gen, hl_enc, cl_enc, xq, query_viewpoints, r)

                    mean_z_q = inference_posterior.compute_mean_z(hl_enc)
                    ln_var_z_q = inference_posterior.compute_ln_var_z(hl_enc)
                    ze_l = cf.gaussian(mean_z_q, ln_var_z_q)

                    mean_z_p = generation_prior.compute_mean_z(hl_gen)
                    ln_var_z_p = generation_prior.compute_ln_var_z(hl_gen)

                    h_next_gen, c_next_gen, u_next_enc = generation_core.forward_onestep(
                        hl_gen, cl_gen, ul_enc, ze_l, query_viewpoints, r)

                    kld = gqn.nn.chainer.functions.gaussian_kl_divergence(
                        mean_z_q, ln_var_z_q, mean_z_p, ln_var_z_p)

                    loss_kld += cf.sum(kld)

                    hl_gen = h_next_gen
                    cl_gen = c_next_gen
                    ul_enc = u_next_enc
                    hl_enc = h_next_enc
                    cl_enc = c_next_enc

                mean_x = model.generation_observation.compute_mean_x(ul_enc)
                negative_log_likelihood = gqn.nn.chainer.functions.gaussian_negative_log_likelihood(
                    query_images, mean_x, pixel_var, pixel_ln_var)
                loss_nll = cf.sum(negative_log_likelihood)

                loss_nll /= args.batch_size
                loss_kld /= args.batch_size
                loss = loss_nll + loss_kld

                model.cleargrads()
                loss.backward()
                optimizer.update(current_training_step)

                if comm.rank == 0:
                    printr(
                        "Iteration {}: Subset {} / {}: Batch {} / {} - loss: nll: {:.3f} kld: {:.3f} - lr: {:.4e} - sigma_t: {:.6f}".
                        format(iteration + 1, subset_loop * comm.size + 1,
                               len(dataset), batch_index + 1,
                               len(subset) // args.batch_size,
                               float(loss_nll.data), float(loss_kld.data),
                               optimizer.learning_rate, sigma_t))

                sf = hyperparams.pixel_sigma_f
                si = hyperparams.pixel_sigma_i
                sigma_t = max(
                    sf + (si - sf) *
                    (1.0 - current_training_step / hyperparams.pixel_n), sf)

                pixel_var[...] = sigma_t**2
                pixel_ln_var[...] = math.log(sigma_t**2)

                total_batch += 1
                current_training_step += comm.size
                # current_training_step += 1
                mean_kld += float(loss_kld.data)
                mean_nll += float(loss_nll.data)

            if comm.rank == 0:
                model.serialize(args.snapshot_path)

        if comm.rank == 0:
            elapsed_time = time.time() - start_time
            print(
                "\033[2KIteration {} - loss: nll: {:.3f} kld: {:.3f} - lr: {:.4e} - sigma_t: {:.6f} - step: {} - elapsed_time: {:.3f} min".
                format(iteration + 1, mean_nll / total_batch,
                       mean_kld / total_batch, optimizer.learning_rate,
                       sigma_t, current_training_step, elapsed_time / 60))
            model.serialize(args.snapshot_path)
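
The sigma_t update inside this loop linearly anneals the pixel-noise standard deviation from pixel_sigma_i down to pixel_sigma_f over the first pixel_n training steps and clamps it there afterwards. Factored out for clarity (same arithmetic as in the loop body):

def annealed_pixel_sigma(step, sigma_i, sigma_f, pixel_n):
    # linear interpolation from sigma_i to sigma_f over pixel_n steps,
    # clamped at sigma_f once the schedule is exhausted
    return max(sigma_f + (sigma_i - sigma_f) * (1.0 - step / pixel_n), sigma_f)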
Example #35
0
 def test_get_device_for_builtin_int(self):
     # builtins.int is from future package and it is different
     # from builtin int/long on Python 2.
     with testing.assert_warns(DeprecationWarning):
         device = cuda.get_device(builtins.int(0))
     assert device == cuda.Device(0)
Example #36
0
 def test_get_device_for_device(self):
     device = cuda.get_device_from_id(0)
     with testing.assert_warns(DeprecationWarning):
         assert cuda.get_device(device) is device