Example #1
class UnpairedIterator(iterator.Iterator):
    """An iterator for unpaired dataset which wraps two SerialIterator.
    """
    def __init__(self, dataset1, dataset2, batch_size, repeat=True):
        if len(dataset2) < len(dataset1):
            self._main_iter = SerialIterator(dataset1,
                                             batch_size=batch_size,
                                             repeat=repeat,
                                             shuffle=True)
            self._sub_iter = SerialIterator(dataset2,
                                            batch_size=batch_size,
                                            repeat=True,
                                            shuffle=True)
            self._rev = False
        else:
            self._main_iter = SerialIterator(dataset2,
                                             batch_size=batch_size,
                                             repeat=repeat,
                                             shuffle=True)
            self._sub_iter = SerialIterator(dataset1,
                                            batch_size=batch_size,
                                            repeat=True,
                                            shuffle=True)
            self._rev = True

    def __next__(self):
        if self._rev:
            return [
                x for x in zip(self._sub_iter.next(), self._main_iter.next())
            ]
        else:
            return [
                x for x in zip(self._main_iter.next(), self._sub_iter.next())
            ]

    next = __next__

    @property
    def epoch(self):
        return self._main_iter.epoch

    @property
    def epoch_detail(self):
        return self._main_iter.epoch_detail

    @property
    def previous_epoch_detail(self):
        return self._main_iter.previous_epoch_detail

    def reset(self):
        self._main_iter.reset()
        self._sub_iter.reset()

    @property
    def repeat(self):
        return self._main_iter.repeat

    @property
    def is_new_epoch(self):
        return self._main_iter.is_new_epoch
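
A minimal usage sketch for the class above (hypothetical toy datasets; assumes the class and its SerialIterator import are available as in the snippet): each next() call yields batch_size pairs whose first element comes from dataset1 and whose second comes from dataset2.

import numpy as np

dataset1 = [np.float32(i) for i in range(100)]  # the larger dataset drives epoch counting
dataset2 = [np.float32(i) for i in range(60)]   # the smaller dataset is repeated and re-shuffled
it = UnpairedIterator(dataset1, dataset2, batch_size=8)
pairs = it.next()  # list of 8 (item_from_dataset1, item_from_dataset2) tuples
print(len(pairs), it.epoch, it.is_new_epoch)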
Example #2
    def act_and_trains(self, imgobj, target_angle):
        x = [self.phi(s) for s in [imgobj]]
        t = np.array([target_angle], np.float32)
        self.data.append(x[0])
        self.target_angles.append(t[0])
        if len(self.data) > MAX_DATA:
            del self.data[0]
            del self.target_angles[0]
        dataset = TupleDataset(self.data, self.target_angles)
        train_iter = SerialIterator(dataset,
                                    batch_size=BATCH_SIZE,
                                    repeat=True,
                                    shuffle=True)
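        # A fresh SerialIterator is built on every call, so the next() below just
        # draws one shuffled batch from the (at most MAX_DATA) stored samples
        # for a single online update step.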
        train_batch = train_iter.next()
        x_train, t_train = chainer.dataset.concat_examples(train_batch, -1)

        y_train = self.net(x_train)
        loss_train = F.mean_squared_error(
            y_train, Variable(t_train.reshape(BATCH_SIZE, 1)))

        self.loss_list.append(loss_train.array)

        self.net.cleargrads()
        loss_train.backward()
        self.optimizer.update()

        self.count += 1

        self.results_train['loss'].append(loss_train.array)
        x_test = chainer.dataset.concat_examples(x, -1)
        with chainer.using_config('train', False), chainer.using_config(
                'enable_backprop', False):
            action_value = self.net(x_test)
        return action_value.data[0][0], loss_train.array
Example #3
def feed_data():
    # loads images and prepares dataset
    res_q.put("disp", "preparing training dataset")
    res_q.put("prog_set", TRAIN_PAIRS * 2)
    if RND_RESIZE:
        height, width = (IMG_HEIGHT * 4) // 3, (IMG_WIDTH * 4) // 3
    else:
        height, width = IMG_HEIGHT, IMG_WIDTH
    tr_nw = pool_map(load_img, TRAIN_PAIRS, True, 0, DIGIT_LEN, INPUT_DIR,
                     INPUT_EXT, "train", height, width)
    tr_lb = pool_map(load_img, TRAIN_PAIRS, True, 0, DIGIT_LEN, LABEL_DIR,
                     LABEL_EXT, "label", height, width)
    # prepares an iterator
    if COLOR_SHIFT:
        res_q.put("disp", "performing primary component analysis")
        res_q.put("prog_set", TRAIN_PAIRS)
        tr_pc = pool_map(perform_pca, TRAIN_PAIRS, False, tr_nw)
        tr_data = [(tr_nw[i], tr_lb[i], tr_pc[i]) for i in range(TRAIN_PAIRS)]
    else:
        tr_data = [(tr_nw[i], tr_lb[i]) for i in range(TRAIN_PAIRS)]
    serial_iter = SerialIterator(tr_data, BATCH_SIZE, shuffle=True)
    data_q.put("prepare")
    for b in range(RESTART_POS, MAX_BATCH):
        batch = serial_iter.next()
        batch = [generate_train_data(*x) for x in batch]
        tb = np.concatenate([x[0][np.newaxis, :] for x in batch], axis=0)
        sb = np.concatenate([x[1][np.newaxis, :] for x in batch], axis=0)
        data_q.put("batch", (b + 1, tb.copy(), sb.copy()))
        if (b + 1) % EVAL_ITVL == 0:
            data_q.put("eval", b + 1)
    # termination
    data_q.put("end")
Example #4
    def act_and_trains(self, imgobj, correct_action):
        x = [self.phi(s) for s in [imgobj]]
        t = np.array([correct_action], np.int32)
        dataset = TupleDataset(x, t)
        train_iter = SerialIterator(dataset,
                                    batch_size=BATCH_SIZE,
                                    repeat=True,
                                    shuffle=False)
        train_batch = train_iter.next()
        x_train, t_train = chainer.dataset.concat_examples(train_batch, -1)

        y_train = self.net(x_train)

        loss_train = F.softmax_cross_entropy(y_train, t_train)
        acc_train = F.accuracy(y_train, t_train)

        self.loss_list.append(loss_train.array)
        self.acc_list.append(acc_train.array)

        self.net.cleargrads()
        loss_train.backward()
        self.optimizer.update()

        self.count += 1

        self.results_train['loss'].append(loss_train.array)
        self.results_train['accuracy'].append(acc_train.array)

        action = np.argmax(y_train.array)
        self.accuracy = np.mean(self.acc_list)
        # print('iteration: {}, acc (train): {:.4f}, action: {}'.format(self.count, self.accuracy, action))

        return action
Example #5
def train():
    batchsize = 128
    max_epoch = 10
    device = 0
    train_data, test_data = mnist.get_mnist(withlabel=True, ndim=1)
    train_iter = SerialIterator(train_data, batchsize)
    test_iter = SerialIterator(test_data, batchsize, repeat=False, shuffle=False)
    model = MyNetwork()
    if chainer.cuda.available and device >= 0:
        model.to_gpu(device)
    else:
        device = -1
    optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    optimizer.setup(model)

    while train_iter.epoch < max_epoch:
        train_batch = train_iter.next()
        image_train, target_train = concat_examples(train_batch, device)
        prediction_train = model(image_train)
        loss = F.softmax_cross_entropy(prediction_train, target_train)
        model.cleargrads()
        loss.backward()
        optimizer.update()
        if train_iter.is_new_epoch:
            loss_array = float(chainer.backends.cuda.to_cpu(loss.array))
            print("epoch{:2d} train_loss:{:.04f}".format(train_iter.epoch, loss_array))
            test_losses = []
            test_accs = []
            while True:
                test_batch = test_iter.next()
                image_test, target_test = concat_examples(test_batch, device)
                prediction_test = model(image_test)
                loss_test = F.softmax_cross_entropy(prediction_test, target_test)
                test_losses.append(chainer.backends.cuda.to_cpu(loss_test.array))
                acc = F.accuracy(prediction_test, target_test)
                test_accs.append(chainer.backends.cuda.to_cpu(acc.array))
                if test_iter.is_new_epoch:
                    test_iter.reset()
                    break
            mean_loss = np.mean(test_losses)
            mean_acc = np.mean(test_accs)
            print("val_loss:{:.04f} val_accuracy:{:.04f}".format(mean_loss, mean_acc))

    chainer.serializers.save_npz("model.npz", model)
Example #6
def feed_data():
    # loads images and prepares dataset
    res_q.put("disp", "loading training images")
    res_q.put("prog_set", TRAIN_NUM)
    tr_img = pool_map(load_img, TRAIN_NUM, True, DIGIT_LEN, TRAIN_DIR,
                      COLOR_SHIFT)
    # prepares an iterator
    serial_iter = SerialIterator(tr_img, BATCH_SIZE, shuffle=True)
    data_q.put("prepare")
    for b in range(RESTART_POS, MAX_BATCH):
        batch = serial_iter.next()
        batch = [generate_train_data(x) for x in batch]
        tb = np.concatenate([x[0] for x in batch], axis=0)
        sb = np.concatenate([x[1] for x in batch], axis=0)
        data_q.put("batch", (b + 1, tb.copy(), sb.copy()))
        if (b + 1) % EVAL_ITVL == 0:
            data_q.put("eval", b + 1)
    # termination
    data_q.put("end")
Example #7
def run_train(net, optimizer, dataset, save_dir, gpu_id):
    n_batch = 64
    n_epoch = 50

    SAVE_MODEL_PER_ITER = 1000

    # Transfer the network to the GPU
    if gpu_id is not None:
        net.to_gpu(gpu_id)
    # log
    results_train, results_valid = {}, {}
    results_train['loss'], results_train['accuracy'] = [], []
    results_valid['loss'], results_valid['accuracy'] = [], []

    # Split the input data
    train_val, test_data = split_dataset_random(dataset,
                                                int(len(dataset) * 0.8),
                                                seed=0)
    train, valid = split_dataset_random(train_val,
                                        int(len(train_val) * 0.8),
                                        seed=0)

    # Print the number of iterations
    print('# of epoch:', n_epoch)
    print('# of batch:', n_batch)
    print('# of train data:', len(train))
    print('# of valid data:', len(valid))
    print('# of iteration:', int(max(n_epoch,
                                     n_epoch * len(train) / n_batch)), '\n')

    # batch_size of 64, following the Puyo Puyo AI reference
    train_iter = SerialIterator(train,
                                batch_size=n_batch,
                                repeat=True,
                                shuffle=True)

    count = 0
    for epoch in range(n_epoch):
        while True:
            # Fetch a mini-batch
            train_batch = train_iter.next()

            # Convert from [(x1, t1), (x2, t2), ...] format to [[x1, x2, x3, ...], [t1, t2, t3, ...]] format
            x0_train, x1_train, t_train = concat_samples(train_batch, gpu_id)

            # Compute the predictions and the objective function
            y_train = net(x0_train, x1_train)
            loss_train = F.softmax_cross_entropy(y_train, t_train)
            acc_train = F.accuracy(y_train, t_train)

            # Reset and compute the gradients
            net.cleargrads()
            loss_train.backward()
            # Update the parameters
            optimizer.update()

            # Increment the iteration counter
            count += 1

            # Save the model every SAVE_MODEL_PER_ITER iterations
            if count % SAVE_MODEL_PER_ITER == 0:
                # Save a snapshot of the model at this iteration
                save_filename = os.path.join(save_dir,
                                             'net_{:03d}.npz'.format(count))
                save_model(net, gpu_id, save_filename)
                print('save model (iteration {}) to {}\n'.format(
                    count, save_filename))

            # After finishing an epoch, evaluate on the validation data
            if train_iter.is_new_epoch or count % SAVE_MODEL_PER_ITER == 0:
                # Check the results on the validation data
                with chainer.using_config('train',
                                          False), chainer.using_config(
                                              'enable_backprop', False):
                    # x_valid, t_valid = chainer.dataset.concat_examples(valid, gpu_id)
                    x0_valid, x1_valid, t_valid = concat_samples(valid, gpu_id)
                    y_valid = net(x0_valid, x1_valid)
                    loss_valid = F.softmax_cross_entropy(y_valid, t_valid)
                    acc_valid = F.accuracy(y_valid, t_valid)
                # Note: results computed on the GPU stay on the GPU, so transfer them to the CPU
                if gpu_id is not None:
                    loss_train.to_cpu()
                    loss_valid.to_cpu()
                    acc_train.to_cpu()
                    acc_valid.to_cpu()
                # Display the results
                print(
                    'epoch: {}, iteration: {}, loss (train): {:.4f}, loss (valid): {:.4f}\n'
                    'acc (train): {:.4f}, acc (valid): {:.4f}\n'.format(
                        epoch, count, loss_train.array.mean(),
                        loss_valid.array.mean(), acc_train.array.mean(),
                        acc_valid.array.mean()))
                if train_iter.is_new_epoch:
                    # Keep for visualization
                    results_train['loss'].append(loss_train.array)
                    results_train['accuracy'].append(acc_train.array)
                    results_valid['loss'].append(loss_valid.array)
                    results_valid['accuracy'].append(acc_valid.array)
                    break

    # Save the final model
    save_filename = os.path.join(save_dir, 'net_final.npz')
    save_model(net, gpu_id, save_filename)
    print('save model (iteration {}) to {}\n'.format(count, save_filename))

    # Loss
    plt.plot(results_train['loss'], label='train')  # 'label' sets the legend entry
    plt.plot(results_valid['loss'], label='valid')  # 'label' sets the legend entry
    plt.legend()  # show the legend
    plt.savefig(os.path.join(save_dir, 'loss.png'))
    plt.figure()
    # Accuracy
    plt.plot(results_train['accuracy'], label='train')  # 'label' sets the legend entry
    plt.plot(results_valid['accuracy'], label='valid')  # 'label' sets the legend entry
    plt.legend()  # show the legend
    plt.savefig(os.path.join(save_dir, 'accuracy.png'))
Example #8
from chainer.datasets import TupleDataset
dataset = TupleDataset(x, t)
# print(dataset[0])
# print(dataset[:2])
## Dataset split
from chainer.datasets import split_dataset_random
train_val, test = split_dataset_random(dataset,
                                       int(len(dataset) * 0.7),
                                       seed=0)
train, valid = split_dataset_random(train_val,
                                    int(len(train_val) * 0.7),
                                    seed=0)
## SerialIterator
from chainer.iterators import SerialIterator
train_iter = SerialIterator(train, batch_size=4, repeat=True, shuffle=True)
minibatch = train_iter.next()
print(minibatch)
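# minibatch is a list of batch_size (x, t) tuples drawn from train;
# chainer.dataset.concat_examples can stack it into separate x and t arrays.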

# Define the network
## Chain
import chainer
import chainer.links as L
import chainer.functions as F


class Net(chainer.Chain):
    def __init__(self, n_in=4, n_hidden=3, n_out=3):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(n_in, n_hidden)
            self.l2 = L.Linear(n_hidden, n_hidden)
            self.l3 = L.Linear(n_hidden, n_out)

    def forward(self, x):
        # Assumed completion of the truncated snippet: a standard 3-layer MLP forward pass.
        h = F.relu(self.l1(x))
        h = F.relu(self.l2(h))
        return self.l3(h)
Example #9
def train(batch_size, epoch_count, lamda, datasetA_folder_path,
          datasetB_folder_path, output_path):
    dataset_A = data_io.dataset_load(datasetA_folder_path)
    train_iter_A = SerialIterator(dataset_A,
                                  batch_size,
                                  repeat=True,
                                  shuffle=True)
    dataset_B = data_io.dataset_load(datasetB_folder_path)
    train_iter_B = SerialIterator(dataset_B,
                                  batch_size,
                                  repeat=True,
                                  shuffle=True)

    g_ab = Generator()
    g_ba = Generator()
    d_a = Discriminator()
    d_b = Discriminator()

    g_ab.to_gpu(0)
    g_ba.to_gpu(0)
    d_a.to_gpu(0)
    d_b.to_gpu(0)

    opt_g_ab = chainer.optimizers.Adam(alpha=0.0002, beta1=0.5)
    opt_g_ab.setup(g_ab)
    opt_g_ba = chainer.optimizers.Adam(alpha=0.0002, beta1=0.5)
    opt_g_ba.setup(g_ba)
    opt_d_a = chainer.optimizers.Adam(alpha=0.0002, beta1=0.5)
    opt_d_a.setup(d_a)
    opt_d_b = chainer.optimizers.Adam(alpha=0.0002, beta1=0.5)
    opt_d_b.setup(d_b)

    iteration = 0
    train_iter_A.reset()
    train_iter_B.reset()

    log_list = []
    image_path = output_path + "image/"
    disA_model_path = output_path + "dis_A/"
    disB_model_path = output_path + "dis_B/"
    genAB_model_path = output_path + "gen_AB/"
    genBA_model_path = output_path + "gen_BA/"
    os.mkdir(output_path)
    os.mkdir(image_path)
    os.mkdir(disA_model_path)
    os.mkdir(disB_model_path)
    os.mkdir(genAB_model_path)
    os.mkdir(genBA_model_path)

    for epoch in range(epoch_count):
        d_a_loss_list = []
        d_b_loss_list = []
        g_AB_loss_list = []
        g_BA_loss_list = []
        while True:
            mini_batch_images_A = train_iter_A.next()
            mini_batch_images_A = np.array(mini_batch_images_A)
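            # scale 8-bit pixel values from [0, 255] to roughly [-1, 1]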
            mini_batch_images_A = (mini_batch_images_A - 128.0) / 128.0
            real_a = Variable(np.array(mini_batch_images_A))
            real_a.to_gpu(0)

            mini_batch_images_B = train_iter_B.next()
            mini_batch_images_B = np.array(mini_batch_images_B)
            mini_batch_images_B = (mini_batch_images_B - 128.0) / 128.0
            real_b = Variable(np.array(mini_batch_images_B))
            real_b.to_gpu(0)

            fake_b = g_ab(real_a)
            fake_a = g_ba(real_b)

            reconstr_a = g_ba(fake_b)
            reconstr_b = g_ab(fake_a)

            d_a_real_result = d_a(real_a)
            d_a_fake_result = d_a(fake_a)
            loss_d_a = loss_dis(batch_size, d_a_real_result, d_a_fake_result)

            d_b_real_result = d_b(real_b)
            d_b_fake_result = d_b(fake_b)
            loss_d_b = loss_dis(batch_size, d_b_real_result, d_b_fake_result)

            d_a.cleargrads()
            loss_d_a.backward()
            opt_d_a.update()

            d_b.cleargrads()
            loss_d_b.backward()
            opt_d_b.update()
            """generatorのloss計算"""
            loss_g_ab = loss_gen(batch_size, d_b_fake_result, real_a,
                                 reconstr_a, lamda)
            loss_g_ba = loss_gen(batch_size, d_a_fake_result, real_b,
                                 reconstr_b, lamda)

            g_ab.cleargrads()
            loss_g_ab.backward()
            opt_g_ab.update()

            g_ba.cleargrads()
            loss_g_ba.backward()
            opt_g_ba.update()

            loss_d_a.to_cpu()
            loss_d_b.to_cpu()
            loss_g_ab.to_cpu()
            loss_g_ba.to_cpu()

            iteration += batch_size
            d_a_loss_list.append(loss_d_a.array)
            d_b_loss_list.append(loss_d_b.array)
            g_AB_loss_list.append(loss_g_ab.array)
            g_BA_loss_list.append(loss_g_ba.array)

            if train_iter_A.is_new_epoch or train_iter_B.is_new_epoch:
                break

        real_a.to_cpu()
        fake_b.to_cpu()
        reconstr_a.to_cpu()
        real_b.to_cpu()
        fake_a.to_cpu()
        reconstr_b.to_cpu()
        real_a_images = real_a.array.transpose(0, 2, 3, 1)
        fake_b_images = fake_b.array.transpose(0, 2, 3, 1)
        reconstr_a_images = reconstr_a.array.transpose(0, 2, 3, 1)
        real_b_images = real_b.array.transpose(0, 2, 3, 1)
        fake_a_images = fake_a.array.transpose(0, 2, 3, 1)
        reconstr_b_images = reconstr_b.array.transpose(0, 2, 3, 1)
        data_io.output_images(image_path + str(epoch), real_a_images,
                              fake_b_images, reconstr_a_images, real_b_images,
                              fake_a_images, reconstr_b_images)

        print("epoch: " + str(epoch) + ", interation: " + str(iteration) + \
            ", d_A_loss: " + str(np.mean(d_a_loss_list)) + ", d_B_loss: " + str(np.mean(d_b_loss_list)) + \
            ", g_AB_loss: " + str(np.mean(g_AB_loss_list)) + ", g_BA_loss: " + str(np.mean(g_BA_loss_list)))

        log_json = {"epoch": str(epoch), "interation": str(iteration), \
            "d_A_loss": str(np.mean(d_a_loss_list)), "d_B_loss": str(np.mean(d_b_loss_list)), \
            "g_AB_loss": str(np.mean(g_AB_loss_list)), "g_BA_loss": str(np.mean(g_BA_loss_list))}
        log_list.append(log_json)
        with open(output_path + 'log.json', 'w') as log_file:
            json.dump(log_list, log_file, indent=4)

        if (epoch % 100 == 0):
            g_ab.to_cpu()
            g_ba.to_cpu()
            d_a.to_cpu()
            d_b.to_cpu()
            save_npz(genAB_model_path + str(epoch) + '.npz', g_ab)
            save_npz(genBA_model_path + str(epoch) + '.npz', g_ba)
            save_npz(disA_model_path + str(epoch) + '.npz', d_a)
            save_npz(disB_model_path + str(epoch) + '.npz', d_b)
            g_ab.to_gpu(0)
            g_ba.to_gpu(0)
            d_a.to_gpu(0)
            d_b.to_gpu(0)

    g_ab.to_cpu()
    g_ba.to_cpu()
    d_a.to_cpu()
    d_b.to_cpu()
    save_npz(genAB_model_path + 'last.npz', g_ab)
    save_npz(genBA_model_path + 'last.npz', g_ba)
    save_npz(disA_model_path + 'last.npz', d_a)
    save_npz(disB_model_path + 'last.npz', d_b)
Example #10
def train(batch_size, epoch_count, dataset_folder_path, n_hidden, output_path):
    dataset = data_io.dataset_load(dataset_folder_path)
    train_iter = SerialIterator(dataset, batch_size, repeat=True, shuffle=True)

    gen = Generator(n_hidden=n_hidden)
    dis = Discriminator()

    gen.to_gpu(0)
    dis.to_gpu(0)

    opt_gen = chainer.optimizers.Adam(alpha=0.0002, beta1=0.5)
    opt_gen.setup(gen)
    opt_dis = chainer.optimizers.Adam(alpha=0.0002, beta1=0.5)
    opt_dis.setup(dis)

    iteration = 0
    train_iter.reset()

    log_list = []
    image_path = output_path + "image/"
    dis_model_path = output_path + "dis/"
    gen_model_path = output_path + "gen/"
    os.mkdir(output_path)
    os.mkdir(image_path)
    os.mkdir(dis_model_path)
    os.mkdir(gen_model_path)

    for epoch in range(epoch_count):
        d_loss_list = []
        g_loss_list = []
        while True:
            mini_batch_images = train_iter.next()
            mini_batch_images = np.array(mini_batch_images)
            mini_batch_images = (mini_batch_images - 128.0) / 128.0
            x_real = Variable(np.array(mini_batch_images))

            x_real.to_gpu(0)
            y_real = dis(x_real)

            noise = xp.random.uniform(-1,
                                      1, (batch_size, n_hidden),
                                      dtype=np.float32)
            z = Variable(noise)
            x_fake = gen(z, batch_size)
            y_fake = dis(x_fake)

            d_loss = loss_dis(batch_size, y_real, y_fake)
            g_loss = loss_gen(batch_size, y_fake)

            dis.cleargrads()
            d_loss.backward()
            opt_dis.update()

            gen.cleargrads()
            g_loss.backward()
            opt_gen.update()

            d_loss.to_cpu()
            g_loss.to_cpu()

            iteration += batch_size
            d_loss_list.append(d_loss.array)
            g_loss_list.append(g_loss.array)

            if train_iter.is_new_epoch:
                break

        x_fake.to_cpu()
        generated_images = x_fake.array
        generated_images = generated_images.transpose(0, 2, 3, 1)
        Image.fromarray(
            np.clip(generated_images[0] * 255, 0.0, 255.0).astype(
                np.uint8)).save(image_path + str(epoch) + ".png")

        print("epoch: " + str(epoch) + ", interation: " + str(iteration) +
              ", d_loss: " + str(np.mean(d_loss_list)) + ", g_loss: " +
              str(np.mean(g_loss_list)))

        log_json = {
            "epoch": str(epoch),
            "iteration": str(iteration),
            "d_loss": str(np.mean(d_loss_list)),
            "g_loss": str(np.mean(g_loss_list))
        }
        log_list.append(log_json)
        with open(output_path + 'log.json', 'w') as log_file:
            json.dump(log_list, log_file, indent=4)

        if (epoch % 100 == 0):
            dis.to_cpu()
            save_npz(dis_model_path + str(epoch) + '.npz', dis)
            gen.to_cpu()
            save_npz(gen_model_path + str(epoch) + '.npz', gen)
            gen.to_gpu(0)
            dis.to_gpu(0)

    logGraph.save_log_graph(output_path + 'log.json',
                            output_path + "lossGraph.png")
    dis.to_cpu()
    save_npz(dis_model_path + 'last.npz', dis)
    gen.to_cpu()
    save_npz(gen_model_path + 'last.npz', gen)
Example #11
def main():
	parser = argparse.ArgumentParser(
		formatter_class=argparse.ArgumentDefaultsHelpFormatter)
	parser.add_argument('--pretrained', type=str, help='path to model that has trained classifier but has not been trained through GAIN routine')
	parser.add_argument('--trained', type=str, help='path to model trained through GAIN')
	parser.add_argument('--device', type=int, default=-1, help='gpu id')
	parser.add_argument('--shuffle', type=bool, default=False, help='whether to shuffle dataset')
	parser.add_argument('--whole', type=bool, default=False, help='whether to test for the whole validation dataset')
	parser.add_argument('--no', type=int, default=20, help='if not whole, then no of images to visualize')
	parser.add_argument('--name', type=str, default='viz1', help='name of the subfolder or experiment under which to save')

	args = parser.parse_args()

	pretrained_file = args.pretrained
	trained_file = args.trained
	device = args.device
	shuffle = args.shuffle
	whole = args.whole
	name = args.name
	N = args.no

	dataset = MyTrainingDataset(split='train')
	iterator = SerialIterator(dataset, 1, shuffle=shuffle, repeat=False)
	converter = chainer.dataset.concat_examples
	os.makedirs('viz/'+name, exist_ok=True)
	# no_of_classes = 20
	no_of_classes = 21
	#FCN8s()
	pretrained = FCN8s_hand()
	trained = FCN8s_hand()
	load_npz(pretrained_file, pretrained)
	load_npz(trained_file, trained)
	
	if device >=0:
		pretrained.to_gpu()
		trained.to_gpu()
	i = 0
	
	while not iterator.is_new_epoch:
		
		if not whole and i >= N:
			break

		# image, labels = converter(iterator.next()
		image, labels, metadata = converter(iterator.next())
		image = Variable(image)
		if device >=0:
			image.to_gpu()

		xp = get_array_module(image.data)
		to_substract = np.array((-1, 0))
		noise_classes = np.unique(labels[0]).astype(np.int32)
		target = xp.asarray([[0]*(no_of_classes)])
		gt_labels = np.setdiff1d(noise_classes, to_substract) - 1

		# gcam1, cl_scores1, class_id1 = pretrained.stream_cl(image, gt_labels)
		# gcam2, cl_scores2, class_id2 = trained.stream_cl(image, gt_labels)
		# cl_output = pretrained.classify(image, is_training=False)
		# print(cp.asnumpy(trained.classify(image, is_training=False).data))
		lbl1 = pretrained.predict(image)
		lbl1 = cp.asnumpy(lbl1[0].data)
		# lbl1[lbl1 != 21] = 0
		print(np.unique(lbl1))
		# print("Non zero mask pixels {}".format(np.max(cp.asnumpy(lbl1[0].data))))

		# Note: class_id is only produced by the stream_cl calls commented out above,
		# so this conversion is disabled here as well.
		# if device >= 0:
		# 	class_id = cp.asnumpy(class_id)
		fig1 = plt.figure(figsize=(20,10))
		ax1= plt.subplot2grid((3, 9), (0, 0), colspan=3, rowspan=3)
		ax1.axis('off')
		ax1.imshow(cp.asnumpy(F.transpose(F.squeeze(image, 0), (1, 2, 0)).data) / 255.)

		ax2= plt.subplot2grid((3, 9), (0, 3), colspan=3, rowspan=3)
		ax2.axis('off')
		ax2.imshow(cp.asnumpy(F.transpose(F.squeeze(image, 0), (1, 2, 0)).data) / 255.)
		# ax2.imshow(cp.asnumpy(F.squeeze(gcam1[0], 0).data), cmap='jet', alpha=.5)
		# print("Mask dims {}".format(cp.asnumpy(lbl1[0].data).shape))
		# print("Non zero mask pixels {}".format(np.max(cp.asnumpy(lbl1[0].data))))
		ax2.imshow(lbl1, cmap='jet')
		# ax2.set_title("For class - "+str(voc_semantic_segmentation_label_names[cp.asnumpy(class_id1[0])+1]), color='teal')
		del lbl1
		lbl2 = trained.predict(image)
		lbl2 = cp.asnumpy(lbl2[0].data)
		# lbl2[lbl2 != 21] = 0
		print(np.unique(lbl2))
		ax3= plt.subplot2grid((3, 9), (0, 6), colspan=3, rowspan=3)
		ax3.axis('off')
		ax3.imshow(cp.asnumpy(F.transpose(F.squeeze(image, 0), (1, 2, 0)).data) / 255.)
		# ax3.imshow(cp.asnumpy(F.squeeze(gcam2[0], 0).data), cmap='jet', alpha=.5)
		# print("Mask dims {}".format(cp.asnumpy(lbl2[0].data).shape))
		# print("Non zero mask pixels {}".format(np.max(cp.asnumpy(lbl2[0].data))))
		ax3.imshow(lbl2, cmap='jet')
		# ax3.set_title("For class - "+str(voc_semantic_segmentation_label_names[cp.asnumpy(class_id2[0])+1]), color='teal')
		del lbl2
		fig1.savefig('viz/'+name+'/'+str(i)+'.png')
		plt.close()
		print(i)
		i += 1
Example #12
                                   repeat=False,
                                   shuffle=False)

    test_log_name = "test_epoch_" + str(config["test_model_epoch"]) + ".txt"
    test_log_path = os.path.join(config["result_dir"], config["test_dir"],
                                 test_log_name)
    unit_list = []
    est_lv_list = []

    # Inference
    with open(test_log_path, "w") as log_f:
        log_f.write("\t".join(["score_name", "lv", "likelihoods"]) + "\n")
        remaining = len(test_dataset)
        while remaining > 0:
            batch_size = min(test_iterator.batch_size, remaining)
            scores, names = concat_batch(test_iterator.next())
            est = model(scores[:batch_size], True)
            unit_list.extend(model._h.data.get())

            est_softmax = F.softmax(est)
            est_argmax = F.argmax(est_softmax, axis=1)
            est_lv_list.extend(est_argmax.data.get())

            # Write the likelihoods to the log
            for element, lv, name in zip(est_softmax, est_argmax, names):
                log = [os.path.splitext(name)[0], str(int(lv.data) + 1)]
                log.extend(
                    ["{:.3f}".format(float(value.data)) for value in element])
                log_f.write("\t".join(log) + "\n")

            remaining -= batch_size
Example #13
# Logs
results_train, results_valid = {}, {}
results_train['loss'], results_train['accuracy'] = [], []
results_valid['loss'], results_valid['accuracy'] = [], []

train_iter.reset()  # because next() has already been called once above

count = 1

for epoch in range(n_epoch):

    while True:

        # Fetch a mini-batch
        train_batch = train_iter.next()

        # Split into x and t
        # (pass gpu_id to concat_examples to transfer the data to the GPU)
        x_train, t_train = chainer.dataset.concat_examples(train_batch)

        # Compute the predictions and the objective function
        y_train = net(x_train)
        loss_train = F.softmax_cross_entropy(y_train, t_train)
        acc_train = F.accuracy(y_train, t_train)

        # Reset and compute the gradients
        net.cleargrads()
        loss_train.backward()

        # Update the parameters
Example #14
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--pretrained',
        type=str,
        help=
        'path to model that has trained classifier but has not been trained through GAIN routine',
        default='classifier_padding_1_model_594832')
    parser.add_argument(
        '--trained',
        type=str,
        help='path to model trained through GAIN',
        default='result/MYGAIN_5_to_1_padding_1_all_update_model_20000')
    parser.add_argument('--device', type=int, default=0, help='gpu id')
    parser.add_argument('--shuffle',
                        type=bool,
                        default=False,
                        help='whether to shuffle dataset')
    parser.add_argument(
        '--whole',
        type=bool,
        default=False,
        help='whether to test for the whole validation dataset')
    parser.add_argument('--no',
                        type=int,
                        default=5,
                        help='if not whole, then no of images to visualize')
    parser.add_argument(
        '--name',
        type=str,
        default='viz1',
        help='name of the subfolder or experiment under which to save')

    args = parser.parse_args()

    pretrained_file = args.pretrained
    trained_file = args.trained
    device = args.device
    shuffle = args.shuffle
    whole = args.whole
    name = args.name
    N = args.no

    dataset = MyTrainingDataset(split='val')
    iterator = SerialIterator(dataset, 1, shuffle=shuffle, repeat=False)
    converter = chainer.dataset.concat_examples
    os.makedirs('viz/' + name, exist_ok=True)
    no_of_classes = 21
    device = 0
    pretrained = FCN8s_hand()
    trained = FCN8s_hand()
    load_npz(pretrained_file, pretrained)
    load_npz(trained_file, trained)

    if device >= 0:
        pretrained.to_gpu()
        trained.to_gpu()
    i = 0

    true_positive = [0 for j in range(21)]
    true_negative = [0 for j in range(21)]
    false_positive = [0 for j in range(21)]
    false_negative = [0 for j in range(21)]

    while not iterator.is_new_epoch:

        if not whole and i >= N:
            break

        image, labels, metadata = converter(iterator.next())
        np_input_img = image
        np_input_img = np.uint8(np_input_img[0])
        np_input_img = np.transpose(np_input_img, (1, 2, 0))
        image = Variable(image)
        if device >= 0:
            image.to_gpu()

        xp = get_array_module(image.data)
        to_substract = np.array((-1, 0))
        noise_classes = np.unique(labels[0]).astype(np.int32)
        target = xp.asarray([[0] * (no_of_classes)])
        gt_labels = np.setdiff1d(noise_classes, to_substract) - 1
        target[0][gt_labels] = 1

        gcam1, cl_scores1, class_id1 = pretrained.stream_cl(image)
        gcam2, cl_scores2, class_id2 = trained.stream_cl(image)
        # gcams1, cl_scores1, class_ids1 = pretrained.stream_cl_multi(image)
        # gcams2, cl_scores2, class_ids2 = trained.stream_cl_multi(image)

        target = cp.asnumpy(target)
        cl_scores2 = cp.asnumpy(cl_scores2.data)
        # print(target)
        # print(cl_scores2)
        # print()
        # score_sigmoid = F.sigmoid(cl_scores2)
        for j in range(0, len(target[0])):
            # print(target[0][j] == 1)
            if target[0][j] == 1:
                if cl_scores2[0][j] >= 0:
                    true_positive[j] += 1
                else:
                    false_negative[j] += 1
            else:
                if cl_scores2[0][j] <= 0:
                    true_negative[j] += 1
                else:
                    false_positive[j] += 1
        # bboxes = gcams_to_bboxes(gcams2, class_ids2, input_image=np_input_img)
        # cv2.imshow('input', np_input_img)
        # cv2.waitKey(0)
        # Note: class_id is never defined in this function (only class_id1/class_id2 are),
        # so this conversion is disabled.
        # if device >= 0:
        #     class_id = cp.asnumpy(class_id)
        # fig1 = plt.figure(figsize=(20, 10))
        # ax1 = plt.subplot2grid((3, 9), (0, 0), colspan=3, rowspan=3)
        # ax1.axis('off')
        # ax1.imshow(cp.asnumpy(F.transpose(F.squeeze(image, 0), (1, 2, 0)).data) / 255.)
        #
        # ax2 = plt.subplot2grid((3, 9), (0, 3), colspan=3, rowspan=3)
        # ax2.axis('off')
        # ax2.imshow(cp.asnumpy(F.transpose(F.squeeze(image, 0), (1, 2, 0)).data) / 255.)
        # ax2.imshow(cp.asnumpy(F.squeeze(gcam1[0], 0).data), cmap='jet', alpha=.5)
        # ax2.set_title("Before GAIN for class - " + str(dataset.class_names[cp.asnumpy(class_id1)+1]),
        #               color='teal')
        #
        # ax3 = plt.subplot2grid((3, 9), (0, 6), colspan=3, rowspan=3)
        # ax3.axis('off')
        # ax3.imshow(cp.asnumpy(F.transpose(F.squeeze(image, 0), (1, 2, 0)).data) / 255.)
        # ax3.imshow(cp.asnumpy(F.squeeze(gcam2[0], 0).data), cmap='jet', alpha=.5)
        # ax3.set_title("After GAIN for class - " + str(dataset.class_names[cp.asnumpy(class_id2)+1]),
        #               color='teal')
        # fig1.savefig('viz/' + name + '/' + str(i) + '.png')
        # plt.close()
        print(i)
        i += 1
    print("true postive {}".format(true_positive))
    print("true negative {}".format(true_negative))
    print("false positive {}".format(false_positive))
    print("false negative {}".format(false_negative))
Example #15
    def calc_s_test(self, z_train, z_test, r=10, t=5000,
                    epsilon=1e-5,
                    lossfun=None,
                    converter=concat_examples):
        """
        
        Args:
            z_train: train dataset, basically it can be whole train dataset.
            z_test: test dataset, it should be one z_minibatch size.
            t (int): batch size used for one iteration when calculating grad of 
                     train dataset
            r (int): repeat time to update HinvV
            lossfun: loss function

        Returns:

        """
        if lossfun is None:
            # use self.target.__call__ as loss function if not set.
            lossfun = self.target

        states = self._infl_states

        self._calc_and_register_grad(z_test, 'V', lossfun, converter)

        # init HinvV
        for name, param in self.target.namedparams():
            with cuda.get_device_from_array(param.data):
                state = states[name]
                state['HinvV'] = state['V'].copy()

        # Train
        train_iter = SerialIterator(z_train, t)

        # Loop to calculate accurate HinvV
        for _ in range(r):
            train_batch = train_iter.next()

            # 1. Calc grad of original param
            self._calc_and_register_grad(train_batch, 'grad_original', lossfun,
                                         converter)

            # 2. Pertuabation of params and calc grad of perturbed param
            for name, param in self.target.namedparams():
                #param = param + epsilon * states[name][self.STATE_HINV_V]
                param.data = param.data + epsilon * states[name][self.STATE_HINV_V]
            self._calc_and_register_grad(train_batch, self.STATE_GRAD_PERTURBED, lossfun,
                                         converter)

            # 3. Revert params
            for name, param in self.target.namedparams():
                # param = states[name][self.STATE_PARAM_ORIGINAL]
                param.data = states[name][self.STATE_PARAM_ORIGINAL]
            #serializers.load_npz(self.target_filepath, self.target)

            # 4. Update HinvV
            # HinvV <- V + HinvV - (H dot HinvV)
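            #   where (H dot HinvV) is approximated by the finite difference
            #   (grad(param + epsilon * HinvV) - grad(param)) / epsilon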
            for name, param in self.target.namedparams():
                with cuda.get_device_from_array(param.data):
                    state = states[name]
                    state[self.STATE_HINV_V] = (
                        state[self.STATE_V] + state[self.STATE_HINV_V]
                        - (state[self.STATE_GRAD_PERTURBED]
                           - state[self.STATE_GRAD_ORIGINAL]) / epsilon)

        # Here, all the repetition process end!
        # state['HinvV'] can be used as s_test.
        self._clear_infl_states_for_calc_s_test()
Example #16
	pretrained = FCN8s()
	trained = FCN8s()
	load_npz(pretrained_file, pretrained)
	load_npz(trained_file, trained)
	
	if device >=0:
		pretrained.to_gpu()
		trained.to_gpu()
	i = 0
	
	while not iterator.is_new_epoch:
		
		if not whole and i >= N:
			break

		image, labels = converter(iterator.next())
		image = Variable(image)
		if device >=0:
			image.to_gpu()

		xp = get_array_module(image.data)
		to_substract = np.array((-1, 0))
		noise_classes = np.unique(labels[0]).astype(np.int32)
		target = xp.asarray([[0]*(no_of_classes)])
		gt_labels = np.setdiff1d(noise_classes, to_substract) - 1

		gcam1, cl_scores1, class_id1 = pretrained.stream_cl(image, gt_labels)
		gcam2, cl_scores2, class_id2 = trained.stream_cl(image, gt_labels)

		if device >= 0:
			# the original references an undefined `class_id`; converting the computed ids instead (assumed intent)
			class_id1 = cp.asnumpy(class_id1)
			class_id2 = cp.asnumpy(class_id2)
Example #17
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--pretrained', type=str,
                        help='path to model that has trained classifier but has not been trained through GAIN routine',
                        default='classifier_padding_1_model_594832')
    parser.add_argument('--trained', type=str, help='path to model trained through GAIN',
                        default='result/MYGAIN_5_to_1_padding_1_all_update_model_20000')
    parser.add_argument('--device', type=int, default=0, help='gpu id')
    parser.add_argument('--shuffle', type=bool, default=False, help='whether to shuffle dataset')
    parser.add_argument('--whole', type=bool, default=False, help='whether to test for the whole validation dataset')
    parser.add_argument('--no', type=int, default=50, help='if not whole, then no of images to visualize')
    parser.add_argument('--name', type=str, default='viz1', help='name of the subfolder or experiment under which to save')

    args = parser.parse_args()

    # pretrained_file = args.pretrained
    trained_file = args.trained
    device = args.device
    shuffle = args.shuffle
    whole = args.whole
    name = args.name
    N = args.no

    dataset = MyTrainingDataset(split='val')
    iterator = SerialIterator(dataset, 1, shuffle=shuffle, repeat=False)
    converter = chainer.dataset.concat_examples
    os.makedirs('viz/' + name, exist_ok=True)
    no_of_classes = 20
    device = 0
    pretrained = FCN8s_hand()
    trained = FCN8s_hand()
    # load_npz(pretrained_file, pretrained)
    load_npz(trained_file, trained)

    if device >= 0:
        pretrained.to_gpu()
        trained.to_gpu()
    i = 0

    while not iterator.is_new_epoch:

        if not whole and i >= N:
            break

        image, labels, metadata = converter(iterator.next())
        np_input_img = image
        np_input_img = np.uint8(np_input_img[0])
        np_input_img = np.transpose(np_input_img, (1,2,0))
        image = Variable(image)
        if device >= 0:
            image.to_gpu()

        xp = get_array_module(image.data)
        to_substract = np.array((-1, 0))
        noise_classes = np.unique(labels[0]).astype(np.int32)
        target = xp.asarray([[0] * (no_of_classes)])
        gt_labels = np.setdiff1d(noise_classes, to_substract) - 1

        # gcam1, cl_scores1, class_id1 = pretrained.stream_cl(image)
        # gcam2, cl_scores2, class_id2 = trained.stream_cl(image)
        # gcams1, cl_scores1, class_ids1 = pretrained.stream_cl_multi(image)
        gcams2, cl_scores2, class_ids2 = trained.stream_cl_multi(image)

        print(np_input_img.shape)
        bboxes_per_class, pointed_bbox = gcams_to_bboxes(gcams2, class_ids2, input_image=np_input_img)

        # for bboxes in bboxes_per_class:
        #     for bbox in bboxes:
        #         cv2.rectangle(np_input_img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), [255,255,255], 2)
        display_img = cv2.cvtColor(np_input_img.copy(), cv2.COLOR_RGB2BGR)
        # if there's a hand and a pointed obj, draw rects
        if len(class_ids2) >= 2 and class_ids2[-1] == 20:
            cv2.rectangle(display_img, (int(pointed_bbox[0]), int(pointed_bbox[1])), (int(pointed_bbox[2]), int(pointed_bbox[3])), [255, 255, 255], 2)
            # redraw hand bounding box with different color
            for bbox in bboxes_per_class[-1]:
                cv2.rectangle(display_img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), [0,255,0], 2)
        cv2.imshow('input img', display_img)
        cv2.waitKey(0)

        # Note: class_id is never defined in this function, so this conversion is disabled.
        # if device >= 0:
        #     class_id = cp.asnumpy(class_id)
        # fig1 = plt.figure(figsize=(20, 10))
        # ax1 = plt.subplot2grid((3, 9), (0, 0), colspan=3, rowspan=3)
        # ax1.axis('off')
        # ax1.imshow(cp.asnumpy(F.transpose(F.squeeze(image, 0), (1, 2, 0)).data) / 255.)
        #
        # ax2 = plt.subplot2grid((3, 9), (0, 3), colspan=3, rowspan=3)
        # ax2.axis('off')
        # ax2.imshow(cp.asnumpy(F.transpose(F.squeeze(image, 0), (1, 2, 0)).data) / 255.)
        # ax2.imshow(cp.asnumpy(F.squeeze(gcam1[0], 0).data), cmap='jet', alpha=.5)
        # ax2.set_title("Before GAIN for class - " + str(dataset.class_names[cp.asnumpy(class_id1)+1]),
        #               color='teal')
        #
        # ax3 = plt.subplot2grid((3, 9), (0, 6), colspan=3, rowspan=3)
        # ax3.axis('off')
        # ax3.imshow(cp.asnumpy(F.transpose(F.squeeze(image, 0), (1, 2, 0)).data) / 255.)
        # ax3.imshow(cp.asnumpy(F.squeeze(gcam2[0], 0).data), cmap='jet', alpha=.5)
        # ax3.set_title("After GAIN for class - " + str(dataset.class_names[cp.asnumpy(class_id2)+1]),
        #               color='teal')
        # fig1.savefig('viz/' + name + '/' + str(i) + '.png')
        # plt.close()
        print(i)
        i += 1