コード例 #1
0
def train(epochs, iterations, batchsize, validsize, outdir, modeldir,
          extension, train_size, valid_size, data_path, sketch_path, digi_path,
          learning_rate, beta1, weight_decay):
    """Train a pix2pix-style UNet generator against a discriminator.

    Each epoch alternates discriminator and generator updates over
    minibatches, saves a generator snapshot at the start of the epoch,
    and renders validation outputs via ``Visualizer``.

    Args:
        epochs: Number of training epochs.
        iterations: Number of samples consumed per epoch; the inner loop
            advances in steps of ``batchsize``.
        batchsize: Minibatch size for each optimizer update.
        validsize: Number of validation pairs drawn once up front.
        outdir: Directory where visualization images are written.
        modeldir: Directory where generator snapshots are saved.
        extension, train_size, valid_size, data_path, sketch_path, digi_path:
            Forwarded to ``DatasetLoader`` — see its definition for semantics.
        learning_rate, beta1, weight_decay: Optimizer hyperparameters
            forwarded to ``set_optimizer`` for both networks.
    """
    # Dataset definition
    dataset = DatasetLoader(data_path, sketch_path, digi_path, extension,
                            train_size, valid_size)
    print(dataset)
    # Validation pairs are fixed for the whole run so progress images
    # are comparable across epochs.
    x_val, t_val = dataset.valid(validsize)

    # Model & Optimizer definition
    unet = UNet()
    unet.to_gpu()
    unet_opt = set_optimizer(unet, learning_rate, beta1, weight_decay)

    discriminator = Discriminator()
    discriminator.to_gpu()
    dis_opt = set_optimizer(discriminator, learning_rate, beta1, weight_decay)

    # Loss function definition
    lossfunc = Pix2pixLossCalculator()

    # Visualization definition
    visualizer = Visualizer()

    # The inner loop steps by ``batchsize``, so exactly this many losses
    # are accumulated per epoch. The original code divided the sums by
    # ``iterations``, which under-reports the average loss by a factor
    # of ``batchsize``; divide by the real step count instead.
    steps_per_epoch = len(range(0, iterations, batchsize))

    for epoch in range(epochs):
        sum_dis_loss = 0
        sum_gen_loss = 0
        for batch in range(0, iterations, batchsize):
            x, t = dataset.train(batchsize)

            # Discriminator update: detach the generator output so the
            # discriminator loss does not backprop into the UNet.
            y = unet(x)
            y.unchain_backward()

            dis_loss = lossfunc.dis_loss(discriminator, y, t)

            discriminator.cleargrads()
            dis_loss.backward()
            dis_opt.update()

            sum_dis_loss += dis_loss.data

            # Generator update: recompute the forward pass so gradients
            # flow through the UNet this time.
            y = unet(x)

            gen_loss = lossfunc.gen_loss(discriminator, y)
            gen_loss += lossfunc.content_loss(y, t)

            unet.cleargrads()
            gen_loss.backward()
            unet_opt.update()

            sum_gen_loss += gen_loss.data

            # Once per epoch (first minibatch): snapshot the generator and
            # render validation outputs in inference mode.
            if batch == 0:
                serializers.save_npz(f"{modeldir}/unet_{epoch}.model", unet)

                with chainer.using_config("train", False):
                    y = unet(x_val)

                # ``.data.get()`` copies arrays from GPU to host memory.
                x = x_val.data.get()
                t = t_val.data.get()
                y = y.data.get()

                visualizer(x, t, y, outdir, epoch, validsize)

        print(f"epoch: {epoch}")
        print(
            f"dis loss: {sum_dis_loss/steps_per_epoch} gen loss: {sum_gen_loss/steps_per_epoch}"
        )
コード例 #2
0
ファイル: train_all.py プロジェクト: Akolada/VideoProcessing
# Load a single fixed test image and replicate it across the frame axis
# so every frame in a ``framesize``-long window shares the same
# conditioning image.
test_path="./test.png"
test=prepare_image(test_path)
# NOTE(review): ``xp`` is presumably the chainer GPU/CPU array module
# (cupy or numpy) imported elsewhere — confirm against the full file.
test=chainer.as_variable(xp.array(test).astype(xp.float32))
test=F.tile(test,(framesize,1,1,1))

# Image encoder and its optimizer (``set_optimizer`` is a project helper;
# its defaults are not visible here).
image_encoder=ImageEncoder()
image_encoder.to_gpu()
enc_opt=set_optimizer(image_encoder)

# Keypoint detector and its optimizer.
key_point_detector = KeyPointDetector()
key_point_detector.to_gpu()
key_opt = set_optimizer(key_point_detector)

# UNet used to produce optical flow; takes 4 input channels.
making_optical_flow = UNet(in_ch=4)
making_optical_flow.to_gpu()
ref_opt=set_optimizer(making_optical_flow)

# Frame generator; takes 3 input channels.
generator = Generator(in_ch=3)
generator.to_gpu()
gen_opt = set_optimizer(generator)

# Temporal discriminator (judges sequences) and its optimizer.
discriminator_temporal = Discriminator_temporal()
discriminator_temporal.to_gpu()
dis_temp_opt=set_optimizer(discriminator_temporal)

# Per-image discriminator (judges single frames) and its optimizer.
discriminator_image = Discriminator_image()
discriminator_image.to_gpu()
dis_img_opt = set_optimizer(discriminator_image)
for epoch in range(epochs):
コード例 #3
0
# Optimizer for the x→y generator (``generator_xy`` is constructed above
# this fragment — not visible here).
gen_opt_xy = set_optimizer(generator_xy)

# y→x generator and its optimizer (cycle-GAN style paired translators).
generator_yx = Generator()
generator_yx.to_gpu()
gen_opt_yx = set_optimizer(generator_yx)

# Discriminator for the x→y direction.
discriminator_xy = Discriminator()
discriminator_xy.to_gpu()
dis_opt_xy = set_optimizer(discriminator_xy)

# Discriminator for the y→x direction.
discriminator_yx = Discriminator()
discriminator_yx.to_gpu()
dis_opt_yx = set_optimizer(discriminator_yx)

# UNet predictor for the x domain.
predictor_x = UNet()
predictor_x.to_gpu()
pre_opt_x = set_optimizer(predictor_x)

# UNet predictor for the y domain.
predictor_y = UNet()
predictor_y.to_gpu()
pre_opt_y = set_optimizer(predictor_y)

for epoch in range(epochs):
    sum_dis_loss = 0
    sum_gen_loss = 0
    for batch in range(0, iterations, batchsize):
        x_box = []
        y_box = []
        rnd1 = np.random.randint(x_len - batchsize)
        rnd2 = np.random.randint(y_len - batchsize)
        for index in range(batchsize):