Ejemplo n.º 1
0
def run():
    """Command-line entry point: DeepDream-optimize a mesh, then render it as a GIF."""
    # command-line settings
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename_obj', type=str)
    parser.add_argument('-o', '--filename_output', type=str)
    parser.add_argument('-d', '--output_directory', type=str)
    parser.add_argument('-al', '--adam_lr', type=float, default=0.01)
    parser.add_argument('-ab1', '--adam_beta1', type=float, default=0.9)
    parser.add_argument('-bs', '--batch_size', type=int, default=4)
    parser.add_argument('-ni', '--num_iteration', type=int, default=1000)
    parser.add_argument('-cd', '--camera_distance', type=float, default=2.5)
    parser.add_argument('-ib', '--init_bias', type=str, default='(0,0,0)')
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()
    # '(x,y,z)' string -> (x, y, z) float tuple
    args.init_bias = tuple(float(v) for v in args.init_bias[1:-1].split(','))

    # make sure the output directory exists
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)

    # select GPU and fix the random seeds for reproducibility
    chainer.cuda.get_device_from_id(args.gpu).use()
    np.random.seed(0)
    cp.random.seed(0)

    # build the model and its optimizer
    model = deep_dream_3d.DeepDreamModel(
        args.filename_obj,
        camera_distance=args.camera_distance,
        init_bias=args.init_bias)
    model.to_gpu()
    optimizer = neural_renderer.Adam(alpha=args.adam_lr, beta1=args.adam_beta1)
    optimizer.setup(model)

    # gradient-descent loop
    progress = tqdm.tqdm(range(args.num_iteration))
    for _ in progress:
        optimizer.target.cleargrads()
        loss = model(args.batch_size)
        loss.backward()
        optimizer.update()
        progress.set_description('Optimizing. Loss %.4f' % loss.data)

    # render the optimized mesh from 90 azimuth angles and assemble a GIF
    model.renderer.background_color = (1, 1, 1)
    progress = tqdm.tqdm(range(0, 360, 4))
    for frame, azimuth in enumerate(progress):
        progress.set_description('Drawing')
        model.renderer.eye = neural_renderer.get_points_from_angles(2.732, 30, azimuth)
        images = model.renderer.render(*model.mesh.get_batch(1))
        image = images.data.get()[0].transpose((1, 2, 0))
        scipy.misc.toimage(image, cmin=0, cmax=1).save('%s/_tmp_%04d.png' % (args.output_directory, frame))
    make_gif(args.output_directory, args.filename_output)
Ejemplo n.º 2
0
def run():
    """Command-line entry point: run 3D style transfer on a mesh, save the
    resulting .obj next to --filename_output, and render a turntable GIF."""
    # load settings
    parser = argparse.ArgumentParser()
    parser.add_argument('-im', '--filename_mesh', type=str)
    parser.add_argument('-is', '--filename_style', type=str)
    parser.add_argument('-o', '--filename_output', type=str)
    parser.add_argument('-ls', '--lambda_style', type=float, default=1.)
    parser.add_argument('-lc', '--lambda_content', type=float, default=2e9)
    parser.add_argument('-ltv', '--lambda_tv', type=float, default=1e7)
    parser.add_argument('-emax', '--elevation_max', type=float, default=40.)
    parser.add_argument('-emin', '--elevation_min', type=float, default=20.)
    parser.add_argument('-lrv', '--lr_vertices', type=float, default=0.01)
    parser.add_argument('-lrt', '--lr_textures', type=float, default=1.0)
    parser.add_argument('-cd', '--camera_distance', type=float, default=2.732)
    parser.add_argument('-cdn',
                        '--camera_distance_noise',
                        type=float,
                        default=0.1)
    parser.add_argument('-ts', '--texture_size', type=int, default=4)
    parser.add_argument('-lr', '--adam_lr', type=float, default=0.05)
    parser.add_argument('-ab1', '--adam_beta1', type=float, default=0.9)
    parser.add_argument('-ab2', '--adam_beta2', type=float, default=0.999)
    parser.add_argument('-bs', '--batch_size', type=int, default=4)
    parser.add_argument('-im_s', '--image_size', type=int, default=400)
    parser.add_argument('-ni', '--num_iteration', type=int, default=1000)
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    # create output directory
    directory_output = os.path.dirname(args.filename_output)
    if not os.path.exists(directory_output):
        os.makedirs(directory_output)

    # setup chainer: select GPU and fix the random seeds for reproducibility
    chainer.cuda.get_device_from_id(args.gpu).use()
    cp.random.seed(0)
    np.random.seed(0)

    # setup scene
    model = style_transfer_3d.StyleTransferModel(
        filename_mesh=args.filename_mesh,
        filename_style=args.filename_style,
        lambda_style=args.lambda_style,
        lambda_content=args.lambda_content,
        lambda_tv=args.lambda_tv,
        elevation_max=args.elevation_max,
        elevation_min=args.elevation_min,
        lr_vertices=args.lr_vertices,
        lr_textures=args.lr_textures,
        camera_distance=args.camera_distance,
        camera_distance_noise=args.camera_distance_noise,
        texture_size=args.texture_size,
        image_size=args.image_size)
    model.to_gpu()
    # BUGFIX: --adam_beta2 was parsed but never used; forward it to the
    # optimizer. Its default (0.999) matches Adam's own default, so default
    # behavior is unchanged.
    optimizer = neural_renderer.Adam(alpha=args.adam_lr,
                                     beta1=args.adam_beta1,
                                     beta2=args.adam_beta2)
    optimizer.setup(model)

    # optimization loop
    loop = tqdm.tqdm(range(args.num_iteration))
    for _ in loop:
        optimizer.target.cleargrads()
        loss = model(args.batch_size)
        loss.backward()
        optimizer.update()
        loop.set_description('Optimizing. Loss %.4f' % loss.data)

    # save the optimized mesh as a Wavefront .obj, named after the output file
    # (removed a block of commented-out experiments and the unused
    # "fill back" textures_1/faces_1 locals that were computed but never used)
    vertices, faces, textures = model.mesh.get_batch(args.batch_size)
    obj_fn = args.filename_output.split('/')[-1].split('.')[0]
    output_directory = os.path.split(args.filename_output)[0]
    neural_renderer.save_obj('%s/%s.obj' % (output_directory, obj_fn),
                             vertices[0], faces[0], textures[0].array)

    # draw object: render 90 azimuths around the mesh and build a GIF
    model.renderer.background_color = (1, 1, 1)
    loop = tqdm.tqdm(range(0, 360, 4))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        model.renderer.eye = neural_renderer.get_points_from_angles(
            2.732, 30, azimuth)
        images = model.renderer.render(*model.mesh.get_batch(1))
        image = images.data.get()[0].transpose((1, 2, 0))
        scipy.misc.toimage(image, cmin=0, cmax=1).save('%s/_tmp_%04d.png' %
                                                       (directory_output, num))
    make_gif(directory_output, args.filename_output)
Ejemplo n.º 3
0
def run():
    """Command-line entry point: train a single-view 3D reconstruction model
    on ShapeNet or Pascal, with logging, validation and drawing extensions.

    The dataset family is chosen by a bare 'shapenet' or 'pascal' token on
    the command line, which selects a family-specific set of defaults.
    """
    class_list_shapenet = [
        '02691156', '02828884', '02933112', '02958343', '03001627', '03211117',
        '03636649', '03691459', '04090263', '04256520', '04379243', '04401088',
        '04530566'
    ]
    class_list_pascal = ['aeroplane', 'car', 'chair']

    parser = argparse.ArgumentParser()

    # system
    parser.add_argument('-eid', '--experiment_id', type=str, required=True)
    parser.add_argument('-md', '--model_directory', type=str,
                        default='./data/models')
    parser.add_argument('-dd', '--dataset_directory', type=str,
                        default='./data/dataset')
    parser.add_argument('-li', '--log_interval', type=int, default=10000)
    parser.add_argument('-sm', '--save_model', type=int, default=0)
    parser.add_argument('-rs', '--random_seed', type=int, default=0)
    parser.add_argument('-g', '--gpu', type=int, default=0)

    # dataset
    parser.add_argument('-is', '--image_size', type=int, default=224)

    # loss function
    parser.add_argument('-sll', '--silhouette_loss_levels', type=int,
                        default=5)
    parser.add_argument('-ls', '--lambda_silhouettes', type=float, default=1)
    parser.add_argument('-lt', '--lambda_textures', type=float, default=0)
    parser.add_argument('-lp', '--lambda_perceptual', type=float, default=0)
    parser.add_argument('-ld', '--lambda_discriminator', type=float, default=0)
    parser.add_argument('-ld2', '--lambda_discriminator2', type=float,
                        default=0)
    parser.add_argument('-cc', '--class_conditional', type=int, default=0)

    # components
    parser.add_argument('-ts', '--texture_scaling', type=float, default=1)

    # training
    parser.add_argument('-dm', '--discriminator_mode', type=str,
                        default='seen_unseen')
    parser.add_argument('-io', '--iterative_optimization', type=int, default=0)
    parser.add_argument('-ab1', '--adam_beta1', type=float, default=0.5)
    parser.add_argument('-ab2', '--adam_beta2', type=float, default=0.999)

    if 'shapenet' in sys.argv:
        # shapenet
        # BUGFIX: the default used to be the misspelled 'shapnet', which made
        # the `args.dataset == 'shapenet'` checks below silently fall through
        # to the Pascal code path.
        parser.add_argument('-ds', '--dataset', type=str, default='shapenet')
        parser.add_argument('-cls', '--class_ids', type=str,
                            default=','.join(class_list_shapenet))
        parser.add_argument('-nv', '--num_views', type=int, default=20)
        parser.add_argument('-svt', '--single_view_training', type=int,
                            default=0)
        parser.add_argument('-wov', '--without_viewpoints', type=int,
                            default=0)
        parser.add_argument('-sym', '--symmetric', type=int, default=0)

        # loss function
        parser.add_argument('-slt', '--silhouette_loss_type', type=str,
                            default='l2')
        parser.add_argument('-linf', '--lambda_inflation', type=float,
                            default=1e-4)
        parser.add_argument('-lgl', '--lambda_graph_laplacian', type=float,
                            default=0)
        parser.add_argument('-lel', '--lambda_edge_length', type=float,
                            default=0)

        # training
        parser.add_argument('-bs', '--batch_size', type=int, default=64)
        parser.add_argument('-lr', '--learning_rate', type=float, default=4e-4)
        parser.add_argument('-ni', '--num_iterations', type=int,
                            default=1000000)

        # components
        parser.add_argument('-et', '--encoder_type', type=str,
                            default='resnet18')
        parser.add_argument('-sdt', '--shape_decoder_type', type=str,
                            default='conv')
        parser.add_argument('-tdt', '--texture_decoder_type', type=str,
                            default='conv')
        parser.add_argument('-dt', '--discriminator_type', type=str,
                            default='shapenet_patch')
        parser.add_argument('-vs', '--vertex_scaling', type=float,
                            default=0.01)

    elif 'pascal' in sys.argv:
        # dataset
        parser.add_argument('-ds', '--dataset', type=str, default='pascal')
        parser.add_argument('-cls', '--class_ids', type=str,
                            default=','.join(class_list_pascal))
        parser.add_argument('-sym', '--symmetric', type=int, default=1)

        # loss function
        parser.add_argument('-slt', '--silhouette_loss_type', type=str,
                            default='iou')
        parser.add_argument('-linf', '--lambda_inflation', type=float,
                            default=0)
        parser.add_argument('-lgl', '--lambda_graph_laplacian', type=float,
                            default=0)

        # training
        parser.add_argument('-bs', '--batch_size', type=int, default=16)
        parser.add_argument('-lr', '--learning_rate', type=float, default=2e-5)
        parser.add_argument('-ni', '--num_iterations', type=int,
                            default=1000000)

        # components
        parser.add_argument('-et', '--encoder_type', type=str,
                            default='resnet18pt')
        parser.add_argument('-sdt', '--shape_decoder_type', type=str,
                            default='fc')
        parser.add_argument('-tdt', '--texture_decoder_type', type=str,
                            default='conv')
        parser.add_argument('-dt', '--discriminator_type', type=str,
                            default='pascal_patch')
        parser.add_argument('-vs', '--vertex_scaling', type=float, default=0.1)

    args = parser.parse_args()
    directory_output = os.path.join(args.model_directory, args.experiment_id)
    class_ids = args.class_ids.split(',')

    # set random seed, gpu
    chainer.cuda.get_device(args.gpu).use()
    random.seed(args.random_seed)
    np.random.seed(args.random_seed)
    cp.random.seed(args.random_seed)

    # load dataset
    if args.dataset == 'shapenet':
        dataset_train = dataset_shapenet.ShapeNet(args.dataset_directory,
                                                  class_ids,
                                                  'train',
                                                  args.num_views,
                                                  args.single_view_training,
                                                  device=args.gpu)
        dataset_val = dataset_shapenet.ShapeNet(args.dataset_directory,
                                                class_ids,
                                                'val',
                                                device=args.gpu)
    else:
        dataset_train = dataset_pascal.Pascal(args.dataset_directory,
                                              class_ids, 'train')
        dataset_val = dataset_pascal.Pascal(args.dataset_directory, class_ids,
                                            'val')
    iterator_train = training.Iterator(dataset_train, args.batch_size)
    draw_batch_train = dataset_train.get_random_batch(16)
    draw_batch_val = dataset_val.get_random_batch(16)

    # texture losses are optional; load the perceptual network only if one of
    # them is active
    no_texture = (args.lambda_textures == 0) and (args.lambda_perceptual == 0)
    if not no_texture:
        import perceptual_loss
        perceptual_loss.get_alex_net()

    # setup model & optimizer
    if args.dataset == 'shapenet':
        if not args.without_viewpoints:
            Model = models.ShapeNetModel
        else:
            Model = models.ShapeNetModelWithoutViewpoint
        model = Model(
            encoder_type=args.encoder_type,
            shape_decoder_type=args.shape_decoder_type,
            texture_decoder_type=args.texture_decoder_type,
            discriminator_type=args.discriminator_type,
            vertex_scaling=args.vertex_scaling,
            texture_scaling=args.texture_scaling,
            silhouette_loss_type=args.silhouette_loss_type,
            silhouette_loss_levels=args.silhouette_loss_levels,
            lambda_silhouettes=args.lambda_silhouettes,
            lambda_textures=args.lambda_textures,
            lambda_perceptual=args.lambda_perceptual,
            lambda_inflation=args.lambda_inflation,
            lambda_discriminator=args.lambda_discriminator,
            lambda_graph_laplacian=args.lambda_graph_laplacian,
            lambda_edge_length=args.lambda_edge_length,
            single_view_training=args.single_view_training,
            class_conditional=args.class_conditional,
            iterative_optimization=args.iterative_optimization,
            discriminator_mode=args.discriminator_mode,
            no_texture=no_texture,
            symmetric=args.symmetric,
            num_views=args.num_views,
        )
        num_views_for_validation = 1
    elif args.dataset == 'pascal':
        model = models.PascalModel(
            encoder_type=args.encoder_type,
            shape_decoder_type=args.shape_decoder_type,
            texture_decoder_type=args.texture_decoder_type,
            discriminator_type=args.discriminator_type,
            silhouette_loss_type=args.silhouette_loss_type,
            vertex_scaling=args.vertex_scaling,
            texture_scaling=args.texture_scaling,
            silhouette_loss_levels=args.silhouette_loss_levels,
            lambda_silhouettes=args.lambda_silhouettes,
            lambda_perceptual=args.lambda_perceptual,
            lambda_inflation=args.lambda_inflation,
            lambda_graph_laplacian=args.lambda_graph_laplacian,
            lambda_discriminator=args.lambda_discriminator,
            no_texture=no_texture,
            symmetric=args.symmetric,
            class_conditional=args.class_conditional,
        )
        num_views_for_validation = None
    model.to_gpu(args.gpu)
    # one Adam per sub-network, all sharing the same hyper-parameters
    adam_params = {
        'alpha': args.learning_rate,
        'beta1': args.adam_beta1,
        'beta2': args.adam_beta2,
    }
    optimizers = {
        'encoder': neural_renderer.Adam(**adam_params),
        'shape_decoder': neural_renderer.Adam(**adam_params),
        'texture_decoder': neural_renderer.Adam(**adam_params),
        'discriminator': neural_renderer.Adam(**adam_params),
    }
    optimizers['encoder'].setup(model.encoder)
    optimizers['shape_decoder'].setup(model.shape_decoder)
    optimizers['texture_decoder'].setup(model.texture_decoder)
    optimizers['discriminator'].setup(model.discriminator)

    # setup trainer
    updater = training.Updater(model,
                               iterator_train,
                               optimizers,
                               converter=training.converter,
                               iterative=args.iterative_optimization)
    trainer = chainer.training.Trainer(updater,
                                       stop_trigger=(args.num_iterations,
                                                     'iteration'),
                                       out=directory_output)
    model.trainer = trainer
    trainer.extend(
        chainer.training.extensions.LogReport(trigger=(args.log_interval,
                                                       'iteration')))
    trainer.extend(
        chainer.training.extensions.PrintReport([
            'iteration', 'main/loss_silhouettes', 'main/loss_discriminator',
            'val/iou', 'elapsed_time'
        ]))
    trainer.extend(chainer.training.extensions.ProgressBar(update_interval=10))
    trainer.extend(functools.partial(training.validation,
                                     model=model,
                                     dataset=dataset_val,
                                     num_views=num_views_for_validation),
                   name='validation',
                   priority=chainer.training.PRIORITY_WRITER,
                   trigger=(args.log_interval, 'iteration'))
    trainer.extend(functools.partial(training.draw,
                                     model=model,
                                     batch=draw_batch_val,
                                     prefix='val'),
                   name='draw_val',
                   trigger=(args.log_interval, 'iteration'))
    trainer.extend(functools.partial(training.draw,
                                     model=model,
                                     batch=draw_batch_train,
                                     prefix='train'),
                   name='draw_train',
                   trigger=(args.log_interval, 'iteration'))
    trainer.reporter.add_observer('main', model)
    trainer.reporter.add_observers('main', model.namedlinks(skipself=True))

    # main loop
    # BUGFIX: removed an unreachable `if True: ... else: <cupy profiling>`
    # scaffold; the dead branch also used Python-2 `print` syntax, which is a
    # SyntaxError under Python 3.
    trainer.run()

    # save model
    if args.save_model:
        chainer.serializers.save_npz(
            os.path.join(directory_output, 'model.npz'), model)
Ejemplo n.º 4
0
def run():
    """Command-line entry point: adversarially train a multi-view silhouette
    reconstruction model (generator + discriminator) on ShapeNet, saving
    snapshots of both networks to the experiment directory."""

    def str2bool(value):
        # argparse's `type=bool` treats ANY non-empty string -- including
        # 'False' -- as True; parse common textual booleans explicitly.
        return str(value).lower() in ('1', 'true', 'yes', 'y')

    # arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-eid', '--experiment_id', type=str)
    parser.add_argument('-d', '--model_directory', type=str,
                        default=MODEL_DIRECTORY)
    parser.add_argument('-dd', '--dataset_directory', type=str,
                        default=DATASET_DIRECTORY)
    parser.add_argument('-dv', '--dataset_views', type=int, default=24)
    parser.add_argument('-cls', '--class_ids', type=str, default=CLASS_IDS_ALL)
    parser.add_argument('-bs', '--batch_size', type=int, default=BATCH_SIZE)
    parser.add_argument('-ls', '--lambda_smoothness', type=float,
                        default=LAMBDA_SMOOTHNESS)
    parser.add_argument('-lr', '--learning_rate', type=float,
                        default=LEARNING_RATE)
    parser.add_argument('-lrp', '--lr_reduce_point', type=float,
                        default=LR_REDUCE_POINT)
    parser.add_argument('-ni', '--num_iterations', type=int,
                        default=NUM_ITERATIONS)
    parser.add_argument('-li', '--log_interval', type=int,
                        default=LOG_INTERVAL)
    parser.add_argument('-s', '--seed', type=int, default=RANDOM_SEED)
    parser.add_argument('-g', '--gpu', type=int, default=GPU)
    # BUGFIX: was `type=bool`, under which `-c False` parsed as True.
    parser.add_argument('-c', '--con', type=str2bool, default=False)
    parser.add_argument('-lstd', '--lambda_std', type=float,
                        default=LAMBDA_STD)
    parser.add_argument('-nviews', '--n_views', type=int, default=N_VIEWS)
    parser.add_argument('-lgp', '--lambda_gp', type=float, default=10)
    args = parser.parse_args()
    directory_output = os.path.join(args.model_directory, args.experiment_id)

    # set random seed, gpu
    random.seed(args.seed)
    np.random.seed(args.seed)
    cp.random.seed(args.seed)
    chainer.cuda.get_device(args.gpu).use()

    # load dataset
    dataset_train = datasets.ShapeNet_NView_Gan(args.dataset_directory,
                                                args.class_ids.split(','),
                                                'train',
                                                n_views=args.n_views,
                                                total_views=args.dataset_views)
    dataset_val = datasets.ShapeNet_NView(args.dataset_directory,
                                          args.class_ids.split(','),
                                          'val',
                                          total_views=args.dataset_views)
    train_iter = training.M_SerialIterator(dataset_train, args.batch_size)

    # setup model & optimizer
    model = model_nview.Model(img_size=64,
                              lambda_smoothness=args.lambda_smoothness,
                              lambda_std=args.lambda_std,
                              n_views=args.n_views)
    model.to_gpu()

    dis = model_discriminator.Discriminator(img_size=64,
                                            img_channel=1,
                                            pos_size=3)
    dis.to_gpu()
    if args.con:
        # resume both networks from previously saved snapshots
        # BUGFIX: converted Python-2 `print x` statements (SyntaxError under
        # Python 3) into print() calls.
        print('loading pretrained model')
        chainer.serializers.load_npz(
            os.path.join(directory_output, 'model.npz'), model)
        print('loading dis')
        chainer.serializers.load_npz(os.path.join(directory_output, 'dis.npz'),
                                     dis)

    opt_gen = neural_renderer.Adam(args.learning_rate)
    opt_gen.setup(model)

    # the discriminator uses a fixed, much smaller learning rate
    opt_dis = neural_renderer.Adam(5e-6)
    opt_dis.setup(dis)

    # setup trainer: GAN updater runs 3 discriminator steps per generator
    # step, with gradient penalty weighted by --lambda_gp
    updater = gan_updater.Updater(
        models=(model, dis),
        iterator={
            'main': train_iter,
        },
        optimizer={
            'gen': opt_gen,
            'dis': opt_dis
        },
        device=args.gpu,
        params={
            'batch_size': args.batch_size,
            'dis_iter': 3,
            'lambda_gp': args.lambda_gp,
            'n_views': args.n_views
        },
        converter=training.my_convertor)
    trainer = chainer.training.Trainer(updater,
                                       stop_trigger=(args.num_iterations,
                                                     'iteration'),
                                       out=directory_output)
    trainer.extend(
        chainer.training.extensions.LogReport(trigger=(args.log_interval,
                                                       'iteration')))
    trainer.extend(
        chainer.training.extensions.PrintReport([
            'iteration', 'main/loss_silhouettes', 'main/loss_smoothness',
            'val/iou', 'elapsed_time'
        ]))
    trainer.extend(chainer.training.extensions.ProgressBar(update_interval=10))
    trainer.extend(functools.partial(training.validation,
                                     model=model,
                                     dataset=dataset_val,
                                     directory_output=directory_output),
                   name='validation',
                   priority=chainer.training.PRIORITY_WRITER,
                   trigger=(args.log_interval, 'iteration'))
    trainer.extend(functools.partial(chainer.serializers.save_npz,
                                     os.path.join(directory_output, 'dis.npz'),
                                     dis),
                   name='save_dis',
                   trigger=(args.log_interval, 'iteration'))

    # BUGFIX: both lr-shift extensions were registered under the same name
    # 'lr_shift'; give them distinct names so the second is not silently
    # renamed by the trainer.
    trainer.extend(functools.partial(
        training.lr_shift,
        optimizer=opt_gen,
        iterations=[args.num_iterations * args.lr_reduce_point]),
                   name='lr_shift_gen',
                   trigger=(1, 'iteration'))
    trainer.extend(functools.partial(
        training.lr_shift,
        optimizer=opt_dis,
        iterations=[args.num_iterations * args.lr_reduce_point]),
                   name='lr_shift_dis',
                   trigger=(1, 'iteration'))

    # main loop
    trainer.run()

    # save model
    chainer.serializers.save_npz(os.path.join(directory_output, 'model.npz'),
                                 model)
    chainer.serializers.save_npz(os.path.join(directory_output, 'dis.npz'),
                                 dis)
Ejemplo n.º 5
0
def run():
    """Command-line entry point: run 3D style transfer on a mesh, render a
    turntable GIF, and print the model's final losses."""
    # load settings
    parser = argparse.ArgumentParser()
    parser.add_argument('-im', '--filename_mesh', type=str)
    parser.add_argument('-is', '--filename_style', type=str)
    parser.add_argument('-o', '--filename_output', type=str)
    parser.add_argument('-ls', '--lambda_style', type=float, default=1.)
    parser.add_argument('-lc', '--lambda_content', type=float, default=2e9)
    parser.add_argument('-ltv', '--lambda_tv', type=float, default=1e7)
    parser.add_argument('-emax', '--elevation_max', type=float, default=40.)
    parser.add_argument('-emin', '--elevation_min', type=float, default=20.)
    parser.add_argument('-lrv', '--lr_vertices', type=float, default=0.01)
    parser.add_argument('-lrt', '--lr_textures', type=float, default=1.0)
    parser.add_argument('-cd', '--camera_distance', type=float, default=2.732)
    parser.add_argument('-cdn',
                        '--camera_distance_noise',
                        type=float,
                        default=0.1)
    parser.add_argument('-ts', '--texture_size', type=int, default=4)
    parser.add_argument('-lr', '--adam_lr', type=float, default=0.05)
    parser.add_argument('-ab1', '--adam_beta1', type=float, default=0.9)
    parser.add_argument('-ab2', '--adam_beta2', type=float, default=0.999)
    parser.add_argument('-bs', '--batch_size', type=int, default=4)
    parser.add_argument('-ni', '--num_iteration', type=int, default=1000)
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    # create output directory
    directory_output = os.path.dirname(args.filename_output)
    if not os.path.exists(directory_output):
        os.makedirs(directory_output)

    # setup chainer: select GPU and fix the random seeds for reproducibility
    chainer.cuda.get_device_from_id(args.gpu).use()
    cp.random.seed(0)
    np.random.seed(0)

    # setup scene
    model = style_transfer_3d.StyleTransferModel(
        filename_mesh=args.filename_mesh,
        filename_style=args.filename_style,
        lambda_style=args.lambda_style,
        lambda_content=args.lambda_content,
        lambda_tv=args.lambda_tv,
        elevation_max=args.elevation_max,
        elevation_min=args.elevation_min,
        lr_vertices=args.lr_vertices,
        lr_textures=args.lr_textures,
        camera_distance=args.camera_distance,
        camera_distance_noise=args.camera_distance_noise,
        texture_size=args.texture_size,
    )
    model.to_gpu()
    # BUGFIX: --adam_beta2 was parsed but never used; forward it to the
    # optimizer. Its default (0.999) matches Adam's own default, so default
    # behavior is unchanged.
    optimizer = neural_renderer.Adam(alpha=args.adam_lr,
                                     beta1=args.adam_beta1,
                                     beta2=args.adam_beta2)
    optimizer.setup(model)

    # optimization loop
    loop = tqdm.tqdm(range(args.num_iteration))
    for _ in loop:
        optimizer.target.cleargrads()
        loss = model(args.batch_size)
        loss.backward()
        optimizer.update()
        loop.set_description('Optimizing. Loss %.4f' % loss.data)

    # draw object: render 90 azimuths around the mesh and build a GIF
    model.renderer.background_color = (1, 1, 1)
    loop = tqdm.tqdm(range(0, 360, 4))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        model.renderer.eye = neural_renderer.get_points_from_angles(
            2.732, 30, azimuth)
        images = model.renderer.render(*model.mesh.get_batch(1))
        image = images.data.get()[0].transpose((1, 2, 0))
        scipy.misc.toimage(image, cmin=0, cmax=1).save('%s/_tmp_%04d.png' %
                                                       (directory_output, num))
    make_gif(directory_output, args.filename_output)
    print(model.getLosses())
Ejemplo n.º 6
0
def run():
    """Train a single-view 3D reconstruction model on ShapeNet.

    Parses command-line options, seeds all RNGs for reproducibility,
    builds the train/validation datasets, and drives a Chainer
    ``Trainer`` with periodic validation, logging, a progress bar, and a
    scheduled learning-rate drop.  The trained weights are saved to
    ``<model_directory>/<experiment_id>/model.npz``.

    All defaults come from module-level constants (``MODEL_DIRECTORY``,
    ``BATCH_SIZE``, ...) defined elsewhere in this file.
    """
    # arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-eid', '--experiment_id', type=str)
    parser.add_argument('-d',
                        '--model_directory',
                        type=str,
                        default=MODEL_DIRECTORY)
    parser.add_argument('-dd',
                        '--dataset_directory',
                        type=str,
                        default=DATASET_DIRECTORY)
    parser.add_argument('-cls', '--class_ids', type=str, default=CLASS_IDS_ALL)
    parser.add_argument('-bs', '--batch_size', type=int, default=BATCH_SIZE)
    parser.add_argument('-ls',
                        '--lambda_smoothness',
                        type=float,
                        default=LAMBDA_SMOOTHNESS)
    parser.add_argument('-lr',
                        '--learning_rate',
                        type=float,
                        default=LEARNING_RATE)
    parser.add_argument('-lrp',
                        '--lr_reduce_point',
                        type=float,
                        default=LR_REDUCE_POINT)
    parser.add_argument('-ni',
                        '--num_iterations',
                        type=int,
                        default=NUM_ITERATIONS)
    parser.add_argument('-li',
                        '--log_interval',
                        type=int,
                        default=LOG_INTERVAL)
    parser.add_argument('-s', '--seed', type=int, default=RANDOM_SEED)
    parser.add_argument('-g', '--gpu', type=int, default=GPU)
    args = parser.parse_args()
    # Every experiment gets its own output directory under model_directory;
    # the Trainer creates it via its `out=` argument.
    directory_output = os.path.join(args.model_directory, args.experiment_id)

    # Seed every RNG (Python, NumPy, CuPy) before any data loading so runs
    # are reproducible, then select the GPU.
    random.seed(args.seed)
    np.random.seed(args.seed)
    cp.random.seed(args.seed)
    # FIX: get_device() is deprecated since Chainer v2; use
    # get_device_from_id(), matching the other run() entry points in this
    # file.  Behavior is identical for an integer device id.
    chainer.cuda.get_device_from_id(args.gpu).use()

    # load dataset
    dataset_train = datasets.ShapeNet(args.dataset_directory,
                                      args.class_ids.split(','), 'train')
    dataset_val = datasets.ShapeNet(args.dataset_directory,
                                    args.class_ids.split(','), 'val')
    train_iter = training.MyIterator(dataset_train, args.batch_size)

    # setup model & optimizer (weights live on the GPU selected above)
    model = models.Model(lambda_smoothness=args.lambda_smoothness)
    model.to_gpu()
    optimizer = neural_renderer.Adam(args.learning_rate)
    optimizer.setup(model)

    # setup trainer: standard updater with a project-specific batch
    # convertor, stopping after the requested number of iterations.
    updater = chainer.training.StandardUpdater(train_iter,
                                               optimizer,
                                               converter=training.my_convertor)
    trainer = chainer.training.Trainer(updater,
                                       stop_trigger=(args.num_iterations,
                                                     'iteration'),
                                       out=directory_output)
    trainer.extend(
        chainer.training.extensions.LogReport(trigger=(args.log_interval,
                                                       'iteration')))
    trainer.extend(
        chainer.training.extensions.PrintReport([
            'iteration', 'main/loss_silhouettes', 'main/loss_smoothness',
            'val/iou', 'elapsed_time'
        ]))
    trainer.extend(chainer.training.extensions.ProgressBar(update_interval=10))
    # Run validation on the val split every log_interval iterations;
    # PRIORITY_WRITER so its reported values land in the same log entry.
    trainer.extend(functools.partial(training.validation,
                                     model=model,
                                     dataset=dataset_val),
                   name='validation',
                   priority=chainer.training.PRIORITY_WRITER,
                   trigger=(args.log_interval, 'iteration'))
    # Drop the learning rate once training passes the lr_reduce_point
    # fraction of total iterations; checked every iteration.
    trainer.extend(functools.partial(
        training.lr_shift,
        optimizer=optimizer,
        iterations=[args.num_iterations * args.lr_reduce_point]),
                   name='lr_shift',
                   trigger=(1, 'iteration'))

    # main loop
    trainer.run()

    # save model
    chainer.serializers.save_npz(os.path.join(directory_output, 'model.npz'),
                                 model)