Code Example #1
    def __init__(self, policy, policy_target, qfns, qfns_target, sampler, *,
                 FLAGS):
        super().__init__()
        self.FLAGS = FLAGS
        self.policy = policy
        self.policy_target = policy_target
        self.qfns = qfns
        self.qfns_target = qfns_target
        self.sampler = sampler

        self.qfns_opt = nn.Adam(
            sum([qfn.parameters() for qfn in self.qfns], []), FLAGS.lr)
        self.policy_opt = nn.Adam(self.policy.parameters(), FLAGS.lr)

        self.n_batches = 0
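
The constructor above builds a single Adam optimizer over several Q-networks by flattening their parameter lists with `sum(..., [])`. A minimal sketch of the same pattern, assuming two stand-in `nn.Linear` modules in place of the real Q-functions:

import jittor as jt
from jittor import nn

q1, q2 = nn.Linear(4, 1), nn.Linear(4, 1)   # stand-ins for the Q-networks

# sum(list_of_lists, []) concatenates the per-module parameter lists
params = sum([m.parameters() for m in (q1, q2)], [])
opt = nn.Adam(params, lr=1e-3)

x = jt.randn(8, 4)
loss = ((q1(x) + q2(x)) ** 2).mean()
opt.step(loss)  # Jittor's step(loss) runs backward and the update in one call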
Code Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-io', '--filename_obj', type=str, default=os.path.join(data_dir, 'teapot.obj'))
    parser.add_argument('-ir', '--filename_ref', type=str, default=os.path.join(data_dir, 'example4_ref.png'))
    parser.add_argument('-or', '--filename_output', type=str, default=os.path.join(data_dir, 'example4_result.gif'))
    parser.add_argument('-mr', '--make_reference_image', type=int, default=0)
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    if args.make_reference_image:
        make_reference_image(args.filename_ref, args.filename_obj)

    model = Model(args.filename_obj, args.filename_ref)

    optimizer = nn.Adam(model.parameters(), lr=0.1)
    loop = tqdm.tqdm(range(1000))
    for i in loop:
        loss = model()
        optimizer.step(loss)
        images, _, _ = model.renderer(model.vertices, model.faces, jt.tanh(model.textures))
        image = images.numpy()[0].transpose(1,2,0)
        imsave('/tmp/_tmp_%04d.png' % i, image)
        loop.set_description('Optimizing (loss %.4f)' % loss.data)
        if loss.data < 70:
            break
    make_gif(args.filename_output)
Code Example #3
File: demo2-deform.py    Project: shuiguoli/jrender
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename-input', type=str, 
        default=os.path.join(data_dir, 'source.npy'))
    parser.add_argument('-c', '--camera-input', type=str, 
        default=os.path.join(data_dir, 'camera.npy'))
    parser.add_argument('-t', '--template-mesh', type=str, 
        default=os.path.join(data_dir, 'obj/sphere/sphere_1352.obj'))
    parser.add_argument('-o', '--output-dir', type=str, 
        default=os.path.join(data_dir, 'results/output_deform'))
    parser.add_argument('-b', '--batch-size', type=int,
        default=120)
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)
    model = Model(args.template_mesh)

    renderer = jr.Renderer(image_size=64, sigma_val=1e-4, aggr_func_rgb='hard', camera_mode='look_at', viewing_angle=15, dr_type='softras')

    # read training images and camera poses
    images = np.load(args.filename_input).astype('float32') / 255.
    cameras = np.load(args.camera_input).astype('float32')
    optimizer = nn.Adam(model.parameters(), 0.01, betas=(0.5, 0.99))
    
    camera_distances = jt.array(cameras[:, 0])
    elevations = jt.array(cameras[:, 1])
    viewpoints = jt.array(cameras[:, 2])
    renderer.transform.set_eyes_from_angles(camera_distances, elevations, viewpoints)

    import time
    sta = time.time()
    loop = tqdm.tqdm(list(range(0, 1000)))
    writer = imageio.get_writer(os.path.join(args.output_dir, 'deform.gif'), mode='I')
    for i in loop:
        images_gt = jt.array(images)

        mesh, laplacian_loss, flatten_loss = model(args.batch_size)
        images_pred = renderer.render_mesh(mesh, mode='silhouettes')

        # optimize mesh with silhouette reprojection error and 
        # geometry constraints
        loss = neg_iou_loss(images_pred, images_gt[:, 3]) + \
               0.03 * laplacian_loss + \
               0.0003 * flatten_loss
            
        loop.set_description('Loss: %.4f'%(loss.item()))
        optimizer.step(loss)
        
        if i % 100 == 0:
            image = images_pred.numpy()[0]#.transpose((1, 2, 0))
            imageio.imsave(os.path.join(args.output_dir, 'deform_%05d.png'%i), (255*image).astype(np.uint8))
            writer.append_data((255*image).astype(np.uint8))

    # save optimized mesh
    model(1)[0].save_obj(os.path.join(args.output_dir, 'plane.obj'), save_texture=False)
    print(f"Cost {time.time() - sta} secs.")
Code Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-io',
                        '--filename_obj',
                        type=str,
                        default=os.path.join(data_dir, 'teapot.obj'))
    parser.add_argument('-ir',
                        '--filename_ref',
                        type=str,
                        default=os.path.join(data_dir, 'example2_ref.png'))
    parser.add_argument('-oo',
                        '--filename_output_optimization',
                        type=str,
                        default=os.path.join(data_dir,
                                             'example2_optimization.gif'))
    parser.add_argument('-or',
                        '--filename_output_result',
                        type=str,
                        default=os.path.join(data_dir, 'example2_result.gif'))
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    model = Model(args.filename_obj, args.filename_ref)

    optimizer = nn.Adam(model.parameters(), lr=1e-3)
    loop = tqdm.tqdm(range(300))
    for i in loop:
        loop.set_description('Optimizing')
        loss = model()
        optimizer.step(loss)
        images = model.renderer(model.vertices,
                                model.faces,
                                mode='silhouettes')
        image = images.numpy()[0, 0]
        imsave('/tmp/_tmp_%04d.png' % i, image)
    make_gif(args.filename_output_optimization)

    # draw object
    loop = tqdm.tqdm(range(0, 360, 4))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        model.renderer.eye = nr.get_points_from_angles(2.732, 0, azimuth)
        images, _, _ = model.renderer(model.vertices, model.faces,
                                      model.textures)
        image = images.numpy()[0].transpose((1, 2, 0))
        imsave('/tmp/_tmp_%04d.png' % num, image)
    make_gif(args.filename_output_result)
Code Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-io',
                        '--filename_obj',
                        type=str,
                        default=os.path.join(data_dir,
                                             'obj/spot/spot_triangulated.obj'))
    parser.add_argument('-ir',
                        '--filename_ref',
                        type=str,
                        default=os.path.join(data_dir,
                                             'ref/ref_roughness.png'))
    parser.add_argument('-or',
                        '--filename_output',
                        type=str,
                        default=os.path.join(
                            data_dir,
                            'results/output_optim_roughness_textures'))
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()
    os.makedirs(args.filename_output, exist_ok=True)

    model = Model(args.filename_obj, args.filename_ref)

    optimizer = nn.Adam([model.roughness_textures], lr=0.1, betas=(0.5, 0.999))
    loop = tqdm.tqdm(range(15))
    for num in loop:
        loop.set_description('Optimizing')
        loss = model()
        optimizer.step(loss)

        model.renderer.transform.set_eyes_from_angles(2.732, 30, 140)
        images = model.renderer(model.vertices,
                                model.faces,
                                model.textures,
                                metallic_textures=model.metallic_textures,
                                roughness_textures=model.roughness_textures)
        image = images.numpy()[0].transpose((1, 2, 0))
        imsave('/tmp/_tmp_%04d.png' % num, image)

    make_gif(os.path.join(args.filename_output, 'result.gif'))
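
Note that `nn.Adam` here receives a plain list containing a single variable (`model.roughness_textures`) rather than a module's parameter list, so only that texture tensor is updated. A minimal sketch of optimizing a free `jt.Var` the same way (the variable, target, and loss are made up for illustration; in the example above the variable lives inside the `Model`):

import jittor as jt
from jittor import nn

texture = jt.zeros((1, 64, 3))        # free variable to optimize
target = jt.ones((1, 64, 3)) * 0.5    # made-up target

opt = nn.Adam([texture], lr=0.1, betas=(0.5, 0.999))
for _ in range(10):
    loss = ((texture - target) ** 2).mean()
    opt.step(loss)                    # updates only `texture`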
Code Example #6
discriminator = Discriminator()

# Load the MNIST dataset
from jittor.dataset.mnist import MNIST
import jittor.transform as transform
transform = transform.Compose([
    transform.Resize(opt.img_size),
    transform.Gray(),
    transform.ImageNormalize(mean=[0.5], std=[0.5]),
])
dataloader = MNIST(train=True,
                   transform=transform).set_attrs(batch_size=opt.batch_size,
                                                  shuffle=True)

optimizer_G = nn.Adam(generator.parameters(),
                      lr=opt.lr,
                      betas=(opt.b1, opt.b2))
optimizer_D = nn.Adam(discriminator.parameters(),
                      lr=opt.lr,
                      betas=(opt.b1, opt.b2))

from PIL import Image


def save_image(img, path, nrow=10, padding=5):
    N, C, W, H = img.shape
    if (N % nrow != 0):
        print("N%nrow!=0")
        return
    ncol = int(N / nrow)
    img_all = []
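
With `optimizer_G` and `optimizer_D` created as above, a GAN training step alternates between the two optimizers. A minimal sketch with stand-in linear modules and LSGAN-style squared losses (the actual project uses its own generator, discriminator, and loss):

import jittor as jt
from jittor import nn

G, D = nn.Linear(16, 32), nn.Linear(32, 1)   # stand-ins for the real models
optimizer_G = nn.Adam(G.parameters(), lr=2e-4, betas=(0.5, 0.999))
optimizer_D = nn.Adam(D.parameters(), lr=2e-4, betas=(0.5, 0.999))

real = jt.randn(8, 32)
z = jt.randn(8, 16)

# generator step: push the discriminator's score on fakes toward 1
g_loss = ((D(G(z)) - 1) ** 2).mean()
optimizer_G.step(g_loss)

# discriminator step: real samples toward 1, detached fakes toward 0
d_loss = (((D(real) - 1) ** 2).mean() + (D(G(z).detach()) ** 2).mean()) / 2
optimizer_D.step(d_loss)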
Code Example #7
        output = net(pts, normals)
        pred = np.argmax(output.data, axis=1)
        acc = np.sum(pred == labels.data)
        total_acc += acc
        total_num += labels.shape[0]

    acc = total_acc / total_num
    return acc


if __name__ == '__main__':
    freeze_random_seed()

    net = PointNet(n_classes=40)
    optimizer = nn.Adam(net.parameters(), lr=1e-3)

    lr_scheduler = LRScheduler(optimizer)

    batch_size = 32
    train_dataloader = ModelNet40(n_points=4096, batch_size=batch_size, train=True, shuffle=True)
    val_dataloader = ModelNet40(n_points=4096, batch_size=batch_size, train=False, shuffle=False)

    step = 0
    best_acc = 0
    for epoch in range(1000):
        lr_scheduler.step(len(train_dataloader) * batch_size)

        train(net, optimizer, epoch, train_dataloader)
        acc = evaluate(net, epoch, val_dataloader)
Code Example #8
        for conv in self.convs:
            x = nn.dropout(x, self.dropout)
            x = conv(x, x_0, edge_index, edge_weight)
            x = nn.relu(x)

        x = nn.dropout(x, self.dropout)
        x = self.lins[1](x)

        return nn.log_softmax(x, dim=-1)


model = Net(hidden_channels=64, num_layers=64, alpha=0.1, theta=0.5,
            shared_weights=True, dropout=0.6)
optimizer = nn.Adam([
    dict(params=model.convs.parameters(), weight_decay=0.01),
    dict(params=model.lins.parameters(), weight_decay=5e-4)
], lr=0.01)


def train():
    model.train()
    out = model()[data.train_mask]
    label = data.y[data.train_mask]
    loss = nn.nll_loss(
        out, label)
    optimizer.step(loss)
    return float(loss)


def test():
    model.eval()
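
As above, Jittor's `nn.Adam` also accepts a list of parameter-group dicts, so weight decay can differ per group. A minimal sketch with a hypothetical two-layer module:

import jittor as jt
from jittor import nn

class TwoLayer(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin1 = nn.Linear(16, 32)
        self.lin2 = nn.Linear(32, 4)

    def execute(self, x):
        return self.lin2(nn.relu(self.lin1(x)))

model = TwoLayer()
optimizer = nn.Adam([
    dict(params=model.lin1.parameters(), weight_decay=5e-4),  # decay only this group
    dict(params=model.lin2.parameters(), weight_decay=0),
], lr=0.01)

loss = (model(jt.randn(8, 16)) ** 2).mean()
optimizer.step(loss)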
Code Example #9
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = SGConv(dataset.num_features,
                            dataset.num_classes,
                            K=2,
                            cached=True)

    def execute(self):
        x, edge_index = data.x, data.edge_index
        x = self.conv1(x, edge_index)
        return nn.log_softmax(x, dim=1)


model, data = Net(), data
# Only perform weight-decay on first convolution.
optimizer = nn.Adam(model.parameters(), lr=0.2, weight_decay=0.005)


def train():
    model.train()
    pred = model()[data.train_mask]
    label = data.y[data.train_mask]
    loss = nn.nll_loss(pred, label)
    # print(loss)
    optimizer.step(loss)


def test():
    model.eval()
    logits, accs = model(), []
    for _, mask in data('train_mask', 'val_mask', 'test_mask'):
Code Example #10
coupled_generators = CoupledGenerators()
coupled_discriminators = CoupledDiscriminators()

print(coupled_generators)
print(coupled_discriminators)

transform = transform.Compose([
    transform.Resize(opt.img_size),
    transform.ImageNormalize(mean=[0.5], std=[0.5]),
])
dataloader1 = MNIST(train=True, transform=transform).set_attrs(batch_size=opt.batch_size, shuffle=True)

dataloader2 = mnistm.MNISTM(mnist_root = "../../data/mnistm", train=True, transform = transform).set_attrs(batch_size=opt.batch_size, shuffle=True)

# Optimizers
optimizer_G = nn.Adam(coupled_generators.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = nn.Adam(coupled_discriminators.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))

# ----------
#  Training
# ----------
for epoch in range(opt.n_epochs):
    for i, ((imgs1, _), (imgs2, _)) in enumerate(zip(dataloader1, dataloader2)):
        jt.sync_all(True)
        batch_size = imgs1.shape[0]

        # Adversarial ground truths
        valid = jt.ones([batch_size, 1]).float32().stop_grad()
        fake = jt.zeros([batch_size, 1]).float32().stop_grad()

         # ------------------
Code Example #11
File: aae.py    Project: whuyyc/gan-jittor
# Configure data loader
from jittor.dataset.mnist import MNIST
import jittor.transform as transform

transform = transform.Compose([
    transform.Resize(opt.img_size),
    transform.Gray(),
    transform.ImageNormalize(mean=[0.5], std=[0.5]),
])
train_loader = MNIST(train=True,
                     transform=transform).set_attrs(batch_size=opt.batch_size,
                                                    shuffle=True)

# Optimizers
optimizer_G = nn.Adam(encoder.parameters() + decoder.parameters(),
                      lr=opt.lr,
                      betas=(opt.b1, opt.b2))
optimizer_D = nn.Adam(discriminator.parameters(),
                      lr=opt.lr,
                      betas=(opt.b1, opt.b2))


def save_image(img, path, nrow=10, padding=5):
    N, C, W, H = img.shape
    if (N % nrow != 0):
        print("N%nrow!=0")
        return
    ncol = int(N / nrow)
    img_all = []
    for i in range(ncol):
        img_ = []
Code Example #12
def train_net(cfg):
    # Enable the inbuilt cudnn auto-tuner to find the best algorithm to use

    # train_dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TRAIN_DATASET](cfg)
    # test_dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](cfg)

    train_dataset_loader = dataloader_jt.DATASET_LOADER_MAPPING[
        cfg.DATASET.TRAIN_DATASET](cfg)
    test_dataset_loader = dataloader_jt.DATASET_LOADER_MAPPING[
        cfg.DATASET.TEST_DATASET](cfg)

    train_data_loader = train_dataset_loader.get_dataset(
        dataloader_jt.DatasetSubset.TRAIN,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.CONST.NUM_WORKERS,
        shuffle=True)
    val_data_loader = test_dataset_loader.get_dataset(
        dataloader_jt.DatasetSubset.VAL,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.CONST.NUM_WORKERS,
        shuffle=False)

    # Set up folders for logs and checkpoints
    output_dir = os.path.join(cfg.DIR.OUT_PATH, '%s',
                              datetime.now().isoformat())
    cfg.DIR.CHECKPOINTS = output_dir % 'checkpoints'
    cfg.DIR.LOGS = output_dir % 'logs'
    if not os.path.exists(cfg.DIR.CHECKPOINTS):
        os.makedirs(cfg.DIR.CHECKPOINTS)

    # Create tensorboard writers
    train_writer = SummaryWriter(os.path.join(cfg.DIR.LOGS, 'train'))
    val_writer = SummaryWriter(os.path.join(cfg.DIR.LOGS, 'test'))
    model = Model(dataset=cfg.DATASET.TRAIN_DATASET)
    init_epoch = 0
    best_metrics = float('inf')

    optimizer = nn.Adam(model.parameters(),
                        lr=cfg.TRAIN.LEARNING_RATE,
                        weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                        betas=cfg.TRAIN.BETAS)
    lr_scheduler = jittor.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=cfg.TRAIN.LR_MILESTONES,
        gamma=cfg.TRAIN.GAMMA,
        last_epoch=init_epoch)

    # Training/Testing the network
    for epoch_idx in range(init_epoch + 1, cfg.TRAIN.N_EPOCHS + 1):
        epoch_start_time = time()

        model.train()

        loss_metric = AverageMeter()
        n_batches = len(train_data_loader)
        print('epoch: ', epoch_idx, 'optimizer: ', lr_scheduler.get_lr())
        with tqdm(train_data_loader) as t:
            for batch_idx, (taxonomy_ids, model_ids, data) in enumerate(t):
                partial = jittor.array(data['partial_cloud'])
                gt = jittor.array(data['gtcloud'])
                pcds, deltas = model(partial)

                cd1 = chamfer(pcds[0], gt)
                cd2 = chamfer(pcds[1], gt)
                cd3 = chamfer(pcds[2], gt)
                loss_cd = cd1 + cd2 + cd3

                delta_losses = []
                for delta in deltas:
                    delta_losses.append(jittor.sum(delta**2))

                loss_pmd = jittor.sum(jittor.stack(delta_losses)) / 3

                loss = loss_cd * cfg.TRAIN.LAMBDA_CD + loss_pmd * cfg.TRAIN.LAMBDA_PMD
                optimizer.step(loss)

                loss_item = loss.item()
                loss_metric.update(loss_item)

                jittor.sync_all()

                t.set_description(
                    '[Epoch %d/%d][Batch %d/%d]' %
                    (epoch_idx, cfg.TRAIN.N_EPOCHS, batch_idx + 1, n_batches))
                t.set_postfix(loss='%s' % ['%.4f' % l for l in [loss_item]])

        lr_scheduler.step()
        epoch_end_time = time()
        train_writer.add_scalar('Loss/Epoch/loss', loss_metric.avg(),
                                epoch_idx)
        logging.info(
            '[Epoch %d/%d] EpochTime = %.3f (s) Losses = %s' %
            (epoch_idx, cfg.TRAIN.N_EPOCHS, epoch_end_time - epoch_start_time,
             ['%.4f' % l for l in [loss_metric.avg()]]))

        # Validate the current model
        cd_eval = test_net(cfg, epoch_idx, val_data_loader, val_writer, model)

        # Save checkpoints
        if epoch_idx % cfg.TRAIN.SAVE_FREQ == 0 or cd_eval < best_metrics:
            file_name = 'ckpt-best.pkl' if cd_eval < best_metrics else 'ckpt-epoch-%03d.pkl' % epoch_idx
            output_path = os.path.join(cfg.DIR.CHECKPOINTS, file_name)

            model.save(output_path)

            logging.info('Saved checkpoint to %s ...' % output_path)
            if cd_eval < best_metrics:
                best_metrics = cd_eval

    train_writer.close()
    val_writer.close()
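
The loop above pairs `nn.Adam` with Jittor's `MultiStepLR`: `optimizer.step(loss)` updates the weights every batch, while `lr_scheduler.step()` decays the learning rate once per epoch at the configured milestones. A minimal sketch of that pairing (model, milestones, and gamma are placeholder values):

import jittor as jt
from jittor import nn
from jittor.lr_scheduler import MultiStepLR

model = nn.Linear(8, 2)
optimizer = nn.Adam(model.parameters(), lr=1e-3, weight_decay=0, betas=(0.9, 0.999))
scheduler = MultiStepLR(optimizer, milestones=[30, 60], gamma=0.1)

for epoch in range(3):
    loss = (model(jt.randn(4, 8)) ** 2).mean()
    optimizer.step(loss)   # per-batch weight update
    scheduler.step()       # per-epoch learning-rate decay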
Code Example #13
    def __setup_dis_optim(self, learning_rate, beta_1, beta_2, eps):
        # self.dis_optim = torch.optim.Adam(self.dis.parameters(), lr=learning_rate, betas=(beta_1, beta_2), eps=eps)
        self.dis_optim = nn.Adam(self.dis.parameters(),
                                 lr=learning_rate,
                                 betas=(beta_1, beta_2),
                                 eps=eps)
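
The commented-out line preserves the original PyTorch call: `torch.optim.Adam(...)` maps directly onto Jittor's `nn.Adam` with the same `lr`, `betas`, and `eps` arguments, and the separate `zero_grad()` / `backward()` / `step()` calls collapse into a single `step(loss)`. A minimal sketch of the Jittor side (module and data are made up):

import jittor as jt
from jittor import nn

net = nn.Linear(4, 1)
opt = nn.Adam(net.parameters(), lr=2e-4, betas=(0.5, 0.999), eps=1e-8)

x, y = jt.randn(8, 4), jt.randn(8, 1)
loss = ((net(x) - y) ** 2).mean()
opt.step(loss)  # replaces opt.zero_grad(); loss.backward(); opt.step() in PyTorch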
Code Example #14
                             dataset.num_classes,
                             cached=True,
                             normalize=not args.use_gdc)

    def execute(self):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        x = nn.relu(self.conv1(x, edge_index, edge_weight))
        x = nn.dropout(x)
        x = self.conv2(x, edge_index, edge_weight)
        return nn.log_softmax(x, dim=1)


model, data = Net(), data
optimizer = nn.Adam([
    dict(params=model.conv1.parameters(), weight_decay=5e-4),
    dict(params=model.conv2.parameters(), weight_decay=0)
],
                    lr=0.01)  # Only perform weight-decay on first convolution.


def train():
    model.train()
    pred = model()[data.train_mask]
    label = data.y[data.train_mask]
    loss = nn.nll_loss(pred, label)
    optimizer.step(loss)


def test():
    model.eval()
    logits, accs = model(), []
Code Example #15
parser.add_argument('-df', '--demo-freq', type=int, default=DEMO_FREQ)
parser.add_argument('-sf', '--save-freq', type=int, default=SAVE_FREQ)
parser.add_argument('-s', '--seed', type=int, default=RANDOM_SEED)
args = parser.parse_args()

np.random.seed(args.seed)

directory_output = os.path.join(args.model_directory, args.experiment_id)
os.makedirs(directory_output, exist_ok=True)
image_output = os.path.join(directory_output, 'pic')
os.makedirs(image_output, exist_ok=True)

# setup model & optimizer
model = models.Model('data/obj/sphere_642.obj', args=args)

optimizer = nn.Adam(model.model_param(), args.learning_rate)

start_iter = START_ITERATION
if args.resume_path:
    state_dicts = jt.load(args.resume_path)
    model.load_state_dict(state_dicts['model'])
    optimizer.load_state_dict(state_dicts['optimizer'])
    start_iter = int(os.path.split(args.resume_path)[1][11:].split('.')[0]) + 1
    print('Resuming from %s iteration' % start_iter)

dataset_train = datasets.ShapeNet(args.dataset_directory,
                                  args.class_ids.split(','), 'train')


def train():
    end = time.time()