Example #1
0
 def __init__(self, config, net=None):
     """Record run directories, the (optional) network, and fresh training state."""
     # Output locations come straight from the run configuration.
     self.log_dir = config.log_dir
     self.model_dir = config.model_dir
     # Start at +inf so the first validation pass always counts as the best.
     self.best_val_loss = np.inf
     self.net = net
     self.clock = TrainClock()
     # TensorBoard writer targets the same directory as the text logs.
     self.tb_writer = SummaryWriter(log_dir=self.log_dir)
Example #2
0
    def __init__(self, train_spec, net=None):
        """Create the log/model directories from *train_spec* and set up bookkeeping."""
        self.log_dir = train_spec.log_dir
        self.model_dir = train_spec.log_model_dir
        # Both output directories must exist before anything is written to them.
        for directory in (self.log_dir, self.model_dir):
            ensure_dir(directory)

        self.net = net
        self.clock = TrainClock()
Example #3
0
    def __init__(self, config):
        """Build the network, loss function, and optimizer described by *config*."""
        # Plain bookkeeping pulled from the configuration object.
        self.model_dir = config.model_dir
        self.clock = TrainClock()
        self.batch_size = config.batch_size

        # Network first, then its training companions (order mirrors setup flow).
        self.net = self.build_net(config)
        self.set_loss_function()
        self.set_optimizer(config)
    def __init__(self, config):
        """Assemble network, loss, optimizer, and per-phase TensorBoard writers."""
        # Bookkeeping copied from the run configuration.
        self.log_dir = config.log_dir
        self.model_dir = config.model_dir
        self.clock = TrainClock()
        self.device = config.device
        self.batch_size = config.batch_size

        # Network, then loss, then optimizer — same order the pieces depend on.
        self.net = self.build_net()
        self.set_loss_function()
        self.set_optimizer(config)

        # One event-file writer per phase, both rooted in the log directory.
        log_dir = self.log_dir
        self.train_tb = SummaryWriter(os.path.join(log_dir, 'train.events'))
        self.val_tb = SummaryWriter(os.path.join(log_dir, 'val.events'))
Example #5
0
    def __init__(self, config):
        """Wire up directories, network, loss, optimizer + scheduler, and writers."""
        self.log_dir = config.log_dir
        self.model_dir = config.model_dir
        self.clock = TrainClock()
        self.batch_size = config.batch_size

        # Network first; its loss, optimizer, and LR scheduler follow.
        self.net = self.build_net(config)
        self.set_loss_function()
        self.set_optimizer(config)
        self.set_scheduler(config)

        # Separate TensorBoard event streams for training and validation.
        log_dir = self.log_dir
        self.train_tb = SummaryWriter(os.path.join(log_dir, 'train.events'))
        self.val_tb = SummaryWriter(os.path.join(log_dir, 'val.events'))
Example #6
0
File: eval.py  Project: a1600012888/GradTV
    parser = argparse.ArgumentParser()
    parser.add_argument('--val', action='store_true', help='weither to run validation')
    parser.add_argument('--bench', action='store_true',
                        help = 'weither to generate results on benchmark dataset')
    parser.add_argument('--smooth', action='store_true',
                        help = 'weither to generator smootGrad results on benchmark dataset')
    parser.add_argument('--gen', action = 'store_true',
                        help = 'weither to generate Large eps adversarial examples on benchmark data')

    parser.add_argument('--resume', type=str, default=None,
                        help='checkpoint path')

    args = parser.parse_args()

    clock = TrainClock()
    clock.epoch = 21
    net = ant_model()
    net.cuda()
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            check_point = torch.load(args.resume)
            net.load_state_dict(check_point['state_dict'])

            print('Modeled loaded from {} with metrics:'.format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

        base_path = os.path.split(args.resume)[0]
    else:
Example #7
0
def main():
    """Train a CycleGANModel: parse CLI args, build loaders/writers, run the
    epoch/step loop with periodic TensorBoard logging, visualization,
    validation, and checkpointing.

    Side effects: sets CUDA_VISIBLE_DEVICES, mutates the module-level
    `config`, may create a 'train_log' symlink, writes TensorBoard event
    files, and saves network checkpoints.
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): continue_path is parsed but never used in this function —
    # resuming presumably happens elsewhere; verify against CycleGANModel.
    parser.add_argument('-c',
                        '--continue',
                        dest='continue_path',
                        type=str,
                        required=False)
    parser.add_argument('-g', '--gpu_ids', type=int, default=0, required=False)
    args = parser.parse_args()

    # Restrict visible GPUs before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_ids)
    config.device = torch.device(
        "cuda:0" if torch.cuda.is_available() else "cpu")

    config.isTrain = True

    # Convenience symlink so the experiment directory is reachable as ./train_log.
    if not os.path.exists('train_log'):
        os.symlink(config.exp_dir, 'train_log')

    # get dataset
    train_loader = get_dataloader("train", batch_size=config.batch_size)
    val_loader = get_dataloader("test", batch_size=config.batch_size)
    # cycle() lets validation below draw one batch at a time, indefinitely.
    val_cycle = cycle(val_loader)
    dataset_size = len(train_loader)
    print('The number of training motions = %d' %
          (dataset_size * config.batch_size))

    # create tensorboard writer (separate event streams for train and val)
    train_tb = SummaryWriter(os.path.join(config.log_dir, 'train.events'))
    val_tb = SummaryWriter(os.path.join(config.log_dir, 'val.events'))

    # get model
    net = CycleGANModel(config)
    net.print_networks(True)

    # start training
    clock = TrainClock()
    net.train()

    for e in range(config.nr_epochs):
        # begin iteration
        pbar = tqdm(train_loader)
        for b, data in enumerate(pbar):
            # Re-enter train mode each step: the validation branch below may
            # have switched the net to eval() on the previous iteration.
            net.train()
            net.set_input(
                data)  # unpack data from dataset and apply preprocessing
            net.optimize_parameters(
            )  # calculate loss functions, get gradients, update network weights

            # get loss
            losses_values = net.get_current_losses()

            # update tensorboard
            train_tb.add_scalars('train_loss',
                                 losses_values,
                                 global_step=clock.step)

            # visualize: 'A'-suffixed keys are plotted as phase 'h', others 'nh'
            if clock.step % config.visualize_frequency == 0:
                motion_dict = net.infer()
                for k, v in motion_dict.items():
                    phase = 'h' if k[-1] == 'A' else 'nh'
                    motion3d = train_loader.dataset.preprocess_inv(
                        v.detach().cpu().numpy()[0], phase)
                    img = plot_motion(motion3d, phase)
                    train_tb.add_image(k, img, global_step=clock.step)

            pbar.set_description("EPOCH[{}][{}/{}]".format(
                e, b, len(train_loader)))
            pbar.set_postfix(OrderedDict(losses_values))

            # validation (single batch drawn from the cycled val loader;
            # note `data` and `losses_values` are rebound here)
            if clock.step % config.val_frequency == 0:
                net.eval()
                data = next(val_cycle)
                net.set_input(data)
                net.forward()

                losses_values = net.get_current_losses()
                val_tb.add_scalars('val_loss',
                                   losses_values,
                                   global_step=clock.step)

                # visualize (only on steps that hit BOTH frequencies)
                if clock.step % config.visualize_frequency == 0:
                    motion_dict = net.infer()
                    for k, v in motion_dict.items():
                        phase = 'h' if k[-1] == 'A' else 'nh'
                        motion3d = val_loader.dataset.preprocess_inv(
                            v.detach().cpu().numpy()[0], phase)
                        img = plot_motion(motion3d, phase)
                        val_tb.add_image(k, img, global_step=clock.step)

            # advance the step counter AFTER all step-indexed logging above
            clock.tick()

        # log current learning rate to TensorBoard once per epoch
        lr = net.optimizers[0].param_groups[0]['lr']
        train_tb.add_scalar("learning_rate", lr, global_step=clock.step)

        if clock.epoch % config.save_frequency == 0:
            net.save_networks(epoch=e)

        clock.tock()
        net.update_learning_rate(
        )  # update learning rates at the end of every epoch.