def main():
    args = parse_args()
    user_config = importlib.import_module('user_config.' + args.config)
    config = user_config.EvalConfig()
    if not isinstance(config.dataset, list):
        config.dataset = [config.dataset]

    copy_model(args.config, args.epoch)

    saver = Saver(config)
    model, optim, global_step, epoch = saver.load()

    if config.multi_gpu:
        model = torch.nn.DataParallel(model,
                                      device_ids=[0, 1],
                                      output_device=0)

    model.eval()

    results_path = "./results/{}".format(args.config.replace('.', '_'))

    if args.shortseq:
        test_mixamo(model, results_path, config.device, max_steps=120)

    if args.fullseq:
        test_mixamo(model, results_path, config.device, max_steps=None)
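For reference, a minimal sketch of the config-driven Saver interface that these main() examples appear to rely on, inferred from the calls alone; the real class, its checkpoint layout, and the model/optimizer factories on the config are assumptions:

import os
import torch


class Saver(object):
    """Hypothetical checkpoint helper matching the Saver(config) usage above."""

    def __init__(self, config):
        self.config = config
        self.ckpt_path = getattr(config, 'ckpt_path', './ckpt')
        os.makedirs(self.ckpt_path, exist_ok=True)

    def load(self):
        # Build model/optimizer from the config and restore the latest
        # checkpoint if one exists; return (model, optim, global_step, epoch).
        model = self.config.model()                         # assumed factory
        optim = self.config.optimizer(model.parameters())   # assumed factory
        global_step, epoch = 0, -1
        latest = os.path.join(self.ckpt_path, 'latest.pth')
        if os.path.exists(latest):
            state = torch.load(latest, map_location='cpu')
            model.load_state_dict(state['model'])
            optim.load_state_dict(state['optim'])
            global_step, epoch = state['global_step'], state['epoch']
        return model, optim, global_step, epoch

    def save(self, model, optim, global_step, epoch,
             performance=None, perf_op=None):
        state = {'model': model.state_dict(),
                 'optim': optim.state_dict(),
                 'global_step': global_step,
                 'epoch': epoch}
        torch.save(state, os.path.join(self.ckpt_path, 'latest.pth'))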
def main():
    args = parse_args()
    user_config = importlib.import_module('user_config.' + args.config)

    configG = user_config.GenConfig()
    configD = user_config.DiscConfig()
    if not isinstance(configG.dataset, list):
        configG.dataset = [configG.dataset]

    torch.set_default_dtype(configG.dtype)

    print("set config G: %s" % configG)
    print("set config D: %s" % configD)

    saverG = Saver(configG)
    modelG, optimG, global_step, last_epoch = saverG.load()

    saverD = Saver(configD)
    modelD, optimD, _, _ = saverD.load()

    models = EasyDict({'G': modelG, 'D': modelD})

    optims = EasyDict({'G': optimG, 'D': optimD})

    save_modelG = modelG
    save_modelD = modelD

    if configG.multi_gpu:
        models.G = torch.nn.DataParallel(modelG)
        models.D = torch.nn.DataParallel(modelD)

    schedulerG, schedulerD = None, None
    if hasattr(configG, 'scheduler'):
        configG.scheduler_param['last_epoch'] = -1
        schedulerG = configG.scheduler(optims.G, **configG.scheduler_param)
        schedulerD = configG.scheduler(optims.D, **configG.scheduler_param)

    log_dir = os.path.join(configG.ckpt_path, 'tb', configG.start_time)
    writer = SummaryWriter(log_dir=log_dir)
    trainer = configG.trainer(configG, models, optims, writer)

    for epoch in range(last_epoch + 1, configG.epoch):
        _, global_step, avg_loss = trainer.step(epoch, global_step)
        print('Training epoch %d was done. (avg_loss: %f)' % (epoch, avg_loss))

        print('Saving the trained generator model... (%d epoch, %d step)' %
              (epoch, global_step))
        saverG.save(save_modelG, optimG, global_step, epoch)
        print('Saving G is finished.')

        print('Saving the trained discriminator model... (%d epoch, %d step)' %
              (epoch, global_step))
        saverD.save(save_modelD, optimD, global_step, epoch)
        print('Saving D is finished.')

        if schedulerG:
            schedulerG.step(epoch)
            schedulerD.step(epoch)
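The scheduler and scheduler_param attributes read from configG above are never shown; a plausible configuration (an assumption, not taken from the project) is simply a standard PyTorch scheduler class plus its keyword arguments:

import torch

class GenConfig:
    # ... dataset, dtype, epoch, trainer, ckpt_path, multi_gpu, etc. ...
    # Hypothetical scheduler setup consumed by the training loop above:
    scheduler = torch.optim.lr_scheduler.StepLR
    scheduler_param = {'step_size': 30, 'gamma': 0.1}

# the script above then builds: schedulerG = configG.scheduler(optims.G, **configG.scheduler_param)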
    async def core_session(self, app):
        app['ips'] = await _get_self_ips()
        app['config'] = self._config
        app['redis'] = await aioredis.create_redis_pool(app['config'].redis_addr,
                                                        password=app['config'].redis_password,
                                                        encoding='utf8')
        checker = Checker(global_blacklist=app['config'].global_blacklist)
        saver = Saver(app['redis'])
        proxy_manager = ProxyManager(
            config=app['config'],
            redis=app['redis']
        )
        pattern_manager = PatternManager(checker, saver, app['redis'])

        await pattern_manager.__aenter__()
        await proxy_manager.__aenter__()
        app['pom'] = proxy_manager
        app['pam'] = pattern_manager
        app['ck'] = checker
        app['sv'] = saver
        app['client_session'] = init_session()
        yield
        await app['pam'].__aexit__(None, None, None)
        await app['pom'].__aexit__(None, None, None)
        await app['client_session'].close()
        app['redis'].close()
        await app['redis'].wait_closed()
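core_session above is an async generator that takes the application and yields once, which is the shape aiohttp expects for a cleanup context; a hypothetical wiring (the Server class name is an assumption) would be:

from aiohttp import web

def build_app(server):
    # Code before `yield` in core_session runs on startup,
    # code after `yield` runs on shutdown.
    app = web.Application()
    app.cleanup_ctx.append(server.core_session)
    return app

# web.run_app(build_app(Server(config)))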
Example #4
    async def core_session(self, app):
        checker = Checker(global_blacklist=self._config.global_blacklist)
        saver = Saver(redis_addr=self._config.redis_addr)
        proxy_manager = ProxyManager(redis_addr=self._config.redis_addr)
        pattern_manager = PatternManager(checker,
                                         saver,
                                         redis_addr=self._config.redis_addr)
        await saver.__aenter__()
        await proxy_manager.__aenter__()
        await pattern_manager.__aenter__()
        await proxy_manager.add_proxies_for_pattern('public_proxies')
        app['pom'] = proxy_manager
        app['pam'] = pattern_manager
        app['ck'] = checker
        app['sv'] = saver
        yield
        await app['pam'].__aexit__(None, None, None)
        await app['pom'].__aexit__(None, None, None)
        await app['sv'].__aexit__(None, None, None)
Example #5
def test(config, logger):
    eval_config = config['eval_config']
    model_config = config['model_config']
    data_config = config['eval_data_config']

    np.random.seed(eval_config['rng_seed'])

    logger.info('Using config:')
    pprint.pprint({
        'model_config': model_config,
        'data_config': data_config,
        'eval_config': eval_config
    })

    eval_out = eval_config['eval_out']
    if not os.path.exists(eval_out):
        logger.info('create eval out directory {}'.format(eval_out))
        os.makedirs(eval_out)
    else:
        logger.warning('dir {} already exists!'.format(eval_out))

    # restore from random or checkpoint
    restore = True
    # two ways to load the model
    # 1. load from any other dir; it only needs the config and the model path
    # 2. load from training dir
    if args.model is not None:
        # assert args.model is not None, 'please determine model or checkpoint'
        # it should be a path to model
        checkpoint_name = os.path.basename(args.model)
        input_dir = os.path.dirname(args.model)
    elif args.checkpoint is not None:
        checkpoint_name = 'detector_{}.pth'.format(args.checkpoint)
        assert args.load_dir is not None, 'please choose a directory to load checkpoint'
        eval_config['load_dir'] = args.load_dir
        input_dir = os.path.join(eval_config['load_dir'], model_config['type'],
                                 data_config['dataset_config']['type'])
        if not os.path.exists(input_dir):
            raise Exception(
                'There is no input directory for loading network from {}'.
                format(input_dir))
    else:
        restore = False

    # log for restore
    if restore:
        logger.info("restore from checkpoint")
    else:
        logger.info("use pytorch default initialization")

    # model
    model = detectors.build(model_config)
    model.eval()

    if restore:
        # saver
        saver = Saver(input_dir)
        saver.load({'model': model}, checkpoint_name)

    if args.cuda:
        model = model.cuda()

    dataloader = dataloaders.make_data_loader(data_config, training=False)

    tester = Tester(eval_config)

    tester.test(dataloader, model, logger)
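In this and the following detector examples the Saver takes a directory and restores a dict of objects in place; a minimal sketch of that contract, again inferred only from the calls above (the real checkpoint format is an assumption):

import os
import torch


class Saver(object):
    """Hypothetical directory-based helper for saver.load(params_dict, checkpoint_name)."""

    def __init__(self, save_dir):
        self.save_dir = save_dir

    def load(self, params_dict, checkpoint_name):
        path = checkpoint_name if os.path.isabs(checkpoint_name) \
            else os.path.join(self.save_dir, checkpoint_name)
        checkpoint = torch.load(path, map_location='cpu')
        for key, obj in params_dict.items():
            if obj is None:
                # plain values such as 'start_iters' are read back directly
                params_dict[key] = checkpoint.get(key)
            elif hasattr(obj, 'load_state_dict'):
                # model / optimizer / scheduler objects restore their state dicts
                obj.load_state_dict(checkpoint[key])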
Example #6
    assert args.net is not None, 'please select a base model'
    model_config['net'] = args.net

    output_dir = train_config['save_dir'] + "/" + model_config[
        'net'] + "/" + data_config['name']
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    else:
        print('output_dir already exists')
    print('checkpoint will be saved to {}'.format(output_dir))

    # model
    fasterRCNN = model_builder.build(model_config)

    # saver
    saver = Saver(output_dir)

    if args.mGPUs:
        fasterRCNN = nn.DataParallel(fasterRCNN, train_config['device_ids'])

    if args.cuda:
        fasterRCNN.cuda()

    data_loader_builder = KITTIBEVDataLoaderBuilder(data_config, training=True)
    data_loader = data_loader_builder.build()

    # optimizer
    optimizer_builder = OptimizerBuilder(fasterRCNN,
                                         train_config['optimizer_config'])
    optimizer = optimizer_builder.build()
Example #7
def train(config, logger):
    data_config = config['data_config']
    model_config = config['model_config']
    train_config = config['train_config']

    # build model
    model = detectors.build(model_config)
    model.train()

    # move to gpus before building optimizer
    if train_config['mGPUs']:
        model = common.MyParallel(model)

    if train_config['cuda']:
        model = model.cuda()

    # build optimizer and scheduler
    optimizer = optimizers.build(train_config['optimizer_config'], model)

    # force to change lr before scheduler
    if train_config['lr']:
        common.change_lr(optimizer, train_config['lr'])

    scheduler = schedulers.build(train_config['scheduler_config'], optimizer)

    # components for logging and saving (saver and summary writer)
    output_dir = os.path.join(train_config['output_path'],
                              model_config['type'],
                              data_config['dataset_config']['type'])
    saver = Saver(output_dir)

    # resume
    if train_config['resume']:
        checkpoint_path = 'detector_{}.pth'.format(train_config['checkpoint'])
        logger.info('resume from checkpoint {}'.format(checkpoint_path))
        params_dict = {
            'model': model,
            'optimizer': optimizer,
            'scheduler': scheduler,
            'start_iters': None
        }

        saver.load(params_dict, checkpoint_path)
        # train_config['num_iters'] = params_dict['num_iters']
        train_config['start_iters'] = params_dict['start_iters']
    else:
        train_config['start_iters'] = 1

    # build dataloader after resume (whether we resumed or not)
    # dataloader = dataloaders.build(data_config)
    dataloader = dataloaders.make_data_loader(data_config)

    # use model to initialize
    if train_config['model']:
        model_path = train_config['model']
        assert os.path.isabs(model_path)
        logger.info('initialize model from {}'.format(model_path))
        params_dict = {'model': model}
        saver.load(params_dict, model_path)

    summary_path = os.path.join(output_dir, './summary')
    logger.info('setup summary_dir: {}'.format(summary_path))
    summary_writer = SummaryWriter(summary_path)
    os.chmod(summary_path, 0o777)

    logger.info('setup trainer')
    trainer = Trainer(train_config, logger)
    trainer.train(dataloader, model, optimizer, scheduler, saver,
                  summary_writer)
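common.change_lr, used above to force a learning rate before the scheduler is built, is not shown; assuming it just rewrites every parameter group, a minimal sketch is:

def change_lr(optimizer, lr):
    # Override the learning rate on every parameter group of a torch optimizer.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr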
Example #8
def main():
    args = parse_args()

    user_config = importlib.import_module('user_config.' + args.config.replace('/', '.'))
    gen_config = user_config.GenConfig()
    disc_config = user_config.DiscConfig()

    L = init_logger(gen_config.tag, gen_config.ckpt_path)
    L.info("set config G: %s" % gen_config)
    L.info("set config D: %s" % disc_config)

    gen_saver = Saver(gen_config)
    generator, gen_optim, global_step, last_epoch = gen_saver.load()

    disc_saver = Saver(disc_config)
    discriminator, disc_optim, _, _ = disc_saver.load()

    models = {
        'gen': generator,
        'disc': discriminator
    }

    optims = {
        'gen': gen_optim,
        'disc': disc_optim
    }

    gen_to_save = generator
    disc_to_save = discriminator

    if gen_config.multi_gpu:
        models['gen'] = torch.nn.DataParallel(generator)
        models['disc'] = torch.nn.DataParallel(discriminator)

    gen_scheduler, disc_scheduler = None, None
    if hasattr(gen_config, 'scheduler'):
        gen_config.scheduler_param['last_epoch'] = -1
        gen_scheduler = gen_config.scheduler(gen_optim, **gen_config.scheduler_param)
        disc_scheduler = gen_config.scheduler(disc_optim, **gen_config.scheduler_param)

    log_dir = os.path.join(gen_config.ckpt_path, 'tb')
    writer = SummaryWriter(log_dir=log_dir)
    trainer = gen_config.trainer(gen_config, models, optims, writer)
    # validator = gen_config.validator(gen_config, generator, writer)

    for epoch in range(last_epoch + 1, gen_config.epoch):
        _, global_step, avg_loss = trainer.step(epoch, global_step)
        L.info('Training epoch %d was done. (avg_loss: %f)' % (epoch, avg_loss))

        # result, avg_acc = validator.step(epoch, global_step)
        # L.info('Validation epoch %d was done. (avg_acc: %f)' % (epoch, avg_acc))

        L.info('Saving the trained generator model... (%d epoch, %d step)' % (epoch, global_step))
        gen_saver.save(gen_to_save, gen_optim, global_step, epoch,
                       performance=0,
                       perf_op='lt')
        L.info('Saving G is finished.')

        L.info('Saving the trained discriminator model... (%d epoch, %d step)' % (epoch, global_step))
        disc_saver.save(disc_to_save, disc_optim, global_step, epoch,
                        performance=0,
                        perf_op='lt')
        L.info('Saving D is finished.')

        if gen_scheduler:
            gen_scheduler.step(epoch)
            disc_scheduler.step(epoch)
Example #9
    eval_out = eval_config['eval_out']
    if not os.path.exists(eval_out):
        os.makedirs(eval_out)
    else:
        print('dir {} already exists!'.format(eval_out))

    checkpoint_name = 'faster_rcnn_{}_{}.pth'.format(args.checkepoch,
                                                     args.checkpoint)

    # model
    # fasterRCNN = resnet(model_config)
    # fasterRCNN.eval()
    # fasterRCNN.create_architecture()
    fasterRCNN = model_builder.build(model_config, training=False)

    # saver
    saver = Saver(input_dir)
    saver.load({'model': fasterRCNN}, checkpoint_name)

    if args.cuda:
        fasterRCNN.cuda()

    start = time.time()

    vis = args.vis
    data_loader = dataloader_builder.build(data_config, training=False)

    #  data_loader = data_loader_builder.build()

    tester.test(eval_config, data_loader, fasterRCNN)
Example #10
    def inference(self, im, p2):
        """
        Args:
            im: shape(N, 3, H, W)

        Returns:
            dets: shape(N, M, 8)
        """
        config = self.config
        args = self.args
        eval_config = config['eval_config']
        model_config = config['model_config']
        data_config = config['eval_data_config']

        np.random.seed(eval_config['rng_seed'])

        self.logger.info('Using config:')
        pprint.pprint({
            'model_config': model_config,
            'data_config': data_config,
            'eval_config': eval_config
        })

        eval_out = eval_config['eval_out']
        if not os.path.exists(eval_out):
            self.logger.info('create eval out directory {}'.format(eval_out))
            os.makedirs(eval_out)
        else:
            self.logger.warning('dir {} already exists!'.format(eval_out))

        # restore from random or checkpoint
        restore = True
        # two ways to load the model
        # 1. load from any other dir; it only needs the config and the model path
        # 2. load from training dir
        if args.model is not None:
            # assert args.model is not None, 'please determine model or checkpoint'
            # it should be a path to model
            checkpoint_name = os.path.basename(args.model)
            input_dir = os.path.dirname(args.model)
        elif args.checkpoint is not None:
            checkpoint_name = 'detector_{}.pth'.format(args.checkpoint)
            assert args.load_dir is not None, 'please choose a directory to load checkpoint'
            eval_config['load_dir'] = args.load_dir
            input_dir = os.path.join(eval_config['load_dir'],
                                     model_config['type'], data_config['name'])
            if not os.path.exists(input_dir):
                raise Exception(
                    'There is no input directory for loading network from {}'.
                    format(input_dir))
        else:
            restore = False

        # log for restore
        if restore:
            self.logger.info("restore from checkpoint")
        else:
            self.logger.info("use pytorch default initialization")

        # model
        model = detectors.build(model_config)
        model.eval()

        if restore:
            # saver
            saver = Saver(input_dir)
            saver.load({'model': model}, checkpoint_name)

        model = model.cuda()

        #  dataloader = dataloaders.make_data_loader(data_config, training=False)

        self.logger.info('Start testing')
        #  num_samples = len(dataloader)

        #  for step, data in enumerate(dataloader):
        data = self.preprocess(im, p2)
        data = self.to_batch(data)
        data = common.to_cuda(data)
        #  image_path = data[constants.KEY_IMAGE_PATH]

        with torch.no_grad():
            prediction = model(data)

        # initialize per-class dets (index 0 is a background placeholder)
        dets = [[]]

        scores = prediction[constants.KEY_CLASSES]
        boxes_2d = prediction[constants.KEY_BOXES_2D]
        dims = prediction[constants.KEY_DIMS]
        orients = prediction[constants.KEY_ORIENTS_V2]
        p2 = data[constants.KEY_STEREO_CALIB_P2_ORIG]

        # rcnn_3d = prediction['rcnn_3d']
        batch_size = scores.shape[0]
        scores = scores.view(-1, self.n_classes)
        new_scores = torch.zeros_like(scores)
        _, scores_argmax = scores.max(dim=-1)
        row = torch.arange(0, scores_argmax.numel()).type_as(scores_argmax)
        new_scores[row, scores_argmax] = scores[row, scores_argmax]
        scores = new_scores.view(batch_size, -1, self.n_classes)

        boxes_2d_per_img = boxes_2d[0]
        scores_per_img = scores[0]
        dims_per_img = dims[0]
        orients_per_img = orients[0]
        p2_per_img = p2[0]
        # rcnn_3d_per_img = rcnn_3d[batch_ind]
        # import ipdb
        # ipdb.set_trace()
        for class_ind in range(1, self.n_classes):
            # cls thresh
            inds = torch.nonzero(
                scores_per_img[:, class_ind] > self.thresh).view(-1)
            threshed_scores_per_img = scores_per_img[inds, class_ind]
            if inds.numel() > 0:
                threshed_boxes_2d_per_img = boxes_2d_per_img[inds]
                threshed_dims_per_img = dims_per_img[inds]
                threshed_orients_per_img = orients_per_img[inds]
                threshed_dets_per_img = torch.cat([
                    threshed_boxes_2d_per_img,
                    threshed_scores_per_img.unsqueeze(-1),
                    threshed_dims_per_img,
                    threshed_orients_per_img.unsqueeze(-1)
                ], dim=-1)

                # sort by scores
                _, order = torch.sort(threshed_scores_per_img, 0, True)
                threshed_dets_per_img = threshed_dets_per_img[order]

                # nms
                keep = nms(threshed_dets_per_img[:, :4],
                           threshed_dets_per_img[:, 4],
                           self.nms).view(-1).long()
                nms_dets_per_img = threshed_dets_per_img[keep].detach().cpu().numpy()

                # calculate location
                location = geometry_utils.calc_location(
                    nms_dets_per_img[:, 5:8], nms_dets_per_img[:, :5],
                    nms_dets_per_img[:, 8], p2_per_img.cpu().numpy())

                nms_dets_per_img = np.concatenate(
                    [
                        nms_dets_per_img[:, :5], nms_dets_per_img[:, 5:8],
                        location, nms_dets_per_img[:, -1:]
                    ],
                    axis=-1)

                dets.append(nms_dets_per_img)
            else:
                dets.append([])

            #  duration_time = time.time() - end_time
            #  label_path = self._generate_label_path(image_path[batch_ind])
            #  self.save_mono_3d_dets(dets, label_path)
            #  sys.stdout.write('\r{}/{},duration: {}'.format(
            #  step + 1, num_samples, duration_time))
            #  sys.stdout.flush()

            #  end_time = time.time()

            #  xmin, ymin, xmax, ymax, cf, h, w, l, x, y, z, ry
        return dets
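The returned dets list keeps index 0 as an empty placeholder for the background class; per the column comment above, each non-empty entry is an array whose columns are xmin, ymin, xmax, ymax, cf, h, w, l, x, y, z, ry. A hypothetical consumer of the value returned by inference():

for class_ind, class_dets in enumerate(dets):
    if class_ind == 0 or len(class_dets) == 0:
        continue  # skip the background placeholder and empty classes
    for det in class_dets:
        box_2d = det[:4]      # xmin, ymin, xmax, ymax
        score = det[4]        # cf
        dims = det[5:8]       # h, w, l
        location = det[8:11]  # x, y, z
        ry = det[11]
        print(class_ind, score, box_2d, dims, location, ry)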