def get_am_version_info():
    am_cfg = AMConfig()

    logger.info("Get software version of the OpenAM instance")
    headers = {
        'X-OpenAM-Username': '******',
        'X-OpenAM-Password': os.environ['AM_ADMIN_PWD'],
        'Content-Type': 'application/json',
        'Accept-API-Version': 'resource=2.0, protocol=1.0'
    }

    response = post(verify=am_cfg.ssl_verify,
                    url=am_cfg.rest_authn_url,
                    headers=headers)
    rest.check_http_status(http_result=response, expected_status=200)
    admin_token = response.json()['tokenId']

    logger.info('Get AM version')
    headers = {
        'Content-Type': 'application/json',
        'Accept-API-Version': 'resource=1.0',
        'iplanetdirectorypro': admin_token
    }
    response = get(verify=am_cfg.ssl_verify,
                   url=am_cfg.am_url + '/json/serverinfo/version',
                   headers=headers)
    rest.check_http_status(http_result=response, expected_status=200)
    data = response.json()  # parse once instead of re-parsing per field
    version_info = "{} (build: {}, revision: {})".format(
        data['version'], data['date'], data['revision'])
    return version_info
Example #2
def handle_unresolved_url(data, action):
    url = unquote(data)
    logger.info(u'Trying to resolve URL (%s): %s' % (action, url))
    if xbmc.Player().isPlaying():
        utils.show_info_notification(utils.translation(32007), 1000)
    else:
        utils.show_info_notification(utils.translation(32007))
    if 'youtube.com' in url or 'youtu.be' in url:
        youtube_addon = xbmcaddon.Addon(id="plugin.video.youtube")
        if youtube_addon:
            if utils.get_setting('preferYoutubeAddon') == 'true' or youtube_addon.getSetting("kodion.video.quality.mpd") == "true":
                logger.info(u'YouTube addon has DASH enabled or is configured as preferred, using it')
                clean_url = list(urlparse(url))
                clean_url[4] = '&'.join(
                    [x for x in clean_url[4].split('&') if not re.match(r'app=', x)])
                url = urlunparse(clean_url)
                utils.play_url('plugin://plugin.video.youtube/uri2addon/?uri=%s' % url, action)
                return
    logger.info(u'Trying to resolve with YoutubeDL')
    result = resolve_with_youtube_dl(url, {'format': 'best', 'no_color': 'true', 'ignoreerrors': 'true'}, action)
    if result:
        return
    # Second pass with new params to fix site like reddit dash streams
    logger.info(u'Trying to resolve with YoutubeDL other options')
    result = resolve_with_youtube_dl(url, {'format': 'bestvideo+bestaudio/best', 'no_color': 'true', 'ignoreerrors': 'true'}, action)
    if result:
        return
    logger.error(u'Url not resolved by YoutubeDL')

    if url:
        logger.info(u'Trying to play as basic url')
        utils.play_url(url, action)
    else:
        utils.show_error_notification(utils.translation(32006))
Example #3
def handler(event, context):
    try:
        logger.info(event)
        authlete = AuthleteSdk(api_key=os.environ['AUTHLETE_API_KEY'],
                               api_secret=os.environ['AUTHLETE_API_SECRET'])

        cognito_user_pool = CognitoUserPool(
            user_pool_id=os.environ['COGNITO_USER_POOL_ID'])

        access_token = get_access_token_from_header(headers=event['headers'])
        response_content = authlete.get_user_info(access_token=access_token)
        sub = cognito_user_pool.get_user_sub_value(
            username=response_content['sub'])
    except ValidationError as e:
        return response_builder(e.status_code, {'error_message': e.message})
    except AuthleteApiError as e:
        if e.status_code != 401:
            logger.error(e)
            return response_builder(500,
                                    {'error_message': 'Internal Server Error'})

        return response_builder(e.status_code, {'error_message': e.message})
    except ClientError as e:
        logger.error(e)
        return response_builder(500,
                                {'error_message': 'Internal Server Error'})
    return response_builder(200, {'sub': sub, 'name': response_content['sub']})
Example #4
    def start_ds_port_forward(self, instance_name='userstore', instance_nb=0):
        if not is_cluster_mode():
            ds_pod_name = '%s-%s' % (instance_name, instance_nb)
            ds_local_port = getattr(
                self, '%s%s_local_port' % (instance_name, instance_nb))
            command = self.helm_cmd + ' --namespace %s port-forward pod/%s %s:8080' % \
                  (tests_namespace(), ds_pod_name, ds_local_port)
            ds_popen = cmd.run_cmd_process(command)

            duration = 60
            start_time = time.time()
            while time.time() - start_time < duration:
                soc = socket.socket()
                # probe the forwarded local port; "" is ambiguous for connect, use localhost
                result = soc.connect_ex(("localhost", ds_local_port))
                soc.close()
                if result != 0:
                    logger.warning(
                        'Port-forward for pod %s on port %s not ready, waiting 5s...'
                        % (ds_pod_name, ds_local_port))
                    time.sleep(5)
                else:
                    logger.info('Port-forward for pod %s on port %s is ready' %
                                (ds_pod_name, ds_local_port))
                    return ds_popen

            raise Exception(
                'Port-forward for pod %s on port %s not ready after %ss' %
                (ds_pod_name, ds_local_port, duration))
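
A minimal sketch of driving the helper above from a test; the fixture object and its userstore0_local_port attribute are assumptions here, and the returned Popen handle should be terminated once the test is done:

popen = fixture.start_ds_port_forward(instance_name='userstore', instance_nb=0)
try:
    pass  # talk to the forwarded port on localhost here
finally:
    if popen is not None:  # None when running in cluster mode
        popen.terminate()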
Example #5
    def sync(self):
        from lib.sync import UpdateFund
        from lib.sync import UpdateIndice
        from lib.sync import UpdatePePb

        if not (self.ui.checkBox_fund.isChecked()
                or self.ui.checkBox_indice.isChecked()
                or self.ui.checkBox_pepb.isChecked()):
            logger.info('Please select content to update')

        else:
            if self.ui.checkBox_fund.isChecked():
                updateFund = UpdateFund()
                updateFund.progConn.signal.connect(self.onCountChanged)
                self.pool.start(updateFund)

            if self.ui.checkBox_indice.isChecked():
                updateIndice = UpdateIndice()
                updateIndice.progConn.signal.connect(self.onCountChanged)
                self.pool.start(updateIndice)

            if self.ui.checkBox_pepb.isChecked():
                updatePePb = UpdatePePb()
                updatePePb.progConn.signal.connect(self.onCountChanged)
                self.pool.start(updatePePb)

            self.ui.pushButton_run.setEnabled(False)
Example #6
def handler(event, context):
    try:
        logger.info(event)
        if verify_supported_media_type(event['headers']) is False:
            return response_builder(
                415, {
                    'error_message':
                    "This API only support 'content-type: application/x-www-form-urlencoded' media type"
                })

        token = parse_qs(event['body']).get('token', None)
        if token is None:
            return response_builder(
                400, {'error_message': 'Missing token parameter'})
        authlete = AuthleteSdk(api_key=os.environ['AUTHLETE_API_KEY'],
                               api_secret=os.environ['AUTHLETE_API_SECRET'])

        result = authlete.verify_access_token(token=token[0])
    except AuthleteApiError as e:
        if e.status_code != 400:
            logger.error(e)
            return response_builder(500,
                                    {'error_message': 'Internal Server Error'})

        return response_builder(e.status_code, {'error_message': e.message})
    return response_builder(200, result)
Example #7
    def __call__(self, param):
        """Callback to Show speed."""
        count = param.nbatch
        if self.last_count > count:
            self.init = False
        self.last_count = count

        if self.init:
            if count % self.frequent == 0:
                speed = self.frequent * self.batch_size / (time.time() -
                                                           self.tic)
                s = ""
                if param.eval_metric is not None:
                    name, value = param.eval_metric.get()
                    s = "Epoch[%d] Batch [%d]\tSpeed: %.2f samples/sec\tTrain-" % (
                        param.epoch, count, speed)
                    for n, v in zip(name, value):
                        s += "%s=%f,\t" % (n, v)
                else:
                    s = "Iter[%d] Batch [%d]\tSpeed: %.2f samples/sec" % (
                        param.epoch, count, speed)

                logger.info(s)
                self.tic = time.time()
        else:
            self.init = True
            self.tic = time.time()
Example #8
def handleTenant_Delete(*args, **kwargs):
    event = kwargs["sender"]
    accountId = event.accountId
    session = kwargs["session"]
    rdclient = ServiceContext().getRdClient()
    rdclient.api_version = 1
    try:
        account = session.query(Tenant).filter(Tenant.id == accountId).one()
        services = account.services
        for svc in services:
            nodes = svc.nodes
            for node in nodes:
                jobs = node.jobs
                for job in jobs:
                    try:
                        rdclient.delete_job(job.jobid)
                    except Exception:
                        logger.warning("delete job(%s) failed, ignoring" % job.jobid)
                    session.delete(job)
                session.delete(node)
            session.delete(svc)
        session.delete(account)
        session.commit()
        logger.info("delete account<%s>." % accountId)
    except NoResultFound:
        logger.warning("account(<%s>) has been deleted, just ignore" % accountId    )
    finally:
        del rdclient.api_version
Example #9
    def run(self):
        try:
            logger.info("Starting '%s' suite" % self.name)
            self._run()
        finally:
            logger.info("Cleaning up '%s' suite." % self.name)
            self.cleanup()
Example #10
def check_bop_results(path, version='bop19'):
    """Checks if the format of BOP results is correct.

  :param result_filenames: Path to a file with pose estimates.
  :param version: Version of the results.
  :return: True if the format is correct, False if it is not correct.
  """
    check_passed = True
    check_msg = 'OK'
    try:
        results = load_bop_results(path, version)

        if version == 'bop19':
            # Check if the time for all estimates from the same image are the same.
            times = {}
            for result in results:
                result_key = '{:06d}_{:06d}'.format(result['scene_id'],
                                                    result['im_id'])
                if result_key in times:
                    if abs(times[result_key] - result['time']) > 0.001:
                        check_passed = False
                        check_msg = \
                          'The running time for scene {} and image {} is not the same for' \
                          ' all estimates.'.format(result['scene_id'], result['im_id'])
                        logger.info(check_msg)
                        break
                else:
                    times[result_key] = result['time']

    except Exception as e:
        check_passed = False
        check_msg = 'Error when loading BOP results: {}'.format(e)
        logger.info(check_msg)

    return check_passed, check_msg
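
A small sketch of a call site for the checker above; the file path is illustrative only:

passed, msg = check_bop_results('/tmp/est_results_bop19.csv', version='bop19')
if not passed:
    logger.error('Results file rejected: %s' % msg)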
Example #11
def handlePackage_Activate(*args, **kwargs):
    event = kwargs["sender"]
    session = kwargs["session"]
    accountId = event.accountId
    try:
        account = session.query(Tenant).filter(Tenant.id == accountId).one()
        logger.error("<%s> account is in db , need to clean.")
    except NoResultFound:
        logger.info("<%s> is new accountid , create tenant id in db" % accountId)
        tenant = Tenant()
        tenant.id = accountId
        tenant.name = "TBD"
        tenant.state = Tenant_Sate.INIT
        session.add(tenant)
        packageName = event.packageName
        manager = ModelManager()
        svcs = manager.listsvcbypath(packageName)
        if svcs is None:
            logger.error("package(%s) has no corresponding service definition..." % packageName)
            return
        else:
            for model_svc in svcs:
                service = createRuntimeService(model_svc, tenant.id)
                session.add(service)
            tenant.getSM().trigger("package_activate", tenant=tenant, payload=event)
Example #12
def check_http_status(http_result, expected_status, known_issue=None):
    """
    Check HTTP status code
    :param http_result : requests.models.Response - request response
    :param expected_status : int or list(int) - status codes to detect that application is deployed
    :param known_issue : Jira issue code (ex : OPENAM-567) - used to add a tag
    """

    if isinstance(expected_status, list):
        is_success = (http_result.status_code
                      in [int(x) for x in expected_status])
    else:
        try:
            is_success = (http_result.status_code == int(expected_status))
        except ValueError:
            is_success = False

    if not is_success:
        # if known_issue is not None:
        #     set_known_issue(known_issue)
        pytest.fail(
            'ERROR:\n-- http status --\nreturned %s, expected %s\n-- content --\n%s'
            % (http_result.status_code, expected_status, http_result.text))
    else:
        success = 'SUCCESS:\n-- http status --\nreturned %s, expected %s' % (
            http_result.status_code, expected_status)
        logger.info(success)

        content = '\n-- content --\n%s' % http_result.text
        logger.debug(content)
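
A minimal sketch of calling the checker above from a test, assuming the requests library; the URL is illustrative, and a status mismatch fails the running pytest test:

import requests

response = requests.get('https://am.example.com/openam/json/health')
check_http_status(http_result=response, expected_status=[200, 302])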
Example #13
def handler(event, context):
    try:
        logger.info(event)
        data = json.loads(event['body'])
        user_id = event['pathParameters']['user_id']

        if 'email' not in data:
            return response_builder(400,
                                    {'error_message': 'Missing email parameter'})

        if users.exists_user(user_id=user_id):
            return response_builder(409, {'error_message': 'That user already exists'})

        users.create(
            user_id=user_id,
            email=data['email'],
        )
    except json.decoder.JSONDecodeError:
        return response_builder(400, {'error_message': 'Invalid JSON'})

    except ClientError as e:
        logger.error(e)
        return response_builder(500,
                                {'error_message': 'Internal Server Error'})

    return response_builder(201)
Example #14
def handleCreate_VM(*args, **kwargs):
    event = kwargs["sender"]
    session = kwargs["session"]
    try:
        account = session.query(Tenant).filter(Tenant.id == event.accountId).one()
    except NoResultFound:
        logger.info("missed package activate message, not handling it")
        return
Example #15
def handler(event, context):
    try:
        logger.info(event)
        result = users.get_all()

    except ClientError as e:
        logger.error(e)
        return response_builder(500,
                                {'error_message': 'Internal Server Error'})

    return response_builder(200, result['Items'])
Example #16
def handle_magnet(data):
    open_with = utils.get_setting('openMagnetWith')
    logger.info('Sharing magnet with %s' % open_with)
    if open_with == 'Elementum':
        utils.call_plugin('plugin://plugin.video.elementum/playuri?uri=' + data)
    elif open_with == 'Torrenter V2':
        utils.call_plugin('plugin://plugin.video.torrenter/?action=playSTRM&url=' + data)
    elif open_with == 'Quasar':
        utils.call_plugin('plugin://plugin.video.quasar/playuri?uri=' + data)
    elif open_with == 'YATP':
        utils.call_plugin('plugin://plugin.video.yatp/?action=play&torrent=' + data + '&file_index=dialog')
Example #17
def handler(event, context):
    try:
        logger.info(event)
        user_id = event['pathParameters']['user_id']
        users.delete(user_id=user_id)

    except ClientError as e:
        logger.error(e)
        return response_builder(500,
                                {'error_message': 'Internal Server Error'})

    return response_builder(204)
Example #18
    def print_table(table_data):
        """
        Print a table of version metadata.
        :param table_data: Dictionary of data to be printed
        """

        table = PrettyTable([table_data['TITLE'], table_data['DESCRIPTION']])
        for key, value in table_data.items():
            if key in ['TITLE', 'DESCRIPTION']:
                continue
            table.add_row([key, value])
        logger.info(table)
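
The TITLE and DESCRIPTION entries double as the column headers and every other key becomes a row; a sketch of the expected input shape (values illustrative only):

table_data = {
    'TITLE': 'OpenIDM',       # left column header
    'DESCRIPTION': 'idm',     # right column header
    'VERSION': '7.0.0',
    'REVISION': 'abc1234',
    'DATE': '2020-01-01',
}
print_table(table_data)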
Example #19
def par_generate_gt(config, pair_rec, flow_depth_rendered=None):
    from lib.pair_matching.flow import calc_flow

    target_size, max_size = config.SCALES[0][0], config.SCALES[0][1]

    if flow_depth_rendered is None:
        flow_depth_rendered = cv2.imread(pair_rec["depth_rendered"],
                                         cv2.IMREAD_UNCHANGED).astype(
                                             np.float32)
        flow_depth_rendered /= config.dataset.DEPTH_FACTOR

        flow_depth_rendered, _ = resize(flow_depth_rendered, target_size,
                                        max_size)

    if "depth_gt_observed" in pair_rec:
        flow_depth_observed = cv2.imread(pair_rec["depth_gt_observed"],
                                         cv2.IMREAD_UNCHANGED).astype(
                                             np.float32)
        flow_depth_observed /= config.dataset.DEPTH_FACTOR
    else:
        logger.info("not using gt_observed depth in par_generate_gt")
        flow_depth_observed = cv2.imread(pair_rec["depth_observed"],
                                         cv2.IMREAD_UNCHANGED).astype(
                                             np.float32)
        flow_depth_observed /= config.dataset.DEPTH_FACTOR

    flow_depth_observed, _ = resize(flow_depth_observed, target_size, max_size)

    if "mask_gt_observed" or "mask_observed" in pair_rec:
        mask_observed_path = pair_rec["mask_gt_observed"]
        assert os.path.exists(mask_observed_path), "%s does not exist".format(
            pair_rec["mask_gt_observed"])
        mask_observed = cv2.imread(mask_observed_path, cv2.IMREAD_UNCHANGED)
        mask_observed, _ = resize(mask_observed, target_size, max_size)
        flow_depth_observed[mask_observed != pair_rec["mask_idx"]] = 0

    flow_i2r_list = None  # avoid a NameError below when flow prediction is off
    if config.network.PRED_FLOW:
        flow_i2r, visible, _ = calc_flow(
            flow_depth_rendered,
            pair_rec["pose_rendered"],
            pair_rec["pose_observed"],
            config.dataset.INTRINSIC_MATRIX,
            flow_depth_observed,
            standard_rep=config.network.STANDARD_FLOW_REP,
        )
        flow_i2r_list = [
            flow_i2r, visible,
            np.logical_and(visible == 0, flow_depth_rendered == 0)
        ]

    return {"flow": flow_i2r_list}
Example #20
def main():
    logger.info("Called with argument: {}".format(args))
    ctx = [mx.gpu(int(i)) for i in args.gpus.split(",")]
    train_net(
        args,
        ctx,
        config.network.pretrained,
        config.network.pretrained_epoch,
        config.TRAIN.model_prefix,
        config.TRAIN.begin_epoch,
        config.TRAIN.end_epoch,
        config.TRAIN.lr,
        config.TRAIN.lr_step,
    )
Example #21
def handler(event, context):
    try:
        logger.info(event)
        authlete = AuthleteSdk(api_key=os.environ['AUTHLETE_API_KEY'],
                               api_secret=os.environ['AUTHLETE_API_SECRET'])

        configuration = authlete.get_openid_configuration()

    except AuthleteApiError as e:
        logger.error(e)
        return response_builder(500,
                                {'error_message': 'Internal Server Error'})

    return response_builder(200, configuration)
Example #22
def get_idm_version_info():
    idm_cfg = IDMConfig()

    logger.info("Get software version of the OpenIDM instance")
    headers = idm_cfg.get_admin_headers({'Content-Type': 'application/json'})
    response = get(verify=idm_cfg.ssl_verify,
                   url=idm_cfg.idm_url + '/info/version',
                   headers=headers)
    rest.check_http_status(http_result=response, expected_status=200)
    data = response.json()  # parse once instead of re-parsing per field
    version_info = "{} (build: {}, revision: {})".format(
        data['productVersion'], data['productBuildDate'], data['productRevision'])
    return version_info
Example #23
def handler(event, context):
    try:
        logger.info(event)
        user_id = event['pathParameters']['user_id']

        if not users.exists_user(user_id=user_id):
            return response_builder(404, {'error_message': 'That user does not exist'})

        result = users.get(user_id=user_id)

    except ClientError as e:
        logger.error(e)
        return response_builder(500,
                                {'error_message': 'Internal Server Error'})

    return response_builder(200, result['Item'])
Example #24
def _get_optimizer(params, optimizer_cfg, use_hvd=False):
    # cfg.optimizer = dict(type='RMSprop', lr=1e-4, weight_decay=0)
    # cfg.optimizer = dict(type='Ranger', lr=1e-4) # , N_sma_threshhold=5, betas=(.95, 0.999))  # 4, (0.90, 0.999)
    optim_type_str = optimizer_cfg.pop("type")
    if optim_type_str.lower() in ["rangerlars", "over9000"]:  # RangerLars
        optim_type_str = "lookahead_Ralamb"
    optim_split = optim_type_str.split("_")

    optim_type = optim_split[-1]
    logger.info(f"optimizer: {optim_type_str} {optim_split}")

    if optim_type == "Ranger":
        from lib.torch_utils.solver.ranger import Ranger

        optimizer_cls = Ranger
    elif optim_type == "Ralamb":
        from lib.torch_utils.solver.ralamb import Ralamb

        optimizer_cls = Ralamb
    elif optim_type == "RAdam":
        from lib.torch_utils.solver.radam import RAdam

        optimizer_cls = RAdam
    else:
        optimizer_cls = getattr(torch.optim, optim_type)
    opt_kwargs = {
        k: v
        for k, v in optimizer_cfg.items() if "lookahead" not in k
    }
    optimizer = optimizer_cls(params, **opt_kwargs)

    if len(optim_split) > 1 and not use_hvd:
        if optim_split[0].lower() == "lookahead":
            from lib.torch_utils.solver.lookahead import Lookahead

            # TODO: pass lookahead hyper-params
            optimizer = Lookahead(
                optimizer,
                alpha=optimizer_cfg.get("lookahead_alpha", 0.5),
                k=optimizer_cfg.get("lookahead_k", 6),
            )
    # logger.info(msg(type(optimizer)))
    return optimizer
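
A sketch of a config that exercises the helper above: the lookahead_ keys are filtered out of the optimizer kwargs and consumed by the Lookahead wrapper instead. The stand-in module is illustrative; the Ranger import is resolved inside the helper:

import torch.nn as nn

model = nn.Linear(4, 2)  # stand-in module
optimizer_cfg = dict(type='lookahead_Ranger', lr=1e-4,
                     lookahead_alpha=0.5, lookahead_k=6)
optimizer = _get_optimizer(model.parameters(), optimizer_cfg)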
Example #25
    def save_checkpoint(self, prefix, epoch, save_optimizer_states=False):
        """Save current progress to checkpoint.
        Use mx.callback.module_checkpoint as epoch_end_callback to save during training.

        Parameters
        ----------
        prefix : str
            The file prefix to checkpoint to
        epoch : int
            The current epoch number
        save_optimizer_states : bool
            Whether to save optimizer states for continued training
        """
        self._symbol.save("%s-symbol.json" % prefix)
        param_name = "%s-%04d.params" % (prefix, epoch)
        self.save_params(param_name)
        logger.info('Saved checkpoint to "%s"', param_name)
        if save_optimizer_states:
            state_name = "%s-%04d.states" % (prefix, epoch)
            self.save_optimizer_states(state_name)
            logger.info('Saved optimizer state to "%s"', state_name)
Example #26
    def version(self):
        """
        Return the product version information.
        :return: Dictionary
        """
        idm_cfg = IDMConfig()

        logger.info("Get software version of the OpenIDM instance")
        headers = idm_cfg.get_admin_headers(
            {'Content-Type': 'application/json'})
        response = get(verify=idm_cfg.ssl_verify,
                       url=idm_cfg.idm_url + '/info/version',
                       headers=headers)
        rest.check_http_status(http_result=response, expected_status=200)
        data = response.json()  # parse once instead of re-parsing per field

        return {
            'TITLE': self.product_type,
            'DESCRIPTION': self.name,
            'VERSION': data['productVersion'],
            'REVISION': data['productRevision'],
            'DATE': data['productBuildDate']
        }
Example #27
    def __call__(self, num_update):
        """
        Call to schedule current learning rate

        Parameters
        ----------
        num_update: int
            the maximal number of updates applied to a weight.
        """

        # NOTE: use while rather than if  (for continuing training via load_epoch)
        if self.warmup and num_update < self.warmup_step:
            return self.warmup_lr
        while self.cur_step_ind <= len(self.step) - 1:
            if num_update > self.step[self.cur_step_ind]:
                self.count = self.step[self.cur_step_ind]
                self.cur_step_ind += 1
                self.base_lr *= self.factor
                logger.info("Update[%d]: Change learning rate to %0.5e",
                            num_update, self.base_lr)
            else:
                return self.base_lr
        return self.base_lr
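
A sketch of querying the scheduler above from a training loop; the class name and constructor arguments are guesses in the style of MXNet's multi-factor schedulers and would need to match the surrounding code:

scheduler = WarmupMultiFactorScheduler(step=[60000, 90000], factor=0.1,
                                       warmup=True, warmup_step=500,
                                       warmup_lr=1e-5)
lr = scheduler(num_update=70000)  # base_lr decayed once, past the first step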
Example #28
        db_url = config.get("DB", "mysql_url")
        engine = create_engine(db_url)
        self.sessionmaker = sessionmaker(bind=engine)
        self.session = self.sessionmaker()

@register
def handleCreate_VM(*args, **kwargs):
    event = kwargs["sender"]
    session = kwargs["session"]
    try:
        account = session.query(Tenant).filter(Tenant.id == event.accountId).one()
    except NoResultFound:
        logger.info("missed package activate message, not handling it")
        return

    logger.info("handling account[%s],create_vm[%s],stackid[%s]...." % (event.accountId,event.vmType,event.stackId))
    account.getSM().trigger("create_vm",tenant = account,payload=event)


@register
def handlePackage_Activate(*args, **kwargs):
    event = kwargs["sender"]
    session = kwargs["session"]
    accountId = event.accountId
    try:
        account = session.query(Tenant).filter(Tenant.id == accountId).one()
        logger.error("<%s> account is in db , need to clean.")
    except NoResultFound:
        logger.info("<%s> is new accountid , create tenant id in db" % accountId)
        tenant = Tenant()
        tenant.id = accountId
Example #29
def pred_eval(config,
              predictor,
              test_data,
              imdb_test,
              vis=False,
              ignore_cache=None,
              logger=None,
              pairdb=None):
    """
    wrapper for calculating offline validation for faster data analysis
    in this example, all threshold are set by hand
    :param predictor: Predictor
    :param test_data: data iterator, must be non-shuffle
    :param imdb_test: image database
    :param vis: controls visualization
    :param ignore_cache: ignore the saved cache file
    :param logger: the logger instance
    :return:
    """
    logger.info(imdb_test.result_path)
    logger.info("test iter size: {}".format(config.TEST.test_iter))
    pose_err_file = os.path.join(
        imdb_test.result_path,
        imdb_test.name + "_pose_iter{}.pkl".format(config.TEST.test_iter))
    if os.path.exists(pose_err_file) and not ignore_cache and not vis:
        with open(pose_err_file, "rb") as fid:
            if six.PY3:
                [all_rot_err, all_trans_err, all_poses_est,
                 all_poses_gt] = cPickle.load(fid, encoding="latin1")
            else:
                [all_rot_err, all_trans_err, all_poses_est,
                 all_poses_gt] = cPickle.load(fid)
        imdb_test.evaluate_pose(config, all_poses_est, all_poses_gt)
        pose_add_plots_dir = os.path.join(imdb_test.result_path, "add_plots")
        mkdir_p(pose_add_plots_dir)
        imdb_test.evaluate_pose_add(config,
                                    all_poses_est,
                                    all_poses_gt,
                                    output_dir=pose_add_plots_dir)
        pose_arp2d_plots_dir = os.path.join(imdb_test.result_path,
                                            "arp_2d_plots")
        mkdir_p(pose_arp2d_plots_dir)
        imdb_test.evaluate_pose_arp_2d(config,
                                       all_poses_est,
                                       all_poses_gt,
                                       output_dir=pose_arp2d_plots_dir)
        return

    assert vis or not test_data.shuffle
    assert config.TEST.BATCH_PAIRS == 1
    if not isinstance(test_data, PrefetchingIter):
        test_data = PrefetchingIter(test_data)

    num_pairs = len(pairdb)
    height = 480
    width = 640

    data_time, net_time, post_time = 0.0, 0.0, 0.0

    sum_EPE_all = 0.0
    num_inst_all = 0.0
    sum_EPE_viz = 0.0
    num_inst_viz = 0.0
    sum_EPE_vizbg = 0.0
    num_inst_vizbg = 0.0
    sum_PoseErr = [
        np.zeros((len(imdb_test.classes) + 1, 2))
        for batch_idx in range(config.TEST.test_iter)
    ]

    all_rot_err = [[[] for j in range(config.TEST.test_iter)]
                   for batch_idx in range(len(imdb_test.classes))
                   ]  # num_cls x test_iter
    all_trans_err = [[[] for j in range(config.TEST.test_iter)]
                     for batch_idx in range(len(imdb_test.classes))]

    all_poses_est = [[[] for j in range(config.TEST.test_iter)]
                     for batch_idx in range(len(imdb_test.classes))]
    all_poses_gt = [[[] for j in range(config.TEST.test_iter)]
                    for batch_idx in range(len(imdb_test.classes))]

    num_inst = np.zeros(len(imdb_test.classes) + 1)

    K = config.dataset.INTRINSIC_MATRIX
    if config.TEST.test_iter > 1 or config.TEST.VISUALIZE:
        print(
            "************* start setup render_glumpy environment... ******************"
        )
        if config.dataset.dataset.startswith("ModelNet"):
            from lib.render_glumpy.render_py_light_modelnet_multi import Render_Py_Light_ModelNet_Multi

            modelnet_root = config.modelnet_root
            texture_path = os.path.join(modelnet_root, "gray_texture.png")

            model_path_list = [
                os.path.join(config.dataset.model_dir,
                             "{}.obj".format(model_name))
                for model_name in config.dataset.class_name
            ]
            render_machine = Render_Py_Light_ModelNet_Multi(
                model_path_list,
                texture_path,
                K,
                width,
                height,
                config.dataset.ZNEAR,
                config.dataset.ZFAR,
                brightness_ratios=[0.7],
            )
        else:
            render_machine = Render_Py(
                config.dataset.model_dir,
                config.dataset.class_name,
                K,
                width,
                height,
                config.dataset.ZNEAR,
                config.dataset.ZFAR,
            )

        def render(render_machine, pose, cls_idx, K=None):
            if config.dataset.dataset.startswith("ModelNet"):
                idx = 2
                # pick light_position by index (idx is fixed here, so deterministic)
                if idx % 6 == 0:
                    light_position = [1, 0, 1]
                elif idx % 6 == 1:
                    light_position = [1, 1, 1]
                elif idx % 6 == 2:
                    light_position = [0, 1, 1]
                elif idx % 6 == 3:
                    light_position = [-1, 1, 1]
                elif idx % 6 == 4:
                    light_position = [-1, 0, 1]
                elif idx % 6 == 5:
                    light_position = [0, 0, 1]
                else:
                    raise Exception("???")
                light_position = np.array(light_position) * 0.5
                # inverse yz
                light_position[0] += pose[0, 3]
                light_position[1] -= pose[1, 3]
                light_position[2] -= pose[2, 3]

                colors = np.array([1, 1, 1])  # white light
                intensity = np.random.uniform(0.9, 1.1, size=(3, ))
                colors_randk = 0
                light_intensity = colors[colors_randk] * intensity

                # randomly choose a render machine
                rm_randk = 0  # random.randint(0, len(brightness_ratios) - 1)
                rgb_gl, depth_gl = render_machine.render(
                    cls_idx,
                    pose[:3, :3],
                    pose[:3, 3],
                    light_position,
                    light_intensity,
                    brightness_k=rm_randk,
                    r_type="mat",
                )
                rgb_gl = rgb_gl.astype("uint8")
            else:
                rgb_gl, depth_gl = render_machine.render(cls_idx,
                                                         pose[:3, :3],
                                                         pose[:, 3],
                                                         r_type="mat",
                                                         K=K)
                rgb_gl = rgb_gl.astype("uint8")
            return rgb_gl, depth_gl

        print(
            "***************setup render_glumpy environment succeed ******************"
        )

    if config.TEST.PRECOMPUTED_ICP:
        print("precomputed_ICP")
        config.TEST.test_iter = 1
        all_rot_err = [[[] for j in range(1)]
                       for batch_idx in range(len(imdb_test.classes))]
        all_trans_err = [[[] for j in range(1)]
                         for batch_idx in range(len(imdb_test.classes))]

        all_poses_est = [[[] for j in range(1)]
                         for batch_idx in range(len(imdb_test.classes))]
        all_poses_gt = [[[] for j in range(1)]
                        for batch_idx in range(len(imdb_test.classes))]

        xy_trans_err = [[[] for j in range(1)]
                        for batch_idx in range(len(imdb_test.classes))]
        z_trans_err = [[[] for j in range(1)]
                       for batch_idx in range(len(imdb_test.classes))]
        for idx in range(len(pairdb)):
            pose_path = pairdb[idx]["depth_rendered"][:-10] + "-pose_icp.txt"
            pose_rendered_update = np.loadtxt(pose_path, skiprows=1)
            pose_observed = pairdb[idx]["pose_observed"]
            r_dist_est, t_dist_est = calc_rt_dist_m(pose_rendered_update,
                                                    pose_observed)
            xy_dist = np.linalg.norm(pose_rendered_update[:2, -1] -
                                     pose_observed[:2, -1])
            z_dist = np.linalg.norm(pose_rendered_update[-1, -1] -
                                    pose_observed[-1, -1])
            print(
                "{}: r_dist_est: {}, t_dist_est: {}, xy_dist: {}, z_dist: {}".
                format(idx, r_dist_est, t_dist_est, xy_dist, z_dist))
            class_id = imdb_test.classes.index(pairdb[idx]["gt_class"])
            # store poses estimation and gt
            all_poses_est[class_id][0].append(pose_rendered_update)
            all_poses_gt[class_id][0].append(pairdb[idx]["pose_observed"])
            all_rot_err[class_id][0].append(r_dist_est)
            all_trans_err[class_id][0].append(t_dist_est)
            xy_trans_err[class_id][0].append(xy_dist)
            z_trans_err[class_id][0].append(z_dist)
        all_rot_err = np.array(all_rot_err)
        all_trans_err = np.array(all_trans_err)
        print("rot = {} +/- {}".format(np.mean(all_rot_err[class_id][0]),
                                       np.std(all_rot_err[class_id][0])))
        print("trans = {} +/- {}".format(np.mean(all_trans_err[class_id][0]),
                                         np.std(all_trans_err[class_id][0])))
        num_list = all_trans_err[class_id][0]
        print("xyz: {:.2f} +/- {:.2f}".format(
            np.mean(num_list) * 100,
            np.std(num_list) * 100))
        num_list = xy_trans_err[class_id][0]
        print("xy: {:.2f} +/- {:.2f}".format(
            np.mean(num_list) * 100,
            np.std(num_list) * 100))
        num_list = z_trans_err[class_id][0]
        print("z: {:.2f} +/- {:.2f}".format(
            np.mean(num_list) * 100,
            np.std(num_list) * 100))

        imdb_test.evaluate_pose(config, all_poses_est, all_poses_gt)
        pose_add_plots_dir = os.path.join(imdb_test.result_path,
                                          "add_plots_precomputed_ICP")
        mkdir_p(pose_add_plots_dir)
        imdb_test.evaluate_pose_add(config,
                                    all_poses_est,
                                    all_poses_gt,
                                    output_dir=pose_add_plots_dir)
        pose_arp2d_plots_dir = os.path.join(imdb_test.result_path,
                                            "arp_2d_plots_precomputed_ICP")
        mkdir_p(pose_arp2d_plots_dir)
        imdb_test.evaluate_pose_arp_2d(config,
                                       all_poses_est,
                                       all_poses_gt,
                                       output_dir=pose_arp2d_plots_dir)
        return

    if config.TEST.BEFORE_ICP:
        print("before_ICP")
        config.TEST.test_iter = 1
        all_rot_err = [[[] for j in range(1)]
                       for batch_idx in range(len(imdb_test.classes))]
        all_trans_err = [[[] for j in range(1)]
                         for batch_idx in range(len(imdb_test.classes))]

        all_poses_est = [[[] for j in range(1)]
                         for batch_idx in range(len(imdb_test.classes))]
        all_poses_gt = [[[] for j in range(1)]
                        for batch_idx in range(len(imdb_test.classes))]

        xy_trans_err = [[[] for j in range(1)]
                        for batch_idx in range(len(imdb_test.classes))]
        z_trans_err = [[[] for j in range(1)]
                       for batch_idx in range(len(imdb_test.classes))]
        for idx in range(len(pairdb)):
            pose_path = pairdb[idx]["depth_rendered"][:-10] + "-pose.txt"
            pose_rendered_update = np.loadtxt(pose_path, skiprows=1)
            pose_observed = pairdb[idx]["pose_observed"]
            r_dist_est, t_dist_est = calc_rt_dist_m(pose_rendered_update,
                                                    pose_observed)
            xy_dist = np.linalg.norm(pose_rendered_update[:2, -1] -
                                     pose_observed[:2, -1])
            z_dist = np.linalg.norm(pose_rendered_update[-1, -1] -
                                    pose_observed[-1, -1])
            class_id = imdb_test.classes.index(pairdb[idx]["gt_class"])
            # store poses estimation and gt
            all_poses_est[class_id][0].append(pose_rendered_update)
            all_poses_gt[class_id][0].append(pairdb[idx]["pose_observed"])
            all_rot_err[class_id][0].append(r_dist_est)
            all_trans_err[class_id][0].append(t_dist_est)
            xy_trans_err[class_id][0].append(xy_dist)
            z_trans_err[class_id][0].append(z_dist)

        all_trans_err = np.array(all_trans_err)
        imdb_test.evaluate_pose(config, all_poses_est, all_poses_gt)
        pose_add_plots_dir = os.path.join(imdb_test.result_path,
                                          "add_plots_before_ICP")
        mkdir_p(pose_add_plots_dir)
        imdb_test.evaluate_pose_add(config,
                                    all_poses_est,
                                    all_poses_gt,
                                    output_dir=pose_add_plots_dir)
        pose_arp2d_plots_dir = os.path.join(imdb_test.result_path,
                                            "arp_2d_plots_before_ICP")
        mkdir_p(pose_arp2d_plots_dir)
        imdb_test.evaluate_pose_arp_2d(config,
                                       all_poses_est,
                                       all_poses_gt,
                                       output_dir=pose_arp2d_plots_dir)
        return

    # ------------------------------------------------------------------------------
    t_start = time.time()
    t = time.time()
    for idx, data_batch in enumerate(test_data):
        if np.sum(pairdb[idx]
                  ["pose_rendered"]) == -12:  # NO POINT VALID IN INIT POSE
            print(idx)
            class_id = imdb_test.classes.index(pairdb[idx]["gt_class"])
            for pose_iter_idx in range(config.TEST.test_iter):
                all_poses_est[class_id][pose_iter_idx].append(
                    pairdb[idx]["pose_rendered"])
                all_poses_gt[class_id][pose_iter_idx].append(
                    pairdb[idx]["pose_observed"])

                r_dist = 1000
                t_dist = 1000
                all_rot_err[class_id][pose_iter_idx].append(r_dist)
                all_trans_err[class_id][pose_iter_idx].append(t_dist)
                sum_PoseErr[pose_iter_idx][class_id, :] += np.array(
                    [r_dist, t_dist])
                sum_PoseErr[pose_iter_idx][-1, :] += np.array([r_dist, t_dist])
                # post process
            if idx % 50 == 0:
                logger.info(
                    "testing {}/{} data {:.4f}s net {:.4f}s calc_gt {:.4f}s".
                    format(
                        (idx + 1),
                        num_pairs,
                        data_time / ((idx + 1) * test_data.batch_size),
                        net_time / ((idx + 1) * test_data.batch_size),
                        post_time / ((idx + 1) * test_data.batch_size),
                    ))
            print("in test: NO POINT_VALID IN rendered")
            continue
        data_time += time.time() - t

        t = time.time()

        pose_rendered = pairdb[idx]["pose_rendered"]
        if np.sum(pose_rendered) == -12:
            print(idx)
            class_id = imdb_test.classes.index(pairdb[idx]["gt_class"])
            num_inst[class_id] += 1
            num_inst[-1] += 1
            for pose_iter_idx in range(config.TEST.test_iter):
                all_poses_est[class_id][pose_iter_idx].append(pose_rendered)
                all_poses_gt[class_id][pose_iter_idx].append(
                    pairdb[idx]["pose_observed"])

            # post process
            if idx % 50 == 0:
                logger.info(
                    "testing {}/{} data {:.4f}s net {:.4f}s calc_gt {:.4f}s".
                    format(
                        (idx + 1),
                        num_pairs,
                        data_time / ((idx + 1) * test_data.batch_size),
                        net_time / ((idx + 1) * test_data.batch_size),
                        post_time / ((idx + 1) * test_data.batch_size),
                    ))

            t = time.time()
            continue

        output_all = predictor.predict(data_batch)
        net_time += time.time() - t

        t = time.time()
        rst_iter = []
        for output in output_all:
            cur_rst = {}
            cur_rst["se3"] = np.squeeze(
                output["se3_output"].asnumpy()).astype("float32")

            if not config.TEST.FAST_TEST and config.network.PRED_FLOW:
                cur_rst["flow"] = np.squeeze(
                    output["flow_est_crop_output"].asnumpy().transpose(
                        (2, 3, 1, 0))).astype("float16")
            else:
                cur_rst["flow"] = None
            if config.network.PRED_MASK and config.TEST.UPDATE_MASK not in [
                    "init", "box_rendered"
            ]:
                mask_pred = np.squeeze(
                    output["mask_observed_pred_output"].asnumpy()).astype(
                        "float32")
                cur_rst["mask_pred"] = mask_pred

            rst_iter.append(cur_rst)

        post_time += time.time() - t
        # sample_ratio = 1  # 0.01
        for batch_idx in range(0, test_data.batch_size):
            # if config.TEST.VISUALIZE and not (r_dist>15 and t_dist>0.05):
            #     continue # 3388, 5326
            # calculate the flow error --------------------------------------------
            t = time.time()
            if config.network.PRED_FLOW and not config.TEST.FAST_TEST:
                # evaluate optical flow
                flow_gt = par_generate_gt(config, pairdb[idx])
                if config.network.PRED_FLOW:
                    all_diff = calc_EPE_one_pair(rst_iter[batch_idx], flow_gt,
                                                 "flow")
                sum_EPE_all += all_diff["epe_all"]
                num_inst_all += all_diff["num_all"]
                sum_EPE_viz += all_diff["epe_viz"]
                num_inst_viz += all_diff["num_viz"]
                sum_EPE_vizbg += all_diff["epe_vizbg"]
                num_inst_vizbg += all_diff["num_vizbg"]

            # calculate the se3 error ---------------------------------------------
            # evaluate se3 estimation
            pose_rendered = pairdb[idx]["pose_rendered"]
            class_id = imdb_test.classes.index(pairdb[idx]["gt_class"])
            num_inst[class_id] += 1
            num_inst[-1] += 1
            post_time += time.time() - t

            # iterative refine se3 estimation --------------------------------------------------
            for pose_iter_idx in range(config.TEST.test_iter):
                t = time.time()
                pose_rendered_update = RT_transform(
                    pose_rendered,
                    rst_iter[0]["se3"][:-3],
                    rst_iter[0]["se3"][-3:],
                    config.dataset.trans_means,
                    config.dataset.trans_stds,
                    config.network.ROT_COORD,
                )

                # calculate error
                r_dist, t_dist = calc_rt_dist_m(pose_rendered_update,
                                                pairdb[idx]["pose_observed"])

                # store poses estimation and gt
                all_poses_est[class_id][pose_iter_idx].append(
                    pose_rendered_update)
                all_poses_gt[class_id][pose_iter_idx].append(
                    pairdb[idx]["pose_observed"])

                all_rot_err[class_id][pose_iter_idx].append(r_dist)
                all_trans_err[class_id][pose_iter_idx].append(t_dist)
                sum_PoseErr[pose_iter_idx][class_id, :] += np.array(
                    [r_dist, t_dist])
                sum_PoseErr[pose_iter_idx][-1, :] += np.array([r_dist, t_dist])
                if config.TEST.VISUALIZE:
                    print("idx {}, iter {}: rError: {}, tError: {}".format(
                        idx + batch_idx, pose_iter_idx + 1, r_dist, t_dist))

                post_time += time.time() - t

                # # if more than one iteration
                if pose_iter_idx < (config.TEST.test_iter -
                                    1) or config.TEST.VISUALIZE:
                    t = time.time()
                    # get refined image
                    K_path = pairdb[idx]["image_observed"][:-10] + "-K.txt"
                    if os.path.exists(K_path):
                        K = np.loadtxt(K_path)
                    image_refined, depth_refined = render(
                        render_machine,
                        pose_rendered_update,
                        config.dataset.class_name.index(
                            pairdb[idx]["gt_class"]),
                        K=K,
                    )
                    image_refined = image_refined[:, :, :3]

                    # update minibatch
                    update_package = [{
                        "image_rendered": image_refined,
                        "src_pose": pose_rendered_update
                    }]
                    if config.network.INPUT_DEPTH:
                        update_package[0]["depth_rendered"] = depth_refined
                    if config.network.INPUT_MASK:
                        mask_rendered_refined = np.zeros(depth_refined.shape)
                        mask_rendered_refined[depth_refined > 0.2] = 1
                        update_package[0][
                            "mask_rendered"] = mask_rendered_refined
                        if config.network.PRED_MASK:
                            # init, box_rendered, mask_rendered, box_observed, mask_observed
                            if config.TEST.UPDATE_MASK == "box_rendered":
                                input_names = [
                                    blob_name[0]
                                    for blob_name in data_batch.provide_data[0]
                                ]
                                update_package[0][
                                    "mask_observed"] = np.squeeze(
                                        data_batch.data[0][input_names.index(
                                            "mask_rendered")].asnumpy()
                                        [batch_idx])  # noqa
                            elif config.TEST.UPDATE_MASK == "init":
                                pass
                            else:
                                raise Exception(
                                    "Unknown UPDATE_MASK type: {}".format(
                                        config.TEST.UPDATE_MASK))

                    pose_rendered = pose_rendered_update
                    data_batch = update_data_batch(config, data_batch,
                                                   update_package)

                    data_time += time.time() - t

                    # forward and get rst
                    if pose_iter_idx < config.TEST.test_iter - 1:
                        t = time.time()
                        output_all = predictor.predict(data_batch)
                        net_time += time.time() - t

                        t = time.time()
                        rst_iter = []
                        for output in output_all:
                            cur_rst = {}
                            if config.network.REGRESSOR_NUM == 1:
                                cur_rst["se3"] = np.squeeze(
                                    output["se3_output"].asnumpy()).astype(
                                        "float32")

                            if not config.TEST.FAST_TEST and config.network.PRED_FLOW:
                                cur_rst["flow"] = np.squeeze(
                                    output["flow_est_crop_output"].asnumpy().
                                    transpose((2, 3, 1, 0))).astype("float16")
                            else:
                                cur_rst["flow"] = None

                            if config.network.PRED_MASK and config.TEST.UPDATE_MASK not in [
                                    "init", "box_rendered"
                            ]:
                                mask_pred = np.squeeze(
                                    output["mask_observed_pred_output"].
                                    asnumpy()).astype("float32")
                                cur_rst["mask_pred"] = mask_pred

                            rst_iter.append(cur_rst)
                            post_time += time.time() - t

        # post process
        if idx % 50 == 0:
            logger.info(
                "testing {}/{} data {:.4f}s net {:.4f}s calc_gt {:.4f}s".
                format(
                    (idx + 1),
                    num_pairs,
                    data_time / ((idx + 1) * test_data.batch_size),
                    net_time / ((idx + 1) * test_data.batch_size),
                    post_time / ((idx + 1) * test_data.batch_size),
                ))

        t = time.time()

    all_rot_err = np.array(all_rot_err)
    all_trans_err = np.array(all_trans_err)

    # save inference results
    if not config.TEST.VISUALIZE:
        with open(pose_err_file, "wb") as f:
            logger.info("saving result cache to {}".format(pose_err_file))
            cPickle.dump(
                [all_rot_err, all_trans_err, all_poses_est, all_poses_gt],
                f,
                protocol=2)
            logger.info("done")

    if config.network.PRED_FLOW:
        logger.info("evaluate flow:")
        logger.info("EPE all: {}".format(sum_EPE_all / max(num_inst_all, 1.0)))
        logger.info("EPE ignore unvisible: {}".format(
            sum_EPE_vizbg / max(num_inst_vizbg, 1.0)))
        logger.info("EPE visible: {}".format(sum_EPE_viz /
                                             max(num_inst_viz, 1.0)))

    logger.info("evaluate pose:")
    imdb_test.evaluate_pose(config, all_poses_est, all_poses_gt)
    # evaluate pose add
    pose_add_plots_dir = os.path.join(imdb_test.result_path, "add_plots")
    mkdir_p(pose_add_plots_dir)
    imdb_test.evaluate_pose_add(config,
                                all_poses_est,
                                all_poses_gt,
                                output_dir=pose_add_plots_dir)
    pose_arp2d_plots_dir = os.path.join(imdb_test.result_path, "arp_2d_plots")
    mkdir_p(pose_arp2d_plots_dir)
    imdb_test.evaluate_pose_arp_2d(config,
                                   all_poses_est,
                                   all_poses_gt,
                                   output_dir=pose_arp2d_plots_dir)

    logger.info("using {} seconds in total".format(time.time() - t_start))
Example #30
    def evaluate_pose_arp_2d(self, config, all_poses_est, all_poses_gt,
                             output_dir):
        """
        evaluate average re-projection 2d error
        :param config:
        :param all_poses_est:
        :param all_poses_gt:
        :param output_dir:
        :param logger:
        :return:
        """
        logger.info("evaluating pose average re-projection 2d error")
        num_iter = config.TEST.test_iter
        K = config.dataset.INTRINSIC_MATRIX

        count_all = np.zeros((self.num_classes, ), dtype=np.float32)
        count_correct = {
            k: np.zeros((self.num_classes, num_iter), dtype=np.float32)
            for k in ["2", "5", "10", "20"]
        }

        threshold_2 = np.zeros((self.num_classes, num_iter), dtype=np.float32)
        threshold_5 = np.zeros((self.num_classes, num_iter), dtype=np.float32)
        threshold_10 = np.zeros((self.num_classes, num_iter), dtype=np.float32)
        threshold_20 = np.zeros((self.num_classes, num_iter), dtype=np.float32)
        dx = 0.1
        threshold_mean = np.tile(
            np.arange(0, 50, dx).astype(np.float32),
            (self.num_classes, num_iter,
             1))  # (num_class, num_iter, num_thresh)
        num_thresh = threshold_mean.shape[-1]
        count_correct["mean"] = np.zeros(
            (self.num_classes, num_iter, num_thresh), dtype=np.float32)

        for i in range(self.num_classes):
            threshold_2[i, :] = 2
            threshold_5[i, :] = 5
            threshold_10[i, :] = 10
            threshold_20[i, :] = 20

        num_valid_class = 0
        for cls_idx, cls_name in enumerate(self.classes):
            if not (all_poses_est[cls_idx][0] and all_poses_gt[cls_idx][0]):
                continue
            num_valid_class += 1
            for iter_i in range(num_iter):
                curr_poses_gt = all_poses_gt[cls_idx][0]
                num = len(curr_poses_gt)
                curr_poses_est = all_poses_est[cls_idx][iter_i]

                for j in range(num):
                    if iter_i == 0:
                        count_all[cls_idx] += 1
                    RT = curr_poses_est[j]  # est pose
                    pose_gt = curr_poses_gt[j]  # gt pose

                    error_rotation = re(RT[:3, :3], pose_gt[:3, :3])
                    if cls_name == "eggbox" and error_rotation > 90:
                        RT_z = np.array([[-1, 0, 0, 0], [0, -1, 0, 0],
                                         [0, 0, 1, 0]])
                        RT_sym = se3_mul(RT, RT_z)
                        error = arp_2d(RT_sym[:3, :3], RT_sym[:, 3],
                                       pose_gt[:3, :3], pose_gt[:, 3],
                                       self._points[cls_name], K)
                    else:
                        error = arp_2d(RT[:3, :3], RT[:, 3], pose_gt[:3, :3],
                                       pose_gt[:,
                                               3], self._points[cls_name], K)

                    if error < threshold_2[cls_idx, iter_i]:
                        count_correct["2"][cls_idx, iter_i] += 1
                    if error < threshold_5[cls_idx, iter_i]:
                        count_correct["5"][cls_idx, iter_i] += 1
                    if error < threshold_10[cls_idx, iter_i]:
                        count_correct["10"][cls_idx, iter_i] += 1
                    if error < threshold_20[cls_idx, iter_i]:
                        count_correct["20"][cls_idx, iter_i] += 1
                    for thresh_i in range(num_thresh):
                        if error < threshold_mean[cls_idx, iter_i, thresh_i]:
                            count_correct["mean"][cls_idx, iter_i,
                                                  thresh_i] += 1
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
        from scipy.integrate import simps

        # store plot data
        plot_data = {}
        sum_acc_mean = np.zeros(num_iter)
        sum_acc_02 = np.zeros(num_iter)
        sum_acc_05 = np.zeros(num_iter)
        sum_acc_10 = np.zeros(num_iter)
        sum_acc_20 = np.zeros(num_iter)
        for cls_idx, cls_name in enumerate(self.classes):
            if count_all[cls_idx] == 0:
                continue
            plot_data[cls_name] = []
            for iter_i in range(num_iter):
                logger.info("** {}, iter {} **".format(cls_name, iter_i + 1))
                from scipy.integrate import simps

                area = simps(count_correct["mean"][cls_idx, iter_i] /
                             float(count_all[cls_idx]),
                             dx=dx) / (50.0)
                acc_mean = area * 100
                sum_acc_mean[iter_i] += acc_mean
                acc_02 = 100 * float(
                    count_correct["2"][cls_idx, iter_i]) / float(
                        count_all[cls_idx])
                sum_acc_02[iter_i] += acc_02
                acc_05 = 100 * float(
                    count_correct["5"][cls_idx, iter_i]) / float(
                        count_all[cls_idx])
                sum_acc_05[iter_i] += acc_05
                acc_10 = 100 * float(
                    count_correct["10"][cls_idx, iter_i]) / float(
                        count_all[cls_idx])
                sum_acc_10[iter_i] += acc_10
                acc_20 = 100 * float(
                    count_correct["20"][cls_idx, iter_i]) / float(
                        count_all[cls_idx])
                sum_acc_20[iter_i] += acc_20

                fig = plt.figure()
                x_s = np.arange(0, 50, dx).astype(np.float32)
                y_s = 100 * count_correct["mean"][cls_idx, iter_i] / float(
                    count_all[cls_idx])
                plot_data[cls_name].append((x_s, y_s))
                plt.plot(x_s, y_s, "-")
                plt.xlim(0, 50)
                plt.ylim(0, 100)
                plt.grid(True)
                plt.xlabel("px")
                plt.ylabel("correctly estimated poses in %")
                plt.savefig(os.path.join(
                    output_dir,
                    "arp_2d_{}_iter{}.png".format(cls_name, iter_i + 1)),
                            dpi=fig.dpi)
                plt.close(fig)  # one figure per class/iter; close to free memory

                logger.info("threshold=[0, 50], area: {:.2f}".format(acc_mean))
                logger.info(
                    "threshold=2, correct poses: {}, all poses: {}, accuracy: {:.2f}"
                    .format(count_correct["2"][cls_idx, iter_i],
                            count_all[cls_idx], acc_02))
                logger.info(
                    "threshold=5, correct poses: {}, all poses: {}, accuracy: {:.2f}"
                    .format(count_correct["5"][cls_idx, iter_i],
                            count_all[cls_idx], acc_05))
                logger.info(
                    "threshold=10, correct poses: {}, all poses: {}, accuracy: {:.2f}"
                    .format(count_correct["10"][cls_idx, iter_i],
                            count_all[cls_idx], acc_10))
                logger.info(
                    "threshold=20, correct poses: {}, all poses: {}, accuracy: {:.2f}"
                    .format(count_correct["20"][cls_idx, iter_i],
                            count_all[cls_idx], acc_20))
                logger.info(" ")

        with open(os.path.join(output_dir, "arp_2d_xys.pkl"), "wb") as f:
            cPickle.dump(plot_data, f, protocol=2)
        logger.info("=" * 30)

        print(" ")
        # overall performance of arp 2d
        for iter_i in range(num_iter):
            logger.info(
                "---------- arp 2d performance over {} classes -----------".
                format(num_valid_class))
            logger.info("** iter {} **".format(iter_i + 1))

            logger.info("threshold=[0, 50], area: {:.2f}".format(
                sum_acc_mean[iter_i] / num_valid_class))
            logger.info("threshold=2, mean accuracy: {:.2f}".format(
                sum_acc_02[iter_i] / num_valid_class))
            logger.info("threshold=5, mean accuracy: {:.2f}".format(
                sum_acc_05[iter_i] / num_valid_class))
            logger.info("threshold=10, mean accuracy: {:.2f}".format(
                sum_acc_10[iter_i] / num_valid_class))
            logger.info("threshold=20, mean accuracy: {:.2f}".format(
                sum_acc_20[iter_i] / num_valid_class))
            logger.info(" ")

        logger.info("=" * 30)
示例#31
0
    def evaluate_pose_add(self, config, all_poses_est, all_poses_gt,
                          output_dir):
        """

        :param config:
        :param all_poses_est:
        :param all_poses_gt:
        :param output_dir:
        :param logger:
        :return:
        """
        logger.info("evaluating pose add")
        eval_method = "add"
        num_iter = config.TEST.test_iter

        count_all = np.zeros((self.num_classes, ), dtype=np.float32)
        count_correct = {
            k: np.zeros((self.num_classes, num_iter), dtype=np.float32)
            for k in ["0.02", "0.05", "0.10"]
        }

        threshold_002 = np.zeros((self.num_classes, num_iter),
                                 dtype=np.float32)
        threshold_005 = np.zeros((self.num_classes, num_iter),
                                 dtype=np.float32)
        threshold_010 = np.zeros((self.num_classes, num_iter),
                                 dtype=np.float32)
        dx = 0.0001
        threshold_mean = np.tile(
            np.arange(0, 0.1, dx).astype(np.float32),
            (self.num_classes, num_iter,
             1))  # (num_class, num_iter, num_thresh)
        num_thresh = threshold_mean.shape[-1]
        count_correct["mean"] = np.zeros(
            (self.num_classes, num_iter, num_thresh), dtype=np.float32)
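        # thresholds below are scaled per class: 2%, 5% and 10% of the model
        # diameter; the "mean" sweep grid (0..0.1) is scaled the same way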

        for i, cls_name in enumerate(self.classes):
            threshold_002[i, :] = 0.02 * self._diameters[cls_name]
            threshold_005[i, :] = 0.05 * self._diameters[cls_name]
            threshold_010[i, :] = 0.10 * self._diameters[cls_name]
            threshold_mean[i, :, :] *= self._diameters[cls_name]

        num_valid_class = 0
        for cls_idx, cls_name in enumerate(self.classes):
            if not (all_poses_est[cls_idx][0] and all_poses_gt[cls_idx][0]):
                continue
            num_valid_class += 1
            for iter_i in range(num_iter):
                curr_poses_gt = all_poses_gt[cls_idx][0]
                num = len(curr_poses_gt)
                curr_poses_est = all_poses_est[cls_idx][iter_i]

                for j in range(num):
                    if iter_i == 0:
                        count_all[cls_idx] += 1
                    RT = curr_poses_est[j]  # est pose
                    pose_gt = curr_poses_gt[j]  # gt pose
                    if cls_name == "eggbox" or cls_name == "glue" or cls_name == "bowl" or cls_name == "cup":
                        eval_method = "adi"
                        error = adi(RT[:3, :3], RT[:, 3], pose_gt[:3, :3],
                                    pose_gt[:, 3], self._points[cls_name])
                    else:
                        error = add(RT[:3, :3], RT[:, 3], pose_gt[:3, :3],
                                    pose_gt[:, 3], self._points[cls_name])

                    if error < threshold_002[cls_idx, iter_i]:
                        count_correct["0.02"][cls_idx, iter_i] += 1
                    if error < threshold_005[cls_idx, iter_i]:
                        count_correct["0.05"][cls_idx, iter_i] += 1
                    if error < threshold_010[cls_idx, iter_i]:
                        count_correct["0.10"][cls_idx, iter_i] += 1
                    for thresh_i in range(num_thresh):
                        if error < threshold_mean[cls_idx, iter_i, thresh_i]:
                            count_correct["mean"][cls_idx, iter_i,
                                                  thresh_i] += 1

        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
        from scipy.integrate import simps

        plot_data = {}

        sum_acc_mean = np.zeros(num_iter)
        sum_acc_002 = np.zeros(num_iter)
        sum_acc_005 = np.zeros(num_iter)
        sum_acc_010 = np.zeros(num_iter)
        for cls_idx, cls_name in enumerate(self.classes):
            if count_all[cls_idx] == 0:
                continue
            plot_data[cls_name] = []
            for iter_i in range(num_iter):
                logger.info("** {}, iter {} **".format(cls_name, iter_i + 1))
                from scipy.integrate import simps

                area = simps(count_correct["mean"][cls_idx, iter_i] /
                             float(count_all[cls_idx]),
                             dx=dx) / 0.1
                acc_mean = area * 100
                sum_acc_mean[iter_i] += acc_mean
                acc_002 = 100 * float(
                    count_correct["0.02"][cls_idx, iter_i]) / float(
                        count_all[cls_idx])
                sum_acc_002[iter_i] += acc_002
                acc_005 = 100 * float(
                    count_correct["0.05"][cls_idx, iter_i]) / float(
                        count_all[cls_idx])
                sum_acc_005[iter_i] += acc_005
                acc_010 = 100 * float(
                    count_correct["0.10"][cls_idx, iter_i]) / float(
                        count_all[cls_idx])
                sum_acc_010[iter_i] += acc_010

                fig = plt.figure()
                x_s = np.arange(0, 0.1, dx).astype(np.float32)
                y_s = count_correct["mean"][cls_idx, iter_i] / float(
                    count_all[cls_idx])
                plot_data[cls_name].append((x_s, y_s))
                plt.plot(x_s, y_s, "-")
                plt.xlim(0, 0.1)
                plt.ylim(0, 1)
                plt.xlabel("Average distance threshold in meter (symmetry)")
                plt.ylabel("accuracy")
                plt.savefig(os.path.join(
                    output_dir,
                    "acc_thres_{}_iter{}.png".format(cls_name, iter_i + 1)),
                            dpi=fig.dpi)
                plt.close(fig)  # one figure per class/iter; close to free memory

                logger.info(
                    "threshold=[0.0, 0.10], area: {:.2f}".format(acc_mean))
                logger.info(
                    "threshold=0.02, correct poses: {}, all poses: {}, accuracy: {:.2f}"
                    .format(count_correct["0.02"][cls_idx, iter_i],
                            count_all[cls_idx], acc_002))
                logger.info(
                    "threshold=0.05, correct poses: {}, all poses: {}, accuracy: {:.2f}"
                    .format(count_correct["0.05"][cls_idx, iter_i],
                            count_all[cls_idx], acc_005))
                logger.info(
                    "threshold=0.10, correct poses: {}, all poses: {}, accuracy: {:.2f}"
                    .format(count_correct["0.10"][cls_idx, iter_i],
                            count_all[cls_idx], acc_010))
                logger.info(" ")

        with open(os.path.join(output_dir, "{}_xys.pkl".format(eval_method)),
                  "wb") as f:
            cPickle.dump(plot_data, f, protocol=2)

        logger.info("=" * 30)

        print(" ")
        # overall performance of add
        for iter_i in range(num_iter):
            logger.info(
                "---------- add performance over {} classes -----------".
                format(num_valid_class))
            logger.info("** iter {} **".format(iter_i + 1))
            logger.info("threshold=[0.0, 0.10], area: {:.2f}".format(
                sum_acc_mean[iter_i] / num_valid_class))
            logger.info("threshold=0.02, mean accuracy: {:.2f}".format(
                sum_acc_002[iter_i] / num_valid_class))
            logger.info("threshold=0.05, mean accuracy: {:.2f}".format(
                sum_acc_005[iter_i] / num_valid_class))
            logger.info("threshold=0.10, mean accuracy: {:.2f}".format(
                sum_acc_010[iter_i] / num_valid_class))
            print(" ")

        logger.info("=" * 30)
示例#32
0
    def evaluate_pose(self, config, all_poses_est, all_poses_gt):
        # evaluate and display
        logger.info("evaluating pose")
        rot_thresh_list = np.arange(1, 11, 1)
        trans_thresh_list = np.arange(0.01, 0.11, 0.01)
        num_metric = len(rot_thresh_list)
        num_iter = config.TEST.test_iter
        rot_acc = np.zeros((self.num_classes, num_iter, num_metric))
        trans_acc = np.zeros((self.num_classes, num_iter, num_metric))
        space_acc = np.zeros((self.num_classes, num_iter, num_metric))
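        # rot_acc / trans_acc: fraction of poses below each rotation (1-10 deg)
        # and translation (0.01-0.10 m) threshold; space_acc requires both at once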

        num_valid_class = 0
        for cls_idx, cls_name in enumerate(self.classes):
            if not (all_poses_est[cls_idx][0] and all_poses_gt[cls_idx][0]):
                continue
            num_valid_class += 1
            for iter_i in range(num_iter):
                curr_poses_gt = all_poses_gt[cls_idx][0]
                num = len(curr_poses_gt)
                curr_poses_est = all_poses_est[cls_idx][iter_i]

                cur_rot_rst = np.zeros((num, 1))
                cur_trans_rst = np.zeros((num, 1))

                for j in range(num):
                    r_dist_est, t_dist_est = calc_rt_dist_m(
                        curr_poses_est[j], curr_poses_gt[j])
                    if cls_name == "eggbox" and r_dist_est > 90:
                        RT_z = np.array([[-1, 0, 0, 0], [0, -1, 0, 0],
                                         [0, 0, 1, 0]])
                        curr_pose_est_sym = se3_mul(curr_poses_est[j], RT_z)
                        r_dist_est, t_dist_est = calc_rt_dist_m(
                            curr_pose_est_sym, curr_poses_gt[j])
                    cur_rot_rst[j, 0] = r_dist_est
                    cur_trans_rst[j, 0] = t_dist_est

                for thresh_idx in range(num_metric):
                    rot_acc[cls_idx, iter_i, thresh_idx] = np.mean(
                        cur_rot_rst < rot_thresh_list[thresh_idx])
                    trans_acc[cls_idx, iter_i, thresh_idx] = np.mean(
                        cur_trans_rst < trans_thresh_list[thresh_idx])
                    space_acc[cls_idx, iter_i, thresh_idx] = np.mean(
                        np.logical_and(
                            cur_rot_rst < rot_thresh_list[thresh_idx],
                            cur_trans_rst < trans_thresh_list[thresh_idx]))

            show_list = [1, 4, 9]
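            # indices 1, 4 and 9 select the (2 deg, 0.02 m), (5 deg, 0.05 m)
            # and (10 deg, 0.10 m) threshold pairs for display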
            logger.info("------------ {} -----------".format(cls_name))
            logger.info("{:>24}: {:>7}, {:>7}, {:>7}".format(
                "[rot_thresh, trans_thresh", "RotAcc", "TraAcc", "SpcAcc"))
            for iter_i in range(num_iter):
                logger.info("** iter {} **".format(iter_i + 1))
                logger.info("{:<16}{:>8}: {:>7.2f}, {:>7.2f}, {:>7.2f}".format(
                    "average_accuracy",
                    "[{:>2}, {:>4}]".format(-1, -1),
                    np.mean(rot_acc[cls_idx, iter_i, :]) * 100,
                    np.mean(trans_acc[cls_idx, iter_i, :]) * 100,
                    np.mean(space_acc[cls_idx, iter_i, :]) * 100,
                ))
                for i, show_idx in enumerate(show_list):
                    logger.info(
                        "{:>16}{:>8}: {:>7.2f}, {:>7.2f}, {:>7.2f}".format(
                            "average_accuracy",
                            "[{:>2}, {:>4}]".format(
                                rot_thresh_list[show_idx],
                                trans_thresh_list[show_idx]),
                            rot_acc[cls_idx, iter_i, show_idx] * 100,
                            trans_acc[cls_idx, iter_i, show_idx] * 100,
                            space_acc[cls_idx, iter_i, show_idx] * 100,
                        ))
        print(" ")
        # overall performance
        for iter_i in range(num_iter):
            show_list = [1, 4, 9]
            logger.info(
                "---------- performance over {} classes -----------".format(
                    num_valid_class))
            logger.info("** iter {} **".format(iter_i + 1))
            logger.info("{:>24}: {:>7}, {:>7}, {:>7}".format(
                "[rot_thresh, trans_thresh", "RotAcc", "TraAcc", "SpcAcc"))
            logger.info("{:<16}{:>8}: {:>7.2f}, {:>7.2f}, {:>7.2f}".format(
                "average_accuracy",
                "[{:>2}, {:>4}]".format(-1, -1),
                np.sum(rot_acc[:, iter_i, :]) /
                (num_valid_class * num_metric) * 100,
                np.sum(trans_acc[:, iter_i, :]) /
                (num_valid_class * num_metric) * 100,
                np.sum(space_acc[:, iter_i, :]) /
                (num_valid_class * num_metric) * 100,
            ))
            for i, show_idx in enumerate(show_list):
                logger.info("{:>16}{:>8}: {:>7.2f}, {:>7.2f}, {:>7.2f}".format(
                    "average_accuracy",
                    "[{:>2}, {:>4}]".format(rot_thresh_list[show_idx],
                                            trans_thresh_list[show_idx]),
                    np.sum(rot_acc[:, iter_i, show_idx]) / num_valid_class *
                    100,
                    np.sum(trans_acc[:, iter_i, show_idx]) / num_valid_class *
                    100,
                    np.sum(space_acc[:, iter_i, show_idx]) / num_valid_class *
                    100,
                ))
            print(" ")
示例#33
0
def test():
    client = WindmillTestClient(__name__)
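    # suite_classes is assumed to be a module-level list of test-suite
    # classes (defined elsewhere in this file), each taking the client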
    for klass in suite_classes:
        suite = klass(client)
        suite.run()
    logger.info("Finished with test run")
示例#34
0
def resolve_with_youtube_dl(url, parameters, action):
    youtube_dl_resolver = YoutubeDL(parameters)
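    # register youtube-dl's built-in site extractors before probing the URL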
    youtube_dl_resolver.add_default_info_extractors()
    try:
        result = youtube_dl_resolver.extract_info(url, download=False)
        if result is None:
            result = {}
    except Exception as e:
        logger.error(u'Error with YoutubeDL: %s' % e)
        result = {}
    logger.info(u'YoutubeDL full result: %s' % result)
    if 'entries' in result:
        logger.info(u'Playlist resolved by YoutubeDL: %s items' % len(result['entries']))
        item_list = []
        for entry in result['entries']:
            if entry is not None and 'url' in entry:
                item_list.append(entry)
                logger.info(u'Media found: %s' % entry['url'])
        if len(item_list) > 0:
            utils.play_items(item_list, action)
            return True
        else:
            logger.info(u'No playable urls in the playlist')
    if 'url' in result:
        logger.info(u'Url resolved by YoutubeDL: %s' % result['url'])
        utils.play_url(result['url'], action, result)
        return True
    if 'requested_formats' in result:
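        # split audio/video results: prefer a DASH manifest when the adaptive
        # plugin is available, otherwise fall back to an HLS (m3u8) manifest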
        if have_adaptive_plugin:
            logger.info(u'Adaptive plugin enabled looking for dash content')
            for entry in result['requested_formats']:
                if 'container' in entry and 'manifest_url' in entry:
                    if 'dash' in entry['container']:
                        logger.info(u'Url resolved by YoutubeDL: %s' % entry['manifest_url'])
                        utils.play_url(entry['manifest_url'], action, result, True)
                        return True
        for entry in result['requested_formats']:
            if 'protocol' in entry and 'manifest_url' in entry:
                if 'm3u8' in entry['protocol']:
                    logger.info(u'Url resolved by YoutubeDL: %s' % entry['manifest_url'])
                    utils.play_url(entry['manifest_url'], action, result)
                    return True
    return False
示例#35
0
def handle_unresolved_url(data, action):
    url = unquote(data)
    logger.info(u'Trying to resolve URL (%s): %s' % (action, url))
    if xbmc.Player().isPlaying():
        utils.show_info_notification(utils.translation(32007), 1000)
    else:
        utils.show_info_notification(utils.translation(32007))
    if 'youtube.com' in url or 'youtu.be' in url:
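        # YouTube links: hand over to the official addon when it has
        # MPEG-DASH playback enabled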
        youtube_addon = xbmcaddon.Addon(id="plugin.video.youtube")
        if youtube_addon:
            if youtube_addon.getSetting("kodion.video.quality.mpd") == "true":
                logger.info(u'Youtube addon has DASH enabled, use it')
                utils.play_url('plugin://plugin.video.youtube/uri2addon/?uri=%s' % url, action)
                return
    logger.info(u'Trying to resolve with YoutubeDL')
    result = resolve_with_youtube_dl(url, {'format': 'best', 'no_color': 'true', 'ignoreerrors': 'true'}, action)
    if result:
        return
    # Second pass with new params to fix site like reddit dash streams
    logger.info(u'Trying to resolve with YoutubeDL other options')
    result = resolve_with_youtube_dl(url, {'format': 'bestvideo+bestaudio/best', 'no_color': 'true', 'ignoreerrors': 'true'}, action)
    if result:
        return
    logger.error(u'Url not resolved by YoutubeDL')

    if utils.is_python_3():
        logger.info(u'Skipping urlResolver as running on Python 3')
    else:
        logger.info(u'Trying to resolve with urlResolver')
        stream_url = urlresolver.HostedMediaFile(url=url).resolve()
        if stream_url:
            logger.info(u'Url resolved by urlResolver: %s' % stream_url)
            utils.play_url(stream_url, action)
            return

    logger.info(u'Trying to play as basic url')
    utils.play_url(url, action)
    if url:
        utils.show_error_notification(utils.translation(32006))
示例#36
0
# -*- coding: utf-8 -*-
import sys

# Hack: scripts invoked via JSON-RPC do not receive a script handle,
# so insert a dummy one to keep the sys.argv offsets consistent
sys.argv.insert(1, 0)

import xbmcgui
import xbmcaddon
from lib import share, stream, utils
from lib.utils import logger, translation

logger.info("Starting script version: %s", utils.ADDON_VERSION)

argument = {}
for arg in sys.argv[2:]:
    argInfo = arg.split('=', 1)  # split on the first '=' only; values may contain '='
    argument[argInfo[0]] = argInfo[1]

logger.info("Parameters: %s" % argument)

commands = {'share': share.run, 'stream': stream.run}

if 'action' not in argument:
    xbmcaddon.Addon().openSettings()
else:
    if argument['action'] in commands:
        commands[argument['action']](argument)
    else:
        logger.error("Command not supported: %s" % argument['action'])
        xbmcgui.Dialog().ok(utils.ADDON_NAME, translation(32004),