Example #1
  def _PrintInfoAboutBucketListingRef(self, bucket_listing_ref):
    """Print listing info for given bucket_listing_ref.

    Args:
      bucket_listing_ref: BucketListing being listed.

    Returns:
      Tuple of (number of objects, object size in bytes).

    Raises:
      Exception: if a bug is encountered in the calling code.
    """
    obj = bucket_listing_ref.root_object
    url_str = bucket_listing_ref.url_string
    if (obj.metadata and
        S3_DELETE_MARKER_GUID in obj.metadata.additionalProperties):
      size_string = '0'
      num_bytes = 0
      num_objs = 0
      url_str += '<DeleteMarker>'
    else:
      size_string = (MakeHumanReadable(obj.size)
                     if self.human_readable else str(obj.size))
      num_bytes = obj.size
      num_objs = 1

    if not self.summary_only:
      url_detail = '{size:<11}  {url}{ending}'.format(
          size=size_string,
          url=six.ensure_text(url_str),
          ending=six.ensure_text(self.line_ending))
      print_to_fd(url_detail, file=sys.stdout, end='')

    return (num_objs, num_bytes)
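A reduced sketch of the line formatting above, with a hypothetical stand-in for gsutil's MakeHumanReadable helper (only the format string and the ensure_text call mirror the original):

import six

def human_readable(num_bytes):
    """Hypothetical stand-in for gsutil's MakeHumanReadable."""
    for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB'):
        if num_bytes < 1024 or unit == 'TiB':
            return '%.2f %s' % (num_bytes, unit)
        num_bytes /= 1024.0

# Left-align the size in an 11-character column, then append the (possibly bytes) URL.
line = '{size:<11}  {url}{ending}'.format(
    size=human_readable(1536),
    url=six.ensure_text(b'gs://bucket/obj'),
    ending='\n')
print(line, end='')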
Example #2
 def test_ensure_text(self):
     converted_unicode = six.ensure_text(self.UNICODE_EMOJI, encoding='utf-8', errors='strict')
     converted_binary = six.ensure_text(self.BINARY_EMOJI, encoding="utf-8", errors='strict')
     if six.PY2:
         # PY2: unicode -> unicode
         assert converted_unicode == self.UNICODE_EMOJI and isinstance(converted_unicode, unicode)
         # PY2: str -> unicode
         assert converted_binary == self.UNICODE_EMOJI and isinstance(converted_binary, unicode)
     else:
         # PY3: str -> str
         assert converted_unicode == self.UNICODE_EMOJI and isinstance(converted_unicode, str)
         # PY3: bytes -> str
         assert converted_binary == self.UNICODE_EMOJI and isinstance(converted_binary, str)
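For context, the conversion this test pins down can be shown standalone (Python 3 semantics; the emoji value here is illustrative, not six's test constant):

import six

UNICODE_EMOJI = u'\U0001F600'
BINARY_EMOJI = UNICODE_EMOJI.encode('utf-8')

# Text input passes through unchanged; bytes input is decoded with the given encoding.
assert six.ensure_text(UNICODE_EMOJI, encoding='utf-8', errors='strict') == UNICODE_EMOJI
assert six.ensure_text(BINARY_EMOJI, encoding='utf-8', errors='strict') == UNICODE_EMOJI
assert isinstance(six.ensure_text(BINARY_EMOJI), str)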
 def testMatchingAllObjects(self):
   """Tests matching all objects, based on wildcard."""
   actual_obj_uri_strs = set(
       six.ensure_text(str(u)) for u in self._test_wildcard_iterator(
           self.test_bucket0_uri.clone_replace_name('**')).IterAll(
               expand_top_level_buckets=True))
   self.assertEqual(self.test_bucket0_obj_uri_strs, actual_obj_uri_strs)
def GetCredentialStoreKey(credentials, api_version):
  """Disambiguates a credential for caching in a credential store.

  Different credential types have different fields that identify them.  This
  function assembles relevant information in a string to be used as the key for
  accessing a credential.  Note that in addition to uniquely identifying the
  entity to which a credential corresponds, we must differentiate between two or
  more of that entity's credentials that have different attributes such that the
  credentials should not be treated as interchangeable, e.g. if they target
  different API versions (happens for developers targeting different test
  environments), have different private key IDs (for service account JSON
  keyfiles), or target different provider token (refresh) URIs.

  Args:
    credentials: An OAuth2Credentials object.
    api_version: JSON API version being used.

  Returns:
    A string that can be used as the key to identify a credential, e.g.
    "v1-909320924072.apps.googleusercontent.com-1/rEfrEshtOkEn-https://..."
  """
  # Note: We don't include the scopes as part of the key. For a user credential
  # object, we always construct it with manually added scopes that are necessary
  # to negotiate reauth challenges - those scopes don't necessarily correspond
  # to the scopes the refresh token was created with. We avoid key name
  # mismatches for the same refresh token by not including scopes in the key
  # string.
  key_parts = [api_version]
  if isinstance(credentials, devshell.DevshellCredentials):
    key_parts.append(credentials.user_email)
  elif isinstance(credentials, ServiceAccountCredentials):
    # pylint: disable=protected-access
    key_parts.append(credentials._service_account_email)
    if getattr(credentials, '_private_key_id', None):  # JSON keyfile.
      # Differentiate between two different JSON keyfiles for the same service
      # account.
      key_parts.append(credentials._private_key_id)
    elif getattr(credentials, '_private_key_pkcs12', None):  # P12 keyfile
      # Use a prefix of the Base64-encoded PEM string to differentiate it from
      # others. Using a prefix of reasonable length prevents the key from being
      # unnecessarily large, and the likelihood of having two PEM strings with
      # the same prefixes is sufficiently low.
      key_parts.append(base64.b64encode(credentials._private_key_pkcs12)[:20])
    # pylint: enable=protected-access
  elif isinstance(credentials, oauth2client.client.OAuth2Credentials):
    if credentials.client_id and credentials.client_id != 'null':
      key_parts.append(credentials.client_id)
    else:
      key_parts.append('noclientid')
    key_parts.append(credentials.refresh_token or 'norefreshtoken')

  # If a cached credential is targeting provider token URI 'A' for token refresh
  # requests, then the user changes their boto file or private key file to
  # target URI 'B', we don't want to treat the cached and the new credential as
  # interchangeable.  This applies for all credentials that store a token URI.
  if getattr(credentials, 'token_uri', None):
    key_parts.append(credentials.token_uri)
  key_parts = [six.ensure_text(part) for part in key_parts]
  return '-'.join(key_parts)
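To make the key format from the docstring concrete, here is a minimal sketch using a stand-in credential object (the class and its values are hypothetical; only the attribute names match what the OAuth2Credentials branch reads):

import six

class FakeUserCredential(object):
    """Hypothetical object exposing the attributes the OAuth2 branch reads."""
    client_id = '909320924072.apps.googleusercontent.com'
    refresh_token = '1/rEfrEshtOkEn'
    token_uri = 'https://oauth2.example.com/token'

def sketch_credential_store_key(credentials, api_version='v1'):
    key_parts = [api_version,
                 credentials.client_id or 'noclientid',
                 credentials.refresh_token or 'norefreshtoken']
    if getattr(credentials, 'token_uri', None):
        key_parts.append(credentials.token_uri)
    return '-'.join(six.ensure_text(part) for part in key_parts)

# -> 'v1-909320924072.apps.googleusercontent.com-1/rEfrEshtOkEn-https://...'
print(sketch_credential_store_key(FakeUserCredential()))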
Example #5
  def tearDown(self):
    super(GsUtilUnitTestCase, self).tearDown()

    self.root_logger.handlers = self.log_handlers_save
    self.temp_log_handler.flush()
    self.temp_log_handler.close()
    self.log_handler_stream.seek(0)
    log_output = self.log_handler_stream.read()
    self.log_handler_stream.close()
    os.unlink(self.log_handler_file)

    sys.stdout.seek(0)
    sys.stderr.seek(0)
    if six.PY2:
      stdout = sys.stdout.read()
      stderr = sys.stderr.read()
    else:
      try:
        stdout = sys.stdout.read()
        stderr = sys.stderr.read()
      except UnicodeDecodeError:
        sys.stdout.seek(0)
        sys.stderr.seek(0)
        stdout = sys.stdout.buffer.read()
        stderr = sys.stderr.buffer.read()
    stdout += ''.join(self.accumulated_stdout)
    stderr += ''.join(self.accumulated_stderr)
    sys.stdout.close()
    sys.stderr.close()
    sys.stdout = self.stdout_save
    sys.stderr = self.stderr_save
    os.unlink(self.stdout_file)
    os.unlink(self.stderr_file)

    _id = six.ensure_text(self.id())
    if self.is_debugging and stdout:
      sys.stderr.write('==== stdout {} ====\n'.format(_id))
      sys.stderr.write(stdout)
      sys.stderr.write('==== end stdout ====\n')
    if self.is_debugging and stderr:
      sys.stderr.write('==== stderr {} ====\n'.format(_id))
      sys.stderr.write(stderr)
      sys.stderr.write('==== end stderr ====\n')
    if self.is_debugging and log_output:
      sys.stderr.write('==== log output {} ====\n'.format(_id))
      sys.stderr.write(log_output)
      sys.stderr.write('==== end log output ====\n')
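The bytes fallback in the middle of this teardown is the part worth calling out: captured output is read as text first and re-read from the underlying buffer only when decoding fails. A reduced sketch of that pattern:

import io

def read_captured(stream):
    """Read captured output as text, falling back to raw bytes (sketch)."""
    stream.seek(0)
    try:
        return stream.read()
    except UnicodeDecodeError:
        stream.seek(0)
        return stream.buffer.read()

# A text-mode handle over bytes that are not valid UTF-8 triggers the fallback.
wrapped = io.TextIOWrapper(io.BytesIO(b'ok \xff\xfe'), encoding='utf-8')
print(read_captured(wrapped))  # -> b'ok \xff\xfe'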
Example #6
def log_url(response, **kwargs):
    """Response hook to log request URL."""
    request = response.request
    log.debug(
        '{method} URL: {url} [Status: {status}]', {
            'method': request.method,
            'url': request.url,
            'status': response.status_code,
        }
    )
    log.debug('User-Agent: {}'.format(request.headers['User-Agent']))

    if request.method.upper() == 'POST':
        if request.body:
            text_body = ensure_text(request.body, errors='replace')
            if 'multipart/form-data' not in request.headers.get('content-type', ''):
                body = text_body
            elif len(text_body) > 99:
                body = text_body[0:99].replace('\n', ' ') + '...'
            else:
                body = text_body.replace('\n', ' ')

            log.debug('With post data: {0}', body)
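The ensure_text(..., errors='replace') call is what keeps this hook from raising on binary POST bodies; undecodable bytes become U+FFFD instead. A reduced sketch with an illustrative payload:

from six import ensure_text

body = b'field=value&blob=\xff\xd8\xff\xe0'  # illustrative binary payload
text_body = ensure_text(body, errors='replace')
preview = text_body[:99].replace('\n', ' ')
if len(text_body) > 99:
    preview += '...'
print('With post data: {0}'.format(preview))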
Example #7
def GetOutputOpNames(graph,
                     inference_graph_proto,
                     subgraphs=None,
                     preserve_colocation_nodes=True,
                     preserve_saver_restore_nodes=False,
                     preserve_extra_ops=None):
    """Gets output op names from an inference graph.

  Args:
    graph: The tf graph.
    inference_graph_proto: an InferenceGraph proto.
    subgraphs: an optional list of subgraph names. If provided, only output ops
      from these subgraphs are preserved. Otherwise, all subgraphs are included.
    preserve_colocation_nodes: a Python bool, defaults to True. Preserves nodes
      colocated with the closure of output ops in the returned array.
    preserve_saver_restore_nodes: a Python bool, defaults to False. Preserves
      nodes for restoring according to inference_graph_proto.saver_def.
    preserve_extra_ops: an optional list of extra op names to preserve as long
      as they are present in the graph.

  Returns:
    Array of tf op names that should be preserved in the graph.
  """
    output_op_names = set()

    def _GetOpName(tensor_or_op_name):
        """Returns the op name of the given node name."""
        # Tensor names have format <op_name>:<output_index>. Some inference
        # graphs put tensors and others put ops in the feeds/fetches (depends
        # on how it is used). We differentiate here. We still do the lookup in
        # the graph to sanity check (versus relying on the text manipulation).
        # If this logic ever breaks, TensorFlow will raise a ValueError with
        # a description of the syntax of each.
        if re.search(r':[0-9]+$', tensor_or_op_name):
            # Tensor-name.
            t = graph.get_tensor_by_name(tensor_or_op_name)
            return t.op.name
        else:
            op = graph.get_operation_by_name(tensor_or_op_name)
            return op.name

    for subgraph_name, subgraph in six.iteritems(
            inference_graph_proto.subgraphs):
        if subgraphs and subgraph_name not in subgraphs:
            tf.logging.info('Skip subgraph %s.', subgraph_name)
            continue
        # Sometimes feeds aren't connected to any outputs, but keep them in the
        # graph anyway to avoid errors.
        for tensor_or_op_name in (list(subgraph.feeds.values()) +
                                  list(subgraph.fetches.values())):
            output_op_names.add(_GetOpName(tensor_or_op_name))

    if preserve_saver_restore_nodes:
        # Only the nodes needed for restoring are preserved;
        # saver_def.save_tensor_name is skipped because it's only used for saving.
        saver_def = inference_graph_proto.saver_def
        output_op_names.add(_GetOpName(saver_def.filename_tensor_name))
        output_op_names.add(_GetOpName(saver_def.restore_op_name))

    if not preserve_colocation_nodes and not preserve_extra_ops:
        return sorted(list(output_op_names))

    # We also need to preserve any nodes that are used for colocation.
    # E.g., a node may have this attr:
    #   attr {
    #     key: "_class"
    #     value {
    #       list {
    #         s: "loc:@inference/embedding_lookup/Read/ReadVariableOp"
    #       }
    #     }
    #   }
    #
    # In this case, we need to make sure the node
    # inference/embedding_lookup/Read/ReadVariableOp is not pruned.
    #
    # TODO(zhifengc): It's possible that it's better to fix in
    # tf.graph_util.extract_sub_graph.
    graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(),
                                                list(output_op_names))
    reachable_vars = [node.name for node in graph_def.node]

    for node in graph.get_operations():
        if preserve_extra_ops and node.name in preserve_extra_ops:
            output_op_names.add(node.name)
        elif preserve_colocation_nodes and '_class' in node.node_def.attr:
            for loc in node.node_def.attr['_class'].list.s:
                loc = six.ensure_text(loc, 'utf-8')
                if loc.startswith('loc:@'):
                    loc_name = loc[5:]
                    if loc_name not in reachable_vars:
                        # Skip nodes that cannot be reached from the pruned graph.
                        continue
                    output_op_names.add(node.name)

    return sorted(list(output_op_names))
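The tensor-versus-op distinction in _GetOpName comes down to a trailing ':<output_index>' suffix; the graph lookups only sanity-check it. A standalone sketch of that name handling, without a graph:

import re

def op_name_of(tensor_or_op_name):
    """Strip a trailing ':<output_index>' if present (sketch of _GetOpName's test)."""
    if re.search(r':[0-9]+$', tensor_or_op_name):
        return tensor_or_op_name.rsplit(':', 1)[0]
    return tensor_or_op_name

assert op_name_of('decoder/logits:0') == 'decoder/logits'
assert op_name_of('init_all_tables') == 'init_all_tables'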
Example #8
    def main(self):
        actions = (self.options.remove_duplicates
                   or self.options.merge_potmsgsets
                   or self.options.merge_translationmessages)

        if not actions:
            raise LaunchpadScriptFailure(
                "Select at least one action: remove duplicates, merge "
                "POTMsgSets, and/or merge TranslationMessages.")

        if self.options.product and self.options.distribution:
            raise LaunchpadScriptFailure(
                "Merge a product or a distribution, but not both.")

        if not (self.options.product or self.options.distribution):
            raise LaunchpadScriptFailure(
                "Specify a product or distribution to merge.")

        if self.options.sourcepackage and not self.options.distribution:
            raise LaunchpadScriptFailure(
                "Selecting a package only makes sense for distributions.")

        if self.options.product:
            product = getUtility(IProductSet).getByName(self.options.product)
            distribution = None
            if product is None:
                raise LaunchpadScriptFailure("Unknown product: '%s'" %
                                             self.options.product)
        else:
            product = None
            # import here to avoid circular import.
            from lp.registry.interfaces.distribution import IDistributionSet
            distribution = getUtility(IDistributionSet).getByName(
                self.options.distribution)
            if distribution is None:
                raise LaunchpadScriptFailure("Unknown distribution: '%s'" %
                                             self.options.distribution)

        if self.options.sourcepackage is None:
            sourcepackagename = None
        else:
            sourcepackagename = getUtility(ISourcePackageNameSet).queryByName(
                self.options.sourcepackage)
            if sourcepackagename is None:
                raise LaunchpadScriptFailure(
                    "Unknown source package name: '%s'" %
                    self.options.sourcepackage)

        self._setUpUtilities()

        subset = self.template_set.getSharingSubset(
            product=product,
            distribution=distribution,
            sourcepackagename=sourcepackagename)
        template_regex = self.options.template_names
        if template_regex is not None:
            template_regex = six.ensure_text(template_regex)
        equivalence_classes = subset.groupEquivalentPOTemplates(template_regex)

        class_count = len(equivalence_classes)
        log.info("Merging %d template equivalence classes." % class_count)

        tm = TransactionManager(self.txn, self.options.dry_run)
        for number, name in enumerate(sorted(equivalence_classes.iterkeys())):
            templates = equivalence_classes[name]
            log.info(
                "Merging equivalence class '%s': %d template(s) (%d / %d)" %
                (name, len(templates), number + 1, class_count))
            log.debug("Templates: %s" % str(templates))
            merger = TranslationMerger(templates, tm)
            if self.options.remove_duplicates:
                log.info("Removing duplicate messages.")
                merger.removeDuplicateMessages()
                tm.endTransaction(intermediate=True)

            if self.options.merge_potmsgsets:
                log.info("Merging POTMsgSets.")
                merger.mergePOTMsgSets()
                tm.endTransaction(intermediate=True)

            if self.options.merge_translationmessages:
                log.info("Merging TranslationMessages.")
                merger.mergeTranslationMessages()

            tm.endTransaction()

        log.info("Done.")
Example #9
 def list_to_str(iterable):
     return [six.ensure_text(str(i)) for i in iterable]
Example #10
sys.path.insert(0, os.path.join(here, "third_party", "html5lib"))
sys.path.insert(0, os.path.join(here, "third_party", "zipp"))
sys.path.insert(0, os.path.join(here, "third_party", "more-itertools"))
sys.path.insert(0, os.path.join(here, "third_party", "packaging"))
sys.path.insert(0, os.path.join(here, "third_party", "pathlib2"))
sys.path.insert(0, os.path.join(here, "third_party", "pluggy", "src"))
sys.path.insert(0, os.path.join(here, "third_party", "py"))
sys.path.insert(0, os.path.join(here, "third_party", "pytest"))
sys.path.insert(0, os.path.join(here, "third_party", "pytest", "src"))
sys.path.insert(0, os.path.join(here, "third_party", "pytest-asyncio"))
sys.path.insert(0, os.path.join(here, "third_party", "six"))
sys.path.insert(0, os.path.join(here, "third_party", "webencodings"))
sys.path.insert(0, os.path.join(here, "third_party", "h2"))
sys.path.insert(0, os.path.join(here, "third_party", "hpack"))
sys.path.insert(0, os.path.join(here, "third_party", "hyperframe"))
sys.path.insert(0, os.path.join(here, "third_party", "certifi"))
sys.path.insert(0, os.path.join(here, "third_party", "hyper"))
sys.path.insert(0, os.path.join(here, "third_party", "websockets", "src"))
sys.path.insert(0, os.path.join(here, "third_party", "iniconfig", "src"))
if sys.version_info < (3, 8):
    sys.path.insert(0, os.path.join(here, "third_party", "importlib_metadata"))
sys.path.insert(0, os.path.join(here, "webdriver"))
sys.path.insert(0, os.path.join(here, "wptrunner"))

if sys.version_info[0] == 2:
    sys.path.insert(0, os.path.join(here, "third_party", "enum"))

# We can't import six until we've set the path above.
from six import ensure_text
repo_root = ensure_text(os.path.abspath(os.path.join(here, os.pardir)))
Example #11
 def register_destruction(x):
   destructions.append(
       x if isinstance(x, str) else six.ensure_text(x, 'utf-8'))
   return 0
Example #12
 def __str__(self):
     msg = self.message
     if not isinstance(msg, str):
         msg = str(msg)
     return six.ensure_text(msg)
Example #13
def mozharness_test_on_generic_worker(config, job, taskdesc):
    run = job['run']
    test = taskdesc['run']['test']
    mozharness = test['mozharness']
    worker = taskdesc['worker'] = job['worker']

    bitbar_script = 'test-linux.sh'

    is_macosx = worker['os'] == 'macosx'
    is_windows = worker['os'] == 'windows'
    is_linux = worker['os'] == 'linux' or worker['os'] == 'linux-bitbar'
    is_bitbar = worker['os'] == 'linux-bitbar'
    assert is_macosx or is_windows or is_linux

    artifacts = [
        {
            'name': 'public/logs',
            'path': 'logs',
            'type': 'directory'
        },
    ]

    # jittest doesn't have blob_upload_dir
    if test['test-name'] != 'jittest':
        artifacts.append({
            'name': 'public/test_info',
            'path': 'build/blobber_upload_dir',
            'type': 'directory'
        })

    if is_bitbar:
        artifacts = [
            {
                'name': 'public/test/',
                'path': 'artifacts/public',
                'type': 'directory'
            },
            {
                'name': 'public/logs/',
                'path': 'workspace/logs',
                'type': 'directory'
            },
            {
                'name': 'public/test_info/',
                'path': 'workspace/build/blobber_upload_dir',
                'type': 'directory'
            },
        ]

    if 'installer-url' in mozharness:
        installer_url = mozharness['installer-url']
    else:
        upstream_task = '<build-signing>' if mozharness[
            'requires-signed-builds'] else '<build>'
        installer_url = get_artifact_url(upstream_task,
                                         mozharness['build-artifact-name'])

    worker['os-groups'] = test['os-groups']

    # run-as-administrator is a feature for workers with UAC enabled and as such should not be
    # included in tasks on workers that have UAC disabled. Currently UAC is only enabled on
    # gecko Windows 10 workers, however this may be subject to change. Worker type
    # environment definitions can be found in https://github.com/mozilla-releng/OpenCloudConfig
    # See https://docs.microsoft.com/en-us/windows/desktop/secauthz/user-account-control
    # for more information about UAC.
    if test.get('run-as-administrator', False):
        if job['worker-type'].startswith('t-win10-64'):
            worker['run-as-administrator'] = True
        else:
            raise Exception('run-as-administrator not supported on {}'.format(
                job['worker-type']))

    if test['reboot']:
        raise Exception('reboot: {} not supported on generic-worker'.format(
            test['reboot']))

    worker['max-run-time'] = test['max-run-time']
    worker['retry-exit-status'] = test['retry-exit-status']
    worker['artifacts'] = artifacts

    env = worker.setdefault('env', {})
    env['GECKO_HEAD_REPOSITORY'] = config.params['head_repository']
    env['GECKO_HEAD_REV'] = config.params['head_rev']

    # this list will get cleaned up / reduced / removed in bug 1354088
    if is_macosx:
        env.update({
            'LC_ALL': 'en_US.UTF-8',
            'LANG': 'en_US.UTF-8',
            'MOZ_NODE_PATH': '/usr/local/bin/node',
            'PATH': '/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin',
            'SHELL': '/bin/bash',
        })
    elif is_bitbar:
        env.update({
            'MOZHARNESS_CONFIG': ' '.join(mozharness['config']),
            'MOZHARNESS_SCRIPT': mozharness['script'],
            'MOZHARNESS_URL': {
                'artifact-reference': '<build/public/build/mozharness.zip>'
            },
            'MOZILLA_BUILD_URL': {
                'task-reference': installer_url
            },
            "MOZ_NO_REMOTE": '1',
            "NEED_XVFB": "false",
            "XPCOM_DEBUG_BREAK": 'warn',
            "NO_FAIL_ON_TEST_ERRORS": '1',
            "MOZ_HIDE_RESULTS_TABLE": '1',
            "MOZ_NODE_PATH": "/usr/local/bin/node",
            'TASKCLUSTER_WORKER_TYPE': job['worker-type'],
        })

    extra_config = {
        'installer_url': installer_url,
        'test_packages_url': test_packages_url(taskdesc),
    }
    env['EXTRA_MOZHARNESS_CONFIG'] = {
        'task-reference': six.ensure_text(json.dumps(extra_config))
    }

    if is_windows:
        mh_command = [
            'c:\\mozilla-build\\python\\python.exe', '-u',
            'mozharness\\scripts\\' + normpath(mozharness['script'])
        ]
    elif is_bitbar:
        mh_command = ['bash', "./{}".format(bitbar_script)]
    elif is_macosx and 'macosx1014-64' in test['test-platform']:
        mh_command = [
            '/usr/local/bin/python2', '-u',
            'mozharness/scripts/' + mozharness['script']
        ]
    else:
        # is_linux or is_macosx
        mh_command = [
            # Using /usr/bin/python2.7 rather than python2.7 because
            # /usr/local/bin/python2.7 is broken on the mac workers.
            # See bug #1547903.
            '/usr/bin/python2.7',
            '-u',
            'mozharness/scripts/' + mozharness['script']
        ]

    for mh_config in mozharness['config']:
        cfg_path = 'mozharness/configs/' + mh_config
        if is_windows:
            cfg_path = normpath(cfg_path)
        mh_command.extend(['--cfg', cfg_path])
    mh_command.extend(mozharness.get('extra-options', []))
    if mozharness.get('download-symbols'):
        if isinstance(mozharness['download-symbols'], text_type):
            mh_command.extend(
                ['--download-symbols', mozharness['download-symbols']])
        else:
            mh_command.extend(['--download-symbols', 'true'])
    if mozharness.get('include-blob-upload-branch'):
        mh_command.append('--blob-upload-branch=' + config.params['project'])

    if test.get('test-manifests'):
        env['MOZHARNESS_TEST_PATHS'] = six.ensure_text(
            json.dumps({test['suite']: test['test-manifests']}))

    # TODO: remove the need for run['chunked']
    elif mozharness.get('chunked') or test['chunks'] > 1:
        mh_command.append('--total-chunk={}'.format(test['chunks']))
        mh_command.append('--this-chunk={}'.format(test['this-chunk']))

    if config.params.is_try():
        env['TRY_COMMIT_MSG'] = config.params['message']

    worker['mounts'] = [{
        'directory': '.',
        'content': {
            'artifact': get_artifact_path(taskdesc, 'mozharness.zip'),
            'task-id': {
                'task-reference': '<build>'
            }
        },
        'format': 'zip'
    }]
    if is_bitbar:
        a_url = config.params.file_url(
            'taskcluster/scripts/tester/{}'.format(bitbar_script), )
        worker['mounts'] = [{
            'file': bitbar_script,
            'content': {
                'url': a_url,
            },
        }]

    job['run'] = {
        'workdir': run['workdir'],
        'tooltool-downloads': mozharness['tooltool-downloads'],
        'checkout': test['checkout'],
        'command': mh_command,
        'using': 'run-task',
    }
    if is_bitbar:
        job['run']['run-as-root'] = True
    configure_taskdesc_for_run(config, job, taskdesc, worker['implementation'])
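The six.ensure_text(json.dumps(...)) wrapping used for EXTRA_MOZHARNESS_CONFIG and MOZHARNESS_TEST_PATHS (here and in the docker and autophone variants below) normalizes the serialized config to text: json.dumps can return a byte string on Python 2 and returns text on Python 3, and the task definition wants text either way. A reduced sketch with illustrative values:

import json
import six

extra_config = {
    'installer_url': 'https://example.com/target.tar.bz2',
    'test_packages_url': 'https://example.com/test_packages.json',
}
env_value = six.ensure_text(json.dumps(extra_config))
assert isinstance(env_value, six.text_type)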
Example #14
 def _PrintSummaryLine(self, num_bytes, name):
   size_string = (MakeHumanReadable(num_bytes)
                  if self.human_readable else six.text_type(num_bytes))
   text_util.print_to_fd('{size:<11}  {name}'.format(
       size=size_string, name=six.ensure_text(name)),
                         end=self.line_ending)
Example #15
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|\.|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link,
                          url).replace('%3A+',
                                       '-').replace('+',
                                                    '-').replace('--',
                                                                 '-').lower()

            r = client.request(url)
            r = client.parseDOM(r, 'h2', attrs={'class': 'title'})
            r = [
                re.findall('<a class=""\s*href="([^"]*)"\s*title="([^"]*)', i,
                           re.DOTALL)[0] for i in r
            ]

            hostDict = hostprDict + hostDict
            items = []
            for item in r:
                try:
                    t = item[1]
                    t1 = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '', t)

                    if not cleantitle.get(t1) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        t)[-1].upper()

                    if not y == hdlr: raise Exception()

                    data = client.request(item[0])
                    data = re.findall('<a href="([^"]*)', data)

                    u = [(t, i) for i in data]
                    items += u

                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    url = item[1]
                    if any(x in url for x in [
                            '.rar', '.zip', '.iso', 'www.share-online.biz',
                            'https://ouo.io', 'http://guard.link'
                    ]):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = ensure_text(url)
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    host = client.replaceHTMLCodes(host)
                    host = ensure_text(host)
                    info = ' | '.join(info)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass
            return sources
        except:
            return sources
Example #16
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            year = data['year']
            title = cleantitle.get_query(title)
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else year
            premDate = ''

            query = '%s S%02dE%02d' % ( title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (title, year)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
            query = query.replace(" ", "-")

            _base_link = self.base_link if int(year) >= 2021 else self.old_base_link

            #url = self.search_link % quote_plus(query)
            #url = urljoin(_base_link, url)

            url = _base_link + query

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')

            if r is None and 'tvshowtitle' in data:
                season = re.search('S(.*?)E', hdlr)
                season = season.group(1)
                query = title
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
                query = query + "-S" + season
                query = query.replace("&", "and")
                query = query.replace("  ", " ")
                query = query.replace(" ", "-")
                url = _base_link + query
                r = cfScraper.get(url).content
                r = ensure_text(r, errors='replace')

            for loopCount in list(range(0, 2)):
                if loopCount == 1 or (r is None and 'tvshowtitle' in data):

                    #premDate = re.sub('[ \.]', '-', data['premiered'])
                    query = re.sub(r'[\\\\:;*?"<>|/\-\']', '', title)
                    query = query.replace(
                        "&", " and ").replace(
                        "  ", " ").replace(
                        " ", "-")  # throw in extra spaces around & just in case
                    #query = query + "-" + premDate

                    url = _base_link + query
                    url = url.replace('The-Late-Show-with-Stephen-Colbert', 'Stephen-Colbert')

                    r = cfScraper.get(url).content
                    r = ensure_text(r, errors='replace')

                posts = client.parseDOM(r, "div", attrs={"class": "content"})
                #hostDict = hostprDict + hostDict
                items = []
                for post in posts:
                    try:
                        u = client.parseDOM(post, 'a', ret='href')
                        for i in u:
                            try:
                                name = str(i)
                                if hdlr in name.upper():
                                    items.append(name)
                                #elif len(premDate) > 0 and premDate in name.replace(".", "-"):
                                    #items.append(name)
                            except:
                                log_utils.log('RLSBB - Exception', 1)
                                pass
                    except:
                        log_utils.log('RLSBB - Exception', 1)
                        pass

                if len(items) > 0:
                    break

            seen_urls = set()

            for item in items:
                try:
                    info = []

                    url = str(item)
                    url = client.replaceHTMLCodes(url)
                    url = ensure_text(url)

                    if url in seen_urls:
                        continue
                    seen_urls.add(url)

                    host = url.replace("\\", "")
                    host2 = host.strip('"')
                    host = re.findall('([\w]+[.][\w]+)$', urlparse(host2.strip().lower()).netloc)[0]

                    if host not in hostDict:
                        continue
                    if any(x in host2 for x in ['.rar', '.zip', '.iso', '.part']):
                        continue

                    quality, info = source_utils.get_release_quality(host2)

                    #try:
                    #    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    #    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    #    size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                    #    size = '%.2f GB' % size
                    #    info.append(size)
                    #except:
                    #    pass

                    info = ' | '.join(info)

                    host = client.replaceHTMLCodes(host)
                    host = ensure_text(host)
                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                    'url': host2, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    log_utils.log('RLSBB - Exception', 1)
                    pass
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check
            return sources
        except:
            log_utils.log('RLSBB - Exception', 1)
            return sources
Example #17
 def _PrintPrefixLong(blr):
     text_util.print_to_fd(
         '%-33s%s' % ('', six.ensure_text(blr.url_string)))
Example #18
 def MaybePrintBucketHeader(blr):
     if len(self.args) > 1:
         text_util.print_to_fd('%s:' % six.ensure_text(blr.url_string))
Example #19
def mozharness_test_on_docker(config, job, taskdesc):
    run = job['run']
    test = taskdesc['run']['test']
    mozharness = test['mozharness']
    worker = taskdesc['worker'] = job['worker']

    # apply some defaults
    worker['docker-image'] = test['docker-image']
    worker['allow-ptrace'] = True  # required for all tests, for crashreporter
    worker['loopback-video'] = test['loopback-video']
    worker['loopback-audio'] = test['loopback-audio']
    worker['max-run-time'] = test['max-run-time']
    worker['retry-exit-status'] = test['retry-exit-status']
    if 'android-em-7.0-x86' in test['test-platform']:
        worker['privileged'] = True

    artifacts = [
        # (artifact name prefix, in-image path)
        ("public/logs/", "{workdir}/workspace/logs/".format(**run)),
        ("public/test", "{workdir}/artifacts/".format(**run)),
        ("public/test_info/",
         "{workdir}/workspace/build/blobber_upload_dir/".format(**run)),
    ]

    if 'installer-url' in mozharness:
        installer_url = mozharness['installer-url']
    else:
        installer_url = get_artifact_url('<build>',
                                         mozharness['build-artifact-name'])

    mozharness_url = get_artifact_url(
        '<build>', get_artifact_path(taskdesc, 'mozharness.zip'))

    worker['artifacts'] = [{
        'name': prefix,
        'path': os.path.join('{workdir}/workspace'.format(**run), path),
        'type': 'directory',
    } for (prefix, path) in artifacts]

    env = worker.setdefault('env', {})
    env.update({
        'MOZHARNESS_CONFIG': ' '.join(mozharness['config']),
        'MOZHARNESS_SCRIPT': mozharness['script'],
        'MOZILLA_BUILD_URL': {
            'task-reference': installer_url
        },
        'NEED_PULSEAUDIO': 'true',
        'NEED_WINDOW_MANAGER': 'true',
        'ENABLE_E10S': text_type(bool(test.get('e10s'))).lower(),
        'WORKING_DIR': '/builds/worker',
    })

    # Legacy linux64 tests rely on compiz.
    if test.get('docker-image', {}).get('in-tree') == 'desktop1604-test':
        env.update({'NEED_COMPIZ': 'true'})

    # Bug 1602701/1601828 - use compiz on ubuntu1804 due to GTK asynchiness
    # when manipulating windows.
    if test.get('docker-image', {}).get('in-tree') == 'ubuntu1804-test':
        if ('wdspec' in job['run']['test']['suite']
                or ('marionette' in job['run']['test']['suite']
                    and 'headless' not in job['label'])):
            env.update({'NEED_COMPIZ': 'true'})

    if mozharness.get('mochitest-flavor'):
        env['MOCHITEST_FLAVOR'] = mozharness['mochitest-flavor']

    if mozharness['set-moz-node-path']:
        env['MOZ_NODE_PATH'] = '/usr/local/bin/node'

    if 'actions' in mozharness:
        env['MOZHARNESS_ACTIONS'] = ' '.join(mozharness['actions'])

    if config.params.is_try():
        env['TRY_COMMIT_MSG'] = config.params['message']

    # handle some of the mozharness-specific options
    if test['reboot']:
        raise Exception('reboot: {} not supported on generic-worker'.format(
            test['reboot']))

    # Support vcs checkouts regardless of whether the task runs from
    # source or not in case it is needed on an interactive loaner.
    support_vcs_checkout(config, job, taskdesc)

    # If we have a source checkout, run mozharness from it instead of
    # downloading a zip file with the same content.
    if test['checkout']:
        env['MOZHARNESS_PATH'] = '{workdir}/checkouts/gecko/testing/mozharness'.format(
            **run)
    else:
        env['MOZHARNESS_URL'] = {'task-reference': mozharness_url}

    extra_config = {
        'installer_url': installer_url,
        'test_packages_url': test_packages_url(taskdesc),
    }
    env['EXTRA_MOZHARNESS_CONFIG'] = {
        'task-reference': six.ensure_text(json.dumps(extra_config))
    }

    command = [
        '{workdir}/bin/test-linux.sh'.format(**run),
    ]
    command.extend(mozharness.get('extra-options', []))

    if test.get('test-manifests'):
        env['MOZHARNESS_TEST_PATHS'] = six.ensure_text(
            json.dumps({test['suite']: test['test-manifests']}))

    # TODO: remove the need for run['chunked']
    elif mozharness.get('chunked') or test['chunks'] > 1:
        command.append('--total-chunk={}'.format(test['chunks']))
        command.append('--this-chunk={}'.format(test['this-chunk']))

    if 'download-symbols' in mozharness:
        download_symbols = mozharness['download-symbols']
        download_symbols = {
            True: 'true',
            False: 'false'
        }.get(download_symbols, download_symbols)
        command.append('--download-symbols=' + download_symbols)

    job['run'] = {
        'workdir': run['workdir'],
        'tooltool-downloads': mozharness['tooltool-downloads'],
        'checkout': test['checkout'],
        'command': command,
        'using': 'run-task',
    }
    configure_taskdesc_for_run(config, job, taskdesc, worker['implementation'])
Example #20
def mozharness_test_on_script_engine_autophone(config, job, taskdesc):
    test = taskdesc['run']['test']
    mozharness = test['mozharness']
    worker = taskdesc['worker']
    is_talos = test['suite'] == 'talos' or test['suite'] == 'raptor'
    if worker['os'] != 'linux':
        raise Exception(
            'os: {} not supported on script-engine-autophone'.format(
                worker['os']))

    if 'installer-url' in mozharness:
        installer_url = mozharness['installer-url']
    else:
        installer_url = get_artifact_url('<build>',
                                         mozharness['build-artifact-name'])
    mozharness_url = get_artifact_url('<build>', 'public/build/mozharness.zip')

    artifacts = [
        # (artifact name prefix, in-image path)
        ("public/test/", "/builds/worker/artifacts"),
        ("public/logs/", "/builds/worker/workspace/build/logs"),
        ("public/test_info/",
         "/builds/worker/workspace/build/blobber_upload_dir"),
    ]

    worker['artifacts'] = [{
        'name': prefix,
        'path': path,
        'type': 'directory',
    } for (prefix, path) in artifacts]

    if test['reboot']:
        worker['reboot'] = test['reboot']

    worker['env'] = env = {
        'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
        'GECKO_HEAD_REV': config.params['head_rev'],
        'MOZHARNESS_CONFIG': ' '.join(mozharness['config']),
        'MOZHARNESS_SCRIPT': mozharness['script'],
        'MOZHARNESS_URL': {
            'task-reference': mozharness_url
        },
        'MOZILLA_BUILD_URL': {
            'task-reference': installer_url
        },
        "MOZ_NO_REMOTE": '1',
        "XPCOM_DEBUG_BREAK": 'warn',
        "NO_FAIL_ON_TEST_ERRORS": '1',
        "MOZ_HIDE_RESULTS_TABLE": '1',
        "MOZ_NODE_PATH": "/usr/local/bin/node",
        'WORKING_DIR': '/builds/worker',
        'WORKSPACE': '/builds/worker/workspace',
        'TASKCLUSTER_WORKER_TYPE': job['worker-type'],
    }

    # for fetch tasks on mobile
    if 'env' in job['worker'] and 'MOZ_FETCHES' in job['worker']['env']:
        env['MOZ_FETCHES'] = job['worker']['env']['MOZ_FETCHES']
        env['MOZ_FETCHES_DIR'] = job['worker']['env']['MOZ_FETCHES_DIR']

    # talos tests don't need Xvfb
    if is_talos:
        env['NEED_XVFB'] = 'false'

    extra_config = {
        'installer_url': installer_url,
        'test_packages_url': test_packages_url(taskdesc),
    }
    env['EXTRA_MOZHARNESS_CONFIG'] = {
        'task-reference': six.ensure_text(json.dumps(extra_config))
    }

    script = 'test-linux.sh'
    worker['context'] = config.params.file_url(
        'taskcluster/scripts/tester/{}'.format(script), )

    command = worker['command'] = ["./{}".format(script)]
    if mozharness.get('include-blob-upload-branch'):
        command.append('--blob-upload-branch=' + config.params['project'])
    command.extend(mozharness.get('extra-options', []))

    if test.get('test-manifests'):
        env['MOZHARNESS_TEST_PATHS'] = six.ensure_text(
            json.dumps({test['suite']: test['test-manifests']}))

    # TODO: remove the need for run['chunked']
    elif mozharness.get('chunked') or test['chunks'] > 1:
        command.append('--total-chunk={}'.format(test['chunks']))
        command.append('--this-chunk={}'.format(test['this-chunk']))

    if 'download-symbols' in mozharness:
        download_symbols = mozharness['download-symbols']
        download_symbols = {
            True: 'true',
            False: 'false'
        }.get(download_symbols, download_symbols)
        command.append('--download-symbols=' + download_symbols)
Example #21
def _group_or_org_update(context, data_dict, is_org=False):
    model = context['model']
    user = context['user']
    session = context['session']
    id = _get_or_bust(data_dict, 'id')

    group = model.Group.get(id)
    context["group"] = group
    if group is None:
        raise NotFound('Group was not found.')

    data_dict['type'] = group.type

    # get the schema
    group_plugin = lib_plugins.lookup_group_plugin(group.type)
    try:
        schema = group_plugin.form_to_db_schema_options({
            'type': 'update',
            'api': 'api_version' in context,
            'context': context
        })
    except AttributeError:
        schema = group_plugin.form_to_db_schema()

    upload = uploader.get_uploader('group')
    upload.update_data_dict(data_dict, 'image_url', 'image_upload',
                            'clear_upload')

    if is_org:
        _check_access('organization_update', context, data_dict)
    else:
        _check_access('group_update', context, data_dict)

    if 'api_version' not in context:
        # old plugins do not support passing the schema so we need
        # to ensure they still work
        try:
            group_plugin.check_data_dict(data_dict, schema)
        except TypeError:
            group_plugin.check_data_dict(data_dict)

    data, errors = lib_plugins.plugin_validate(
        group_plugin, context, data_dict, schema,
        'organization_update' if is_org else 'group_update')
    log.debug('group_update validate_errs=%r user=%s group=%s data_dict=%r',
              errors, context.get('user'),
              context.get('group').name if context.get('group') else '',
              data_dict)

    if errors:
        session.rollback()
        raise ValidationError(errors)

    contains_packages = 'packages' in data_dict

    group = model_save.group_dict_save(data,
                                       context,
                                       prevent_packages_update=is_org
                                       or not contains_packages)

    if is_org:
        plugin_type = plugins.IOrganizationController
    else:
        plugin_type = plugins.IGroupController

    for item in plugins.PluginImplementations(plugin_type):
        item.edit(group)

    if is_org:
        activity_type = 'changed organization'
    else:
        activity_type = 'changed group'

    activity_dict = {
        'user_id': model.User.by_name(six.ensure_text(user)).id,
        'object_id': group.id,
        'activity_type': activity_type,
    }
    # Handle 'deleted' groups.
    # When the user marks a group as deleted this comes through here as
    # a 'changed' group activity. We detect this and change it to a 'deleted'
    # activity.
    if group.state == u'deleted':
        if session.query(ckan.model.Activity).filter_by(
                object_id=group.id, activity_type='deleted').all():
            # A 'deleted group' activity for this group has already been
            # emitted.
            # FIXME: What if the group was deleted and then activated again?
            activity_dict = None
        else:
            # We will emit a 'deleted group' activity.
            activity_dict['activity_type'] = \
                'deleted organization' if is_org else 'deleted group'
    if activity_dict is not None:
        activity_dict['data'] = {
            'group': dictization.table_dictize(group, context)
        }
        activity_create_context = {
            'model': model,
            'user': user,
            'defer_commit': True,
            'ignore_auth': True,
            'session': session
        }
        _get_action('activity_create')(activity_create_context, activity_dict)
        # TODO: Also create an activity detail recording what exactly changed
        # in the group.

    upload.upload(uploader.get_max_image_size())

    if not context.get('defer_commit'):
        model.repo.commit()

    return model_dictize.group_dictize(group, context)
Example #22
def _Utf8ToBase64(s):
    """Encode a utf-8 string as a base 64 string."""
    return six.ensure_text(base64.b64encode(six.ensure_binary(s)))
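The helper round-trips through bytes because base64 operates on bytes: ensure_binary UTF-8-encodes the input, b64encode returns bytes, and ensure_text turns the result back into text. For example:

import base64
import six

encoded = six.ensure_text(base64.b64encode(six.ensure_binary(u'héllo')))
assert isinstance(encoded, six.text_type)          # 'aMOpbGxv'
assert base64.b64decode(encoded) == u'héllo'.encode('utf-8')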
  def RunGsUtil(self,
                cmd,
                return_status=False,
                return_stdout=False,
                return_stderr=False,
                expected_status=0,
                stdin=None,
                env_vars=None):
    """Runs the gsutil command.

    Args:
      cmd: The command to run, as a list, e.g. ['cp', 'foo', 'bar']
      return_status: If True, the exit status code is returned.
      return_stdout: If True, the standard output of the command is returned.
      return_stderr: If True, the standard error of the command is returned.
      expected_status: The expected return code. If not specified, defaults to
                       0. If the return code is a different value, an exception
                       is raised.
      stdin: A string of data to pipe to the process as standard input.
      env_vars: A dictionary of variables to extend the subprocess's os.environ
                with.

    Returns:
      If multiple return_* values were specified, this method returns a tuple
      containing the desired return values specified by the return_* arguments
      (in the order those parameters are specified in the method definition).
      If only one return_* value was specified, that value is returned directly
      rather than being returned within a 1-tuple.
    """
    cmd = [
        gslib.GSUTIL_PATH, '--testexceptiontraces', '-o',
        'GSUtil:default_project_id=' + PopulateProjectId()
    ] + cmd
    if stdin is not None:
      if six.PY3:
        if not isinstance(stdin, bytes):
          stdin = stdin.encode(UTF8)
      else:
        stdin = stdin.encode(UTF8)
    # Check whether the test was invoked from a par file (bundled archive);
    # if not, prepend the Python executable path so the correct version of
    # Python is used for testing.
    cmd = [str(sys.executable)] + cmd if not InvokedFromParFile() else cmd
    env = os.environ.copy()
    if env_vars:
      env.update(env_vars)
    # Ensuring correct text types
    envstr = dict()
    for k, v in six.iteritems(env):
      envstr[six.ensure_str(k)] = six.ensure_str(v)
    cmd = [six.ensure_str(part) for part in cmd]
    # executing command
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         stdin=subprocess.PIPE,
                         env=envstr)
    c_out = p.communicate(stdin)
    try:
      c_out = [six.ensure_text(output) for output in c_out]
    except UnicodeDecodeError:
      c_out = [
          six.ensure_text(output, locale.getpreferredencoding(False))
          for output in c_out
      ]
    stdout = c_out[0].replace(os.linesep, '\n')
    stderr = c_out[1].replace(os.linesep, '\n')
    status = p.returncode

    if expected_status is not None:
      cmd = map(six.ensure_text, cmd)
      self.assertEqual(
          int(status),
          int(expected_status),
          msg='Expected status {}, got {}.\nCommand:\n{}\n\nstderr:\n{}'.format(
              expected_status, status, ' '.join(cmd), stderr))

    toreturn = []
    if return_status:
      toreturn.append(status)
    if return_stdout:
      toreturn.append(stdout)
    if return_stderr:
      toreturn.append(stderr)

    if len(toreturn) == 1:
      return toreturn[0]
    elif toreturn:
      return tuple(toreturn)
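The communicate() handling above decodes subprocess output as UTF-8 first and retries with the platform's preferred encoding when that fails. A reduced sketch of just the decode step:

import locale
import six

def decode_output(raw_bytes):
    """Decode subprocess output, falling back to the locale's encoding (sketch)."""
    try:
        return six.ensure_text(raw_bytes)
    except UnicodeDecodeError:
        return six.ensure_text(raw_bytes, locale.getpreferredencoding(False))

print(decode_output(b'Copying gs://bucket/obj\n'))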
Example #24
def build_image(path):
    log.debug("build_image()")

    log.debug("Request Path : {0}".format(path))

    request_path = path[1:]

    if request_path == "favicon.ico":
        return []

    decoded_url = ensure_text(base64.b64decode(request_path))
    log.debug("decoded_url : {0}".format(decoded_url))

    image_urls = get_image_links(decoded_url)

    width, height = 500, 750
    collage = Image.new('RGB', (width, height), (5, 5, 5))

    cols = 2
    rows = 2
    thumbnail_width = int(width / cols)
    thumbnail_height = int(height / rows)
    size = (thumbnail_width, thumbnail_height)
    image_count = 0

    for art in image_urls:

        thumb_url = art.get("thumb")
        if thumb_url:
            url_bits = urlparse(thumb_url.strip())

            host_name = url_bits.hostname
            port = url_bits.port
            url_path = url_bits.path
            url_query = url_bits.query

            server = "%s:%s" % (host_name, port)
            url_full_path = url_path + "?" + url_query

            log.debug("Loading image from : {0} {1} {2}".format(
                image_count, server, url_full_path))

            try:
                image_response = requests.get(thumb_url)
                image_data = image_response.content

                loaded_image = Image.open(io.BytesIO(image_data))
                image = ImageOps.fit(loaded_image,
                                     size,
                                     method=Image.ANTIALIAS,
                                     bleed=0.0,
                                     centering=(0.5, 0.5))

                x = int(image_count % cols) * thumbnail_width
                y = int(image_count / cols) * thumbnail_height
                collage.paste(image, (x, y))

                del loaded_image
                del image
                del image_data

            except Exception as con_err:
                log.debug("Error loading image : {0}".format(con_err))

            image_count += 1

        if image_count == cols * rows:
            break

    del image_urls

    img_byte_arr = io.BytesIO()
    collage.save(img_byte_arr, format='JPEG')
    image_bytes = img_byte_arr.getvalue()

    return image_bytes
Example #25
  def RunCommand(self):
    """Command entry point for the du command."""
    self.line_ending = '\n'
    self.all_versions = False
    self.produce_total = False
    self.human_readable = False
    self.summary_only = False
    self.exclude_patterns = []
    if self.sub_opts:
      for o, a in self.sub_opts:
        if o == '-0':
          self.line_ending = '\0'
        elif o == '-a':
          self.all_versions = True
        elif o == '-c':
          self.produce_total = True
        elif o == '-e':
          self.exclude_patterns.append(a)
        elif o == '-h':
          self.human_readable = True
        elif o == '-s':
          self.summary_only = True
        elif o == '-X':
          if a == '-':
            f = sys.stdin
            f_close = False
          else:
            f = open(a, 'r') if six.PY2 else open(a, 'r', encoding=UTF8)
            f_close = True
          self.exclude_patterns = [six.ensure_text(line.strip()) for line in f]
          if f_close:
            f.close()

    if not self.args:
      # Default to listing all gs buckets.
      self.args = ['gs://']

    total_bytes = 0
    got_nomatch_errors = False

    def _PrintObjectLong(blr):
      return self._PrintInfoAboutBucketListingRef(blr)

    def _PrintNothing(unused_blr=None):
      pass

    def _PrintDirectory(num_bytes, blr):
      if not self.summary_only:
        self._PrintSummaryLine(num_bytes, blr.url_string.encode(UTF8))

    for url_arg in self.args:
      top_level_storage_url = StorageUrlFromString(url_arg)
      if top_level_storage_url.IsFileUrl():
        raise CommandException('Only cloud URLs are supported for %s' %
                               self.command_name)
      bucket_listing_fields = ['size']

      listing_helper = ls_helper.LsHelper(
          self.WildcardIterator,
          self.logger,
          print_object_func=_PrintObjectLong,
          print_dir_func=_PrintNothing,
          print_dir_header_func=_PrintNothing,
          print_dir_summary_func=_PrintDirectory,
          print_newline_func=_PrintNothing,
          all_versions=self.all_versions,
          should_recurse=True,
          exclude_patterns=self.exclude_patterns,
          fields=bucket_listing_fields)

      # LsHelper expands to objects and prefixes, so perform a top-level
      # expansion first.
      if top_level_storage_url.IsProvider():
        # Provider URL: use bucket wildcard to iterate over all buckets.
        top_level_iter = self.WildcardIterator(
            '%s://*' %
            top_level_storage_url.scheme).IterBuckets(bucket_fields=['id'])
      elif top_level_storage_url.IsBucket():
        top_level_iter = self.WildcardIterator(
            '%s://%s' % (top_level_storage_url.scheme,
                         top_level_storage_url.bucket_name)).IterBuckets(
                             bucket_fields=['id'])
      else:
        top_level_iter = [BucketListingObject(top_level_storage_url)]

      for blr in top_level_iter:
        storage_url = blr.storage_url
        if storage_url.IsBucket() and self.summary_only:
          storage_url = StorageUrlFromString(
              storage_url.CreatePrefixUrl(wildcard_suffix='**'))
        _, exp_objs, exp_bytes = listing_helper.ExpandUrlAndPrint(storage_url)
        if (storage_url.IsObject() and exp_objs == 0 and
            ContainsWildcard(url_arg) and not self.exclude_patterns):
          got_nomatch_errors = True
        total_bytes += exp_bytes

        if self.summary_only:
          self._PrintSummaryLine(exp_bytes,
                                 blr.url_string.rstrip('/').encode(UTF8))

    if self.produce_total:
      self._PrintSummaryLine(total_bytes, 'total')

    if got_nomatch_errors:
      raise CommandException('One or more URLs matched no objects.')

    return 0
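A standalone sketch of the -X exclude-file handling above: each line is stripped and coerced to text with six.ensure_text. The load_exclude_patterns helper name and the in-memory file are only for illustration.

import io

import six

def load_exclude_patterns(fileobj):
    # Strip each line and coerce it to text, as the -X branch does above.
    return [six.ensure_text(line.strip()) for line in fileobj]

patterns = load_exclude_patterns(io.StringIO(u'*.tmp\nlogs/**\n'))
print(patterns)  # ['*.tmp', 'logs/**']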
def main(_):
  input_path = FLAGS.input_path
  output_dir = FLAGS.output_dir
  all_references = []
  overlap_references = []
  nonoverlap_references = []

  parent_prec_tables = []
  parent_rec_tables = []
  overlap_parent_prec_tables = []
  overlap_parent_rec_tables = []
  nonoverlap_parent_prec_tables = []
  nonoverlap_parent_rec_tables = []

  with open(input_path, "r") as input_file:
    for line in input_file:
      line = six.ensure_text(line, "utf-8")
      json_example = json.loads(line)
      multi_reference, multi_overlap_reference, multi_nonoverlap_reference = (
          get_references(json_example, FLAGS.mode))
      all_references.append(multi_reference)
      if multi_overlap_reference:
        overlap_references.append(multi_overlap_reference)
      if multi_nonoverlap_reference:
        nonoverlap_references.append(multi_nonoverlap_reference)

      table = json_example["table"]
      table_page_title = json_example["table_page_title"]
      table_section_title = json_example["table_section_title"]
      table_section_text = json_example["table_section_text"]

      cell_indices = json_example["highlighted_cells"]
      highlighted_subtable = (
          table_to_text_utils.get_highlighted_subtable(
              table=table, cell_indices=cell_indices, with_headers=False))

      # Get PARENT format code.
      table_prec_format = table_to_text_utils.get_table_parent_format(
          table=table,
          table_page_title=table_page_title,
          table_section_title=table_section_title,
          table_section_text=table_section_text)

      table_rec_format = table_to_text_utils.get_subtable_parent_format(
          subtable=highlighted_subtable,
          table_page_title=table_page_title,
          table_section_title=table_section_title)

      parent_prec_tables.append(table_prec_format)
      parent_rec_tables.append(table_rec_format)

      if FLAGS.mode == "dev" or FLAGS.mode == "test":
        if json_example["overlap_subset"]:
          overlap_parent_prec_tables.append(table_prec_format)
          overlap_parent_rec_tables.append(table_rec_format)
        else:
          nonoverlap_parent_prec_tables.append(table_prec_format)
          nonoverlap_parent_rec_tables.append(table_rec_format)

  print("Writing references.")
  all_output_path_base = os.path.join(output_dir, "references")
  overlap_output_path_base = os.path.join(output_dir, "overlap_references")
  nonoverlap_output_path_base = os.path.join(output_dir,
                                             "nonoverlap_references")
  write_references(all_references, all_output_path_base)
  write_references(overlap_references, overlap_output_path_base)
  write_references(nonoverlap_references, nonoverlap_output_path_base)

  print("Writing tables in PARENT format.")

  all_table_prec_path = os.path.join(output_dir,
                                     "tables_parent_precision_format")
  all_table_rec_path = os.path.join(output_dir, "tables_parent_recall_format")
  overlap_table_prec_path = os.path.join(
      output_dir, "overlap_tables_parent_precision_format")
  overlap_table_rec_path = os.path.join(output_dir,
                                        "overlap_tables_parent_recall_format")
  nonoverlap_table_prec_path = os.path.join(
      output_dir, "nonoverlap_tables_parent_precision_format")
  nonoverlap_table_rec_path = os.path.join(
      output_dir, "nonoverlap_tables_parent_recall_format")

  write_table_parent_format(parent_prec_tables, all_table_prec_path)
  write_table_parent_format(parent_rec_tables, all_table_rec_path)
  write_table_parent_format(overlap_parent_prec_tables, overlap_table_prec_path)
  write_table_parent_format(overlap_parent_rec_tables, overlap_table_rec_path)
  write_table_parent_format(nonoverlap_parent_prec_tables,
                            nonoverlap_table_prec_path)
  write_table_parent_format(nonoverlap_parent_rec_tables,
                            nonoverlap_table_rec_path)
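The per-line decoding pattern above, reduced to a runnable sketch; the byte stream and its single record stand in for the real input_path file.

import io
import json

import six

# Stand-in for open(input_path, "r"): a tiny JSON-lines byte stream.
input_file = io.BytesIO(b'{"table_page_title": "Example title"}\n')

for line in input_file:
    line = six.ensure_text(line, "utf-8")   # bytes -> text before json.loads
    json_example = json.loads(line)
    print(json_example["table_page_title"])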
Example No. 27
0
	def render(self, request):

		@defer.inlineCallbacks
		def _showImage(data):

			@defer.inlineCallbacks
			def _setContentDispositionAndSend(file_path):
				filename = os.path.basename(file_path)
				request.setHeader('content-disposition', 'filename="%s"' % filename)
				request.setHeader('content-type', "image/png")
				f = open(file_path, "rb")
				yield FileSender().beginFileTransfer(f, request)
				f.close()
				defer.returnValue(0)

			if os.path.exists(data):
				yield _setContentDispositionAndSend(data)
			else:
				request.setResponseCode(http.NOT_FOUND)

			request.finish()
			defer.returnValue(0)

		# cache data
		withMainTemplate = self.withMainTemplate
		path = self.path
		isCustom = self.isCustom
		isMobile = self.isMobile
		isImage = self.isImage

		if self.path == "":
			self.path = "index"
		elif self.path == "signal":
			self.path = "tunersignal"
			request.uri = request.uri.replace(b'signal', b'tunersignal')
			request.path = request.path.replace(b'signal', b'tunersignal')

		self.suppresslog = False
		self.path = self.path.replace(".", "")
		if request.path.startswith(b'/api/config'):
			func = getattr(self, "P_config", None)
		elif self.path in self.NoDataRender():
			func = getattr(self, "noData", None)
		else:
			func = getattr(self, "P_" + self.path, None)

		if callable(func):
			request.setResponseCode(http.OK)

			# call prePageLoad function if exist
			plfunc = getattr(self, "prePageLoad", None)
			if callable(plfunc):
				plfunc(request)

			data = func(request)
			if data is None:
				# if not self.suppresslog:
					# print "[OpenWebif] page '%s' without content" % request.uri
				self.error404(request)
			elif self.isCustom:
				# if not self.suppresslog:
					# print "[OpenWebif] page '%s' ok (custom)" % request.uri
				request.write(six.ensure_binary(data))
				request.finish()
			elif self.isImage:
				_showImage(data)
			elif self.isJson:
				request.setHeader("content-type", "application/json; charset=utf-8")
				try:
					return six.ensure_binary(json.dumps(data, indent=1))
				except Exception as exc:
					request.setResponseCode(http.INTERNAL_SERVER_ERROR)
					return six.ensure_binary(json.dumps({"result": False, "request": request.path, "exception": repr(exc)}))
			elif isinstance(data, str):
				# if not self.suppresslog:
					# print "[OpenWebif] page '%s' ok (simple string)" % request.uri
				request.setHeader("content-type", "text/plain")
				request.write(six.ensure_binary(data))
				request.finish()
			else:
				# print "[OpenWebif] page '%s' ok (cheetah template)" % request.uri
				module = six.ensure_text(request.path)
				if module[-1:] == "/":
					module += "index"
				elif module[-5:] != "index" and self.path == "index":
					module += "/index"
				module = module.strip("/")
				module = module.replace(".", "")
				out = self.loadTemplate(module, self.path, data)
				if out is None:
					print("[OpenWebif] ERROR! Template not found for page '%s'" % request.uri)
					self.error404(request)
				else:
					if self.isMobile:
						head = self.loadTemplate('mobile/head', 'head', [])
						out = head + out
					elif self.withMainTemplate:
						args = self.prepareMainTemplate(request)
						args["content"] = out
						nout = self.loadTemplate("main", "main", args)
						if nout:
							out = nout
					elif self.isGZ:
						return out
					request.write(six.ensure_binary(out))
					request.finish()

		else:
			print("[OpenWebif] page '%s' not found" % request.uri)
			self.error404(request)

		# restore cached data
		self.withMainTemplate = withMainTemplate
		self.path = path
		self.isCustom = isCustom
		self.isMobile = isMobile
		self.isImage = isImage

		return server.NOT_DONE_YET
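A reduced sketch of the JSON branch above: six.ensure_binary turns the serialized payload into the bytes Twisted is given to write. The payload is invented and no Twisted objects are involved.

import json

import six

payload = {"result": True, "message": u"tuner signal OK"}
body = six.ensure_binary(json.dumps(payload, indent=1))
assert isinstance(body, bytes)
print(body.decode("utf-8"))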
Example No. 28
0
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None: return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle.get_query(title)
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s season %d' % (title, int(data['season'])) if 'tvshowtitle' in data else title
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            query = quote_plus(query)

            url = urljoin(self.base_link, self.search_link % query)

            ua = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=ua).content
            r = six.ensure_text(r, errors='replace')
            _posts = client.parseDOM(r, 'div', attrs={'class': 'item'})
            posts = []
            for p in _posts:
                try:
                    post = (client.parseDOM(p, 'a', ret='href')[1],
                              client.parseDOM(p, 'a')[1],
                              re.findall(r'Release:\s*?(\d{4})</', p, re.I|re.S)[1])
                    posts.append(post)
                except:
                    pass
            posts = [(i[0], client.parseDOM(i[1], 'i')[0], i[2]) for i in posts if i]

            if 'tvshowtitle' in data:
                sep = 'season %d' % int(data['season'])
                sepi = 'season-%1d/episode-%1d.html' % (int(data['season']), int(data['episode']))
                post = [i[0] for i in posts if sep in i[1].lower()][0]
                data = cfScraper.get(post, headers=ua).content
                data = six.ensure_text(data, errors='replace')
                link = client.parseDOM(data, 'a', ret='href')
                link = [i for i in link if sepi in i][0]
            else:
                link = [i[0] for i in posts if cleantitle.get_title(title) in cleantitle.get_title(i[1]) and hdlr == i[2]][0]

            r = cfScraper.get(link, headers=ua).content
            r = six.ensure_text(r, errors='replace')
            try:
                v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
                v = v.encode('utf-8')
                b64 = base64.b64decode(v)
                b64 = six.ensure_text(b64, errors='ignore')
                url = client.parseDOM(b64, 'iframe', ret='src')[0]
                try:
                    host = re.findall('([\w]+[.][\w]+)$', urlparse(url.strip().lower()).netloc)[0]
                    host = client.replaceHTMLCodes(host)
                    host = six.ensure_str(host)
                    valid, hoster = source_utils.is_host_valid(host, hostDict)
                    if valid:
                        sources.append({
                            'source': hoster,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url.replace('\/', '/'),
                            'direct': False,
                            'debridonly': False
                        })
                except:
                    log_utils.log('plockers4 Exception', 1)
                    pass
            except:
                log_utils.log('plockers3 Exception', 1)
                pass
            r = client.parseDOM(r, 'div', {'class': 'server_line'})
            r = [(client.parseDOM(i, 'a', ret='href')[0],
                  client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
            if r:
                for i in r:
                    try:
                        host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                        url = i[0].replace('\/', '/')
                        host = client.replaceHTMLCodes(host)
                        host = six.ensure_str(host)
                        if 'other' in host: continue
                        valid, hoster = source_utils.is_host_valid(host, hostDict)
                        if valid:
                            sources.append({
                                'source': hoster,
                                'quality': 'SD',
                                'language': 'en',
                                'url': url,
                                'direct': False,
                                'debridonly': False
                            })
                    except:
                        log_utils.log('plockers5 Exception', 1)
                        pass
            return sources
        except:
            log_utils.log('plockers Exception', 1)
            return sources
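The Base64 step above in isolation: decode the embedded blob and coerce it to text before parsing it for an iframe. The markup here is invented for the sketch.

import base64

import six

blob = base64.b64encode(b'<iframe src="https://example.com/embed/abc"></iframe>')
html = six.ensure_text(base64.b64decode(blob), errors='ignore')
print(html)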
Example No. 29
0
def get_decision_parameters(graph_config, options):
    """
    Load parameters from the command-line options for 'taskgraph decision'.
    This also applies per-project parameters, based on the given project.

    """
    product_dir = graph_config['product-dir']

    parameters = {
        n: options[n]
        for n in [
            'base_repository',
            'head_repository',
            'head_rev',
            'head_ref',
            'project',
            'pushlog_id',
            'pushdate',
            'owner',
            'level',
            'target_tasks_method',
            'tasks_for',
        ] if n in options
    }

    for n in (
            'comm_base_repository',
            'comm_head_repository',
            'comm_head_rev',
            'comm_head_ref',
    ):
        if n in options and options[n] is not None:
            parameters[n] = options[n]

    commit_message = get_hg_commit_message(os.path.join(GECKO, product_dir))

    # Define default filter list, as most configurations shouldn't need
    # custom filters.
    parameters['filters'] = [
        'target_tasks_method',
    ]
    parameters['optimize_target_tasks'] = True
    parameters['existing_tasks'] = {}
    parameters['do_not_optimize'] = []
    parameters['build_number'] = 1
    parameters['version'] = get_version(product_dir)
    parameters['app_version'] = get_app_version(product_dir)
    parameters['message'] = try_syntax_from_message(commit_message)
    parameters['hg_branch'] = get_hg_revision_branch(
        GECKO, revision=parameters['head_rev'])
    parameters['next_version'] = None
    parameters['phabricator_diff'] = None
    parameters['release_type'] = ''
    parameters['release_eta'] = ''
    parameters['release_enable_partners'] = False
    parameters['release_partners'] = []
    parameters['release_partner_config'] = {}
    parameters['release_partner_build_number'] = 1
    parameters['release_enable_emefree'] = False
    parameters['release_product'] = None
    parameters['required_signoffs'] = []
    parameters['signoff_urls'] = {}
    parameters['try_mode'] = None
    parameters['try_task_config'] = {}
    parameters['try_options'] = None

    # owner must be an email, but sometimes (e.g., for ffxbld) it is not, in which
    # case, fake it
    if '@' not in parameters['owner']:
        parameters['owner'] += '@noreply.mozilla.org'

    # use the pushdate as build_date if given, else use current time
    parameters['build_date'] = parameters['pushdate'] or int(time.time())
    # moz_build_date is the build identifier based on build_date
    parameters['moz_build_date'] = six.ensure_text(
        time.strftime("%Y%m%d%H%M%S", time.gmtime(parameters['build_date'])))

    project = parameters['project']
    try:
        parameters.update(PER_PROJECT_PARAMETERS[project])
    except KeyError:
        logger.warning("using default project parameters; add {} to "
                       "PER_PROJECT_PARAMETERS in {} to customize behavior "
                       "for this project".format(project, __file__))
        parameters.update(PER_PROJECT_PARAMETERS['default'])

    # `target_tasks_method` has higher precedence than `project` parameters
    if options.get('target_tasks_method'):
        parameters['target_tasks_method'] = options['target_tasks_method']

    # ..but can be overridden by the commit message: if it contains the special
    # string "DONTBUILD" and this is an on-push decision task, then use the
    # special 'nothing' target task method.
    if 'DONTBUILD' in commit_message and options['tasks_for'] == 'hg-push':
        parameters['target_tasks_method'] = 'nothing'

    if options.get('include_push_tasks'):
        get_existing_tasks(options.get('rebuild_kinds', []), parameters,
                           graph_config)

    # If the target method is nightly, we should build partials. This means
    # knowing what has been released previously.
    # An empty release_history is fine, it just means no partials will be built
    parameters.setdefault('release_history', dict())
    if 'nightly' in parameters.get('target_tasks_method', ''):
        parameters['release_history'] = populate_release_history(
            'Firefox', project)

    if options.get('try_task_config_file'):
        task_config_file = os.path.abspath(options.get('try_task_config_file'))
    else:
        # if try_task_config.json is present, load it
        task_config_file = os.path.join(os.getcwd(), 'try_task_config.json')

    # load try settings
    if 'try' in project and options['tasks_for'] == 'hg-push':
        set_try_config(parameters, task_config_file)

    if options.get('optimize_target_tasks') is not None:
        parameters['optimize_target_tasks'] = options['optimize_target_tasks']

    if 'decision-parameters' in graph_config['taskgraph']:
        find_object(graph_config['taskgraph']['decision-parameters'])(
            graph_config, parameters)

    result = Parameters(**parameters)
    result.check()
    return result
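The moz_build_date derivation above as a standalone sketch, using the current time in place of a pushdate.

import time

import six

build_date = int(time.time())  # stand-in for parameters['pushdate']
moz_build_date = six.ensure_text(
    time.strftime("%Y%m%d%H%M%S", time.gmtime(build_date)))
print(moz_build_date)  # e.g. 20240101123456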
Example No. 30
0
def test_create_no_seed(python, creator, isolated, system, coverage_env,
                        special_name_dir, method):
    dest = special_name_dir
    cmd = [
        "-v",
        "-v",
        "-p",
        six.ensure_text(python),
        six.ensure_text(str(dest)),
        "--without-pip",
        "--activators",
        "",
        "--creator",
        creator,
        "--{}".format(method),
    ]
    if isolated == "global":
        cmd.append("--system-site-packages")
    result = run_via_cli(cmd)
    coverage_env()
    if IS_PYPY:
        # pypy cleans up file descriptors periodically so our (many) subprocess calls impact file descriptor limits
        # force a cleanup of these on systems where the limit is low-ish (e.g. macOS at 256)
        gc.collect()
    content = list(result.creator.purelib.iterdir())
    assert not content, "\n".join(six.ensure_text(str(i)) for i in content)
    assert result.creator.env_name == six.ensure_text(dest.name)
    debug = result.creator.debug
    sys_path = cleanup_sys_path(debug["sys"]["path"])
    system_sys_path = cleanup_sys_path(system["sys"]["path"])
    our_paths = set(sys_path) - set(system_sys_path)
    our_paths_repr = "\n".join(six.ensure_text(repr(i)) for i in our_paths)

    # ensure we have at least one extra path added
    assert len(our_paths) >= 1, our_paths_repr
    # ensure all additional paths are related to the virtual environment
    for path in our_paths:
        msg = "\n{}\ndoes not start with {}\nhas:\n{}".format(
            six.ensure_text(str(path)),
            six.ensure_text(str(dest)),
            "\n".join(six.ensure_text(str(p)) for p in system_sys_path),
        )
        assert str(path).startswith(str(dest)), msg
    # ensure there's at least a site-packages folder as part of the virtual environment added
    assert any(p for p in our_paths
               if p.parts[-1] == "site-packages"), our_paths_repr

    # ensure the global site package is added or not, depending on flag
    last_from_system_path = next(i for i in reversed(system_sys_path)
                                 if str(i).startswith(system["sys"]["prefix"]))
    if isolated == "isolated":
        assert last_from_system_path not in sys_path
    else:
        common = []
        for left, right in zip(reversed(system_sys_path), reversed(sys_path)):
            if left == right:
                common.append(left)
            else:
                break

        def list_to_str(iterable):
            return [six.ensure_text(str(i)) for i in iterable]

        assert common, "\n".join(
            difflib.unified_diff(list_to_str(sys_path),
                                 list_to_str(system_sys_path)))
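A hedged sketch of the path-comparison idea in the test above: the extra entries a virtual environment adds are the set difference between its sys.path and the host interpreter's. The paths are invented and no virtualenv is created.

import six

system_sys_path = ['/usr/lib/python3.11', '/usr/lib/python3.11/site-packages']
venv_sys_path = system_sys_path[:1] + ['/tmp/venv/lib/python3.11/site-packages']

# Paths present only in the virtual environment's interpreter.
our_paths = set(venv_sys_path) - set(system_sys_path)
print("\n".join(six.ensure_text(repr(p)) for p in our_paths))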
def get_version():
    return ensure_text(_read_build_config()["componentsVersion"])
Example No. 32
0
    def create(
            cls,
            lock,  # type: SyncLock
            sync,  # type: Union[DownstreamSync, LandingSync]
            affected_tests=None,  # type: Optional[Dict[Text, List[Text]]]
            stability=False,  # type: bool
            hacks=True,  # type: bool
            try_cls=TryFuzzyCommit,  # type: type
            rebuild_count=None,  # type: Optional[int]
            check_open=True,  # type: bool
            **kwargs  # type: Any
    ):
        # type: (...) -> TryPush
        logger.info("Creating try push for PR %s" % sync.pr)
        if check_open and not tree.is_open("try"):
            logger.info("try is closed")
            raise RetryableError(AbortError("Try is closed"))

        # Ensure the required indexes exist
        TaskGroupIndex.get_or_create(sync.git_gecko)
        try_idx = TryCommitIndex.get_or_create(sync.git_gecko)

        git_work = sync.gecko_worktree.get()

        if rebuild_count is None:
            rebuild_count = 0 if not stability else env.config['gecko']['try'][
                'stability_count']
            if not isinstance(rebuild_count, six.integer_types):
                logger.error(
                    "Could not find config for Stability rebuild count, using default 5"
                )
                rebuild_count = 5
        with try_cls(sync.git_gecko,
                     git_work,
                     affected_tests,
                     rebuild_count,
                     hacks=hacks,
                     **kwargs) as c:
            try_rev = c.push()

        data = {
            "try-rev": try_rev,
            "stability": stability,
            "gecko-head": sync.gecko_commits.head.sha1,
            "wpt-head": sync.wpt_commits.head.sha1,
            "status": "open",
            "bug": sync.bug,
        }
        process_name = base.ProcessName.with_seq_id(
            sync.git_gecko, cls.obj_type, sync.sync_type,
            six.ensure_text(str(getattr(sync, sync.obj_id))))
        rv = super(TryPush, cls).create(lock, sync.git_gecko, process_name,
                                        data)
        # Add to the index
        if try_rev:
            try_idx.insert(try_idx.make_key(try_rev), process_name)

        with rv.as_mut(lock):
            rv.created = taskcluster.fromNowJSON("0 days")

        if sync.bug is not None:
            env.bz.comment(
                sync.bug, "Pushed to try%s %s" %
                (" (stability)" if stability else "", rv.treeherder_url))

        return rv
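A sketch of just the rebuild-count fallback above, with the config lookup replaced by a plain argument; the resolve_rebuild_count name is illustrative and not part of the sync code.

import six

def resolve_rebuild_count(configured_count, stability):
    # Non-stability pushes build once; stability pushes use the configured
    # count and fall back to 5 when the config value is not an integer.
    count = 0 if not stability else configured_count
    if not isinstance(count, six.integer_types):
        count = 5
    return count

print(resolve_rebuild_count("20", stability=True))  # 5 (non-integer config value)
print(resolve_rebuild_count(20, stability=True))    # 20
print(resolve_rebuild_count(20, stability=False))   # 0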
Example No. 33
0
def _update_settings_xml():
    """
    This function writes a new ``resources/settings.xml`` file which contains
    all settings for this addon and its plugins.
    """
    try:
        xbmcvfs.mkdirs(common.settings_path)
    except OSError:
        pass

    new_xml = [
        '<?xml version="1.0" encoding="utf-8" standalone="yes"?>',
        '<settings>',
        '\t<category label="ResolveURL">',
        '\t\t<setting default="true" id="allow_universal" label="%s" type="bool"/>' % (common.i18n('enable_universal')),
        '\t\t<setting default="true" id="allow_popups" label="%s" type="bool"/>' % (common.i18n('enable_popups')),
        '\t\t<setting default="true" id="auto_pick" label="%s" type="bool"/>' % (common.i18n('auto_pick')),
        '\t\t<setting default="true" id="use_cache" label="%s" type="bool"/>' % (common.i18n('use_function_cache')),
        '\t\t<setting id="reset_cache" type="action" label="%s" action="RunPlugin(plugin://script.module.resolveurl/?mode=reset_cache)"/>' % (common.i18n('reset_function_cache')),
        '\t\t<setting id="personal_nid" label="Your NID" type="text" visible="false" default=""/>',
        '\t\t<setting id="last_ua_create" label="last_ua_create" type="number" visible="false" default="0"/>',
        '\t\t<setting id="current_ua" label="current_ua" type="text" visible="false" default=""/>',
        '\t\t<setting id="addon_debug" label="addon_debug" type="bool" visible="false" default="false"/>',
        '\t</category>',
        '\t<category label="%s">' % (common.i18n('universal_resolvers'))]

    resolvers = relevant_resolvers(include_universal=True, include_disabled=True)
    resolvers = sorted(resolvers, key=lambda x: x.name.upper())
    for resolver in resolvers:
        if resolver.isUniversal():
            new_xml.append('\t\t<setting label="%s" type="lsep"/>' % resolver.name)
            new_xml += ['\t\t' + line for line in resolver.get_settings_xml()]
    new_xml.append('\t</category>')
    new_xml.append('\t<category label="%s 1">' % (common.i18n('resolvers')))

    i = 0
    cat_count = 2
    for resolver in resolvers:
        if not resolver.isUniversal():
            if i > MAX_SETTINGS:
                new_xml.append('\t</category>')
                new_xml.append('\t<category label="%s %s">' % (common.i18n('resolvers'), cat_count))
                cat_count += 1
                i = 0
            new_xml.append('\t\t<setting label="%s" type="lsep"/>' % resolver.name)
            res_xml = resolver.get_settings_xml()
            new_xml += ['\t\t' + line for line in res_xml]
            i += len(res_xml) + 1

    new_xml.append('\t</category>')
    new_xml.append('</settings>')

    try:
        if six.PY3:
            with open(common.settings_file, 'r', encoding='utf-8') as f:
                old_xml = f.read()
        else:
            with open(common.settings_file, 'r') as f:
                old_xml = f.read()
    except:
        old_xml = u''
    old_xml = six.ensure_text(old_xml)

    new_xml = six.ensure_text('\n'.join(new_xml))
    if old_xml != new_xml:
        common.logger.log_debug('Updating Settings XML')
        try:
            if six.PY3:
                with open(common.settings_file, 'w', encoding='utf-8') as f:
                    f.write(new_xml)
            else:
                with open(common.settings_file, 'w') as f:
                    f.write(new_xml.encode('utf8'))
        except:
            raise
    else:
        common.logger.log_debug('No Settings Update Needed')
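The final comparison above, reduced to a sketch: six.ensure_text puts the XML read from disk and the newly generated XML on the same text type before deciding whether a rewrite is needed.

import six

old_xml = b'<settings>\n</settings>'        # as read back from an existing file
new_xml = u'\n'.join([u'<settings>', u'</settings>'])

if six.ensure_text(old_xml) != six.ensure_text(new_xml):
    print('Updating Settings XML')
else:
    print('No Settings Update Needed')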
Example No. 34
0
    def update(self, tree, parallel=True):
        # type: (Iterable[Tuple[Union[SourceFile, bytes], bool]], bool) -> bool
        """Update the manifest given an iterable of items that make up the updated manifest.

        The iterable must either generate tuples of the form (SourceFile, True) for paths
        that are to be updated, or (path, False) for items that are not to be updated. This
        unusual API is designed as an optimisation, meaning that SourceFile items need not be
        constructed in the case we are not updating a path, but the absence of an item from
        the iterator may be used to remove defunct entries from the manifest."""

        changed = False

        # Create local variable references to these dicts so we avoid the
        # attribute access in the hot loop below
        data = self._data

        types = data.type_by_path()
        deleted = set(types)

        to_update = []

        for source_file_or_path, update in tree:
            if not update:
                assert isinstance(source_file_or_path,
                                  (binary_type, text_type))
                path = ensure_text(source_file_or_path)
                deleted.remove(tuple(path.split(os.path.sep)))
            else:
                assert not isinstance(source_file_or_path,
                                      (binary_type, text_type))
                source_file = source_file_or_path
                rel_path_parts = source_file.rel_path_parts
                assert isinstance(rel_path_parts, tuple)

                is_new = rel_path_parts not in deleted  # type: bool
                hash_changed = False  # type: bool

                if not is_new:
                    deleted.remove(rel_path_parts)
                    old_type = types[rel_path_parts]
                    old_hash = data[old_type].hashes[rel_path_parts]
                    file_hash = source_file.hash  # type: Text
                    if old_hash != file_hash:
                        hash_changed = True
                        del data[old_type][rel_path_parts]

                if is_new or hash_changed:
                    to_update.append(source_file)

        if to_update:
            changed = True

        if parallel and len(to_update) > 25 and cpu_count() > 1:
            # 25 derived experimentally (2020-01) to be approximately
            # the point at which it is quicker to create Pool and
            # parallelize this
            pool = Pool()

            # chunksize set > 1 when more than 10000 tests, because
            # chunking is a net-gain once we get to very large numbers
            # of items (again, experimentally, 2020-01)
            results = pool.imap_unordered(
                compute_manifest_items,
                to_update,
                chunksize=max(1,
                              len(to_update) // 10000)
            )  # type: Iterator[Tuple[Tuple[Text, ...], Text, Set[ManifestItem], Text]]
        elif PY3:
            results = map(compute_manifest_items, to_update)
        else:
            results = itertools.imap(compute_manifest_items, to_update)

        for result in results:
            rel_path_parts, new_type, manifest_items, file_hash = result
            data[new_type][rel_path_parts] = manifest_items
            data[new_type].hashes[rel_path_parts] = file_hash

        if deleted:
            changed = True
            for rel_path_parts in deleted:
                for test_data in itervalues(data):
                    if rel_path_parts in test_data:
                        del test_data[rel_path_parts]

        return changed
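A toy illustration of the (item, updated) convention described in the docstring above, keeping only the path-normalization part; the sample tree is invented, and real callers pass SourceFile objects for updated entries.

import os

from six import ensure_text

# (path, False) entries are not re-parsed; their paths may arrive as bytes
# or text, so they are normalized with ensure_text before being split.
tree = [
    (b'css/style.css', False),
    (u'html/index.html', False),
]

kept = set()
for source_file_or_path, update in tree:
    if not update:
        path = ensure_text(source_file_or_path)
        kept.add(tuple(path.split(os.path.sep)))

print(sorted(kept))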
  def CreateBucket(self,
                   bucket_name=None,
                   test_objects=0,
                   storage_class=None,
                   retention_policy=None,
                   provider=None,
                   prefer_json_api=False,
                   versioning_enabled=False,
                   bucket_policy_only=False,
                   bucket_name_prefix='',
                   bucket_name_suffix=''):
    """Creates a test bucket.

    The bucket and all of its contents will be deleted after the test.

    Args:
      bucket_name: Create the bucket with this name. If not provided, a
                   temporary test bucket name is constructed.
      test_objects: The number of objects that should be placed in the bucket.
                    Defaults to 0.
      storage_class: Storage class to use. If not provided, the standard class is used.
      retention_policy: Retention policy to be used on the bucket.
      provider: Provider to use - either "gs" (the default) or "s3".
      prefer_json_api: If True, use the JSON creation functions where possible.
      versioning_enabled: If True, set the bucket's versioning attribute to
          True.
      bucket_policy_only: If True, set the bucket's iamConfiguration's
          bucketPolicyOnly attribute to True.
      bucket_name_prefix: Unicode string to be prepended to bucket_name
      bucket_name_suffix: Unicode string to be appended to bucket_name

    Returns:
      StorageUri for the created bucket.
    """
    if not provider:
      provider = self.default_provider

    # Location is controlled by the -b test flag.
    if self.multiregional_buckets or provider == 's3':
      location = None
    else:
      # We default to the "us-central1" location for regional buckets, but allow
      # overriding this value in the Boto config.
      location = boto.config.get('GSUtil', 'test_cmd_regional_bucket_location',
                                 'us-central1')

    bucket_name_prefix = six.ensure_text(bucket_name_prefix)
    bucket_name_suffix = six.ensure_text(bucket_name_suffix)

    if bucket_name:
      bucket_name = ''.join(
          [bucket_name_prefix, bucket_name, bucket_name_suffix])
      bucket_name = util.MakeBucketNameValid(bucket_name)
    else:
      bucket_name = self.MakeTempName('bucket',
                                      prefix=bucket_name_prefix,
                                      suffix=bucket_name_suffix)

    if prefer_json_api and provider == 'gs':
      json_bucket = self.CreateBucketJson(bucket_name=bucket_name,
                                          test_objects=test_objects,
                                          storage_class=storage_class,
                                          location=location,
                                          versioning_enabled=versioning_enabled,
                                          retention_policy=retention_policy,
                                          bucket_policy_only=bucket_policy_only)
      bucket_uri = boto.storage_uri('gs://%s' % json_bucket.name.lower(),
                                    suppress_consec_slashes=False)
      return bucket_uri

    bucket_uri = boto.storage_uri('%s://%s' % (provider, bucket_name.lower()),
                                  suppress_consec_slashes=False)

    if provider == 'gs':
      # Apply API version and project ID headers if necessary.
      headers = {
          'x-goog-api-version': self.api_version,
          GOOG_PROJ_ID_HDR: PopulateProjectId()
      }
    else:
      headers = {}

    # Parallel tests can easily run into bucket creation quotas.
    # Retry with exponential backoff so that we create them as fast as we
    # reasonably can.
    @Retry(StorageResponseError, tries=7, timeout_secs=1)
    def _CreateBucketWithExponentialBackoff():
      try:
        bucket_uri.create_bucket(storage_class=storage_class,
                                 location=location or '',
                                 headers=headers)
      except StorageResponseError as e:
        # If the service returns a transient error or a connection breaks,
        # it's possible the request succeeded. If that happens, the service
        # will return 409s for all future calls even though our intent
        # succeeded. If the error message says we already own the bucket,
        # assume success to reduce test flakiness. This depends on
        # randomness of test naming buckets to prevent name collisions for
        # test buckets created concurrently in the same project, which is
        # acceptable because this is far less likely than service errors.
        if e.status == 409 and e.body and 'already own' in e.body:
          pass
        else:
          raise

    _CreateBucketWithExponentialBackoff()
    self.bucket_uris.append(bucket_uri)

    if versioning_enabled:
      bucket_uri.configure_versioning(True)

    for i in range(test_objects):
      self.CreateObject(bucket_uri=bucket_uri,
                        object_name=self.MakeTempName('obj'),
                        contents='test {:d}'.format(i).encode('ascii'))
    return bucket_uri
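The prefix/suffix handling above in isolation: both decorations are coerced to text before being joined into a bucket name. The values are made up, and MakeBucketNameValid plus the retry logic are omitted.

import six

bucket_name_prefix = six.ensure_text(b'gsutil-test-')
bucket_name_suffix = six.ensure_text(u'-suffix')
bucket_name = ''.join([bucket_name_prefix, u'my-bucket', bucket_name_suffix])
print(bucket_name.lower())  # gsutil-test-my-bucket-suffix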
Example No. 36
0
def ToUnicode(text):
    if not isinstance(text, six.text_type):
        text = six.ensure_text(text, 'utf-8')
    return text
Example No. 37
0
  def RunCommand(self,
                 command_name,
                 args=None,
                 headers=None,
                 debug=0,
                 return_stdout=False,
                 return_stderr=False,
                 return_log_handler=False,
                 cwd=None):
    """Method for calling gslib.command_runner.CommandRunner.

    Passes parallel_operations=False for all tests, optionally saving/returning
    stdout output. We run all tests multi-threaded, to exercise those more
    complicated code paths.
    TODO: Change to run with parallel_operations=True for all tests. At
    present, doing so causes many test failures.

    Args:
      command_name: The name of the command being run.
      args: Command-line args (arg0 = actual arg, not command name ala bash).
      headers: Dictionary containing optional HTTP headers to pass to boto.
      debug: Debug level to pass in to boto connection (range 0..3).
      return_stdout: If True, will save and return stdout produced by command.
      return_stderr: If True, will save and return stderr produced by command.
      return_log_handler: If True, will return a MockLoggingHandler instance
           that was attached to the command's logger while running.
      cwd: The working directory that should be switched to before running the
           command. The working directory will be reset back to its original
           value after running the command. If not specified, the working
           directory is left unchanged.

    Returns:
      One or a tuple of requested return values, depending on whether
      return_stdout, return_stderr, and/or return_log_handler were specified.
      Return Types:
        stdout - binary
        stderr - str (binary in Py2, text in Py3)
        log_handler - MockLoggingHandler
    """
    args = args or []

    command_line = ' '.join([command_name] + args)
    if self.is_debugging:
      self.stderr_save.write('\nRunCommand of {}\n'.format(command_line))

    # Save and truncate stdout and stderr for the lifetime of RunCommand. This
    # way, we can return just the stdout and stderr that was output during the
    # RunNamedCommand call below.
    sys.stdout.seek(0)
    sys.stderr.seek(0)
    stdout = sys.stdout.read()
    stderr = sys.stderr.read()
    if stdout:
      self.accumulated_stdout.append(stdout)
    if stderr:
      self.accumulated_stderr.append(stderr)
    sys.stdout.seek(0)
    sys.stderr.seek(0)
    sys.stdout.truncate()
    sys.stderr.truncate()

    mock_log_handler = MockLoggingHandler()
    logging.getLogger(command_name).addHandler(mock_log_handler)
    if debug:
      logging.getLogger(command_name).setLevel(logging.DEBUG)

    try:
      with WorkingDirectory(cwd):
        self.command_runner.RunNamedCommand(command_name,
                                            args=args,
                                            headers=headers,
                                            debug=debug,
                                            parallel_operations=False,
                                            do_shutdown=False)
    finally:
      sys.stdout.seek(0)
      sys.stderr.seek(0)
      if six.PY2:
        stdout = sys.stdout.read()
        stderr = sys.stderr.read()
      else:
        try:
          stdout = sys.stdout.read()
          stderr = sys.stderr.read()
        except UnicodeDecodeError:
          sys.stdout.seek(0)
          sys.stderr.seek(0)
          stdout = sys.stdout.buffer.read().decode(UTF8)
          stderr = sys.stderr.buffer.read().decode(UTF8)
      logging.getLogger(command_name).removeHandler(mock_log_handler)
      mock_log_handler.close()

      log_output = '\n'.join(
          '%s:\n  ' % level + '\n  '.join(records)
          for level, records in six.iteritems(mock_log_handler.messages)
          if records)

      _id = six.ensure_text(self.id())
      if self.is_debugging and log_output:
        self.stderr_save.write('==== logging RunCommand {} {} ====\n'.format(
            _id, command_line))
        self.stderr_save.write(log_output)
        self.stderr_save.write('\n==== end logging ====\n')
      if self.is_debugging and stdout:
        self.stderr_save.write('==== stdout RunCommand {} {} ====\n'.format(
            _id, command_line))
        self.stderr_save.write(stdout)
        self.stderr_save.write('==== end stdout ====\n')
      if self.is_debugging and stderr:
        self.stderr_save.write('==== stderr RunCommand {} {} ====\n'.format(
            _id, command_line))
        self.stderr_save.write(stderr)
        self.stderr_save.write('==== end stderr ====\n')

      # Reset stdout and stderr files, so that we won't print them out again
      # in tearDown if debugging is enabled.
      sys.stdout.seek(0)
      sys.stderr.seek(0)
      sys.stdout.truncate()
      sys.stderr.truncate()

    to_return = []
    if return_stdout:
      to_return.append(stdout)
    if return_stderr:
      to_return.append(stderr)
    if return_log_handler:
      to_return.append(mock_log_handler)
    if len(to_return) == 1:
      return to_return[0]
    return tuple(to_return)
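A much-simplified sketch of the capture idea in the docstring above: return only the output produced during one call. It swaps sys.stdout for an io.StringIO rather than using gsutil's seek/truncate bookkeeping, and the helper name is invented (Python 3).

import io
import sys

def run_and_capture_stdout(func):
    # Swap sys.stdout for the duration of the call and return what was printed.
    saved_stdout = sys.stdout
    sys.stdout = io.StringIO()
    try:
        func()
        return sys.stdout.getvalue()
    finally:
        sys.stdout = saved_stdout

captured = run_and_capture_stdout(lambda: print('hello from the command'))
print(repr(captured))  # 'hello from the command\n'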
Example No. 38
0
  def from_frozen_graph(cls,
                        graph_def_file,
                        input_arrays,
                        output_arrays,
                        input_shapes=None):
    """Creates a TFLiteConverter class from a file containing a frozen GraphDef.

    Args:
      graph_def_file: Full filepath of file containing frozen GraphDef.
      input_arrays: List of input tensors to freeze graph with.
      output_arrays: List of output tensors to freeze graph with.
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
          None}). (default None)

    Returns:
      TFLiteConverter class.

    Raises:
      IOError:
        File not found.
        Unable to parse input file.
      ValueError:
        The graph is not frozen.
        input_arrays or output_arrays contains an invalid tensor name.
        input_shapes is not correctly defined when required
    """
    with _ops.Graph().as_default():
      with _session.Session() as sess:
        # Read GraphDef from file.
        if not _file_io.file_exists(graph_def_file):
          raise IOError("File '{0}' does not exist.".format(graph_def_file))
        with _file_io.FileIO(graph_def_file, "rb") as f:
          file_content = f.read()

        try:
          graph_def = _graph_pb2.GraphDef()
          graph_def.ParseFromString(file_content)
        except (_text_format.ParseError, DecodeError):
          try:
            print("Ignore 'tcmalloc: large alloc' warnings.")

            if not isinstance(file_content, str):
              if PY2:
                file_content = six.ensure_binary(file_content, "utf-8")
              else:
                file_content = six.ensure_text(file_content, "utf-8")
            graph_def = _graph_pb2.GraphDef()
            _text_format.Merge(file_content, graph_def)
          except (_text_format.ParseError, DecodeError):
            raise IOError(
                "Unable to parse input file '{}'.".format(graph_def_file))

        # Handles models with custom TFLite ops that cannot be resolved in
        # TensorFlow.
        load_model_in_session = True
        try:
          _import_graph_def(graph_def, name="")
        except _NotFoundError:
          load_model_in_session = False

        if load_model_in_session:
          # Check if graph is frozen.
          if not _is_frozen_graph(sess):
            raise ValueError("Please freeze the graph using freeze_graph.py.")

          # Get input and output tensors.
          input_tensors = _get_tensors_from_tensor_names(
              sess.graph, input_arrays)
          output_tensors = _get_tensors_from_tensor_names(
              sess.graph, output_arrays)
          _set_tensor_shapes(input_tensors, input_shapes)

          return cls(sess.graph_def, input_tensors, output_tensors)
        else:
          if not input_shapes:
            raise ValueError("input_shapes must be defined for this model.")
          if set(input_arrays) != set(input_shapes.keys()):
            raise ValueError("input_shapes must contain a value for each item "
                             "in input_array.")

          input_arrays_with_shape = [
              (name, input_shapes[name]) for name in input_arrays
          ]
          return cls(
              graph_def,
              input_tensors=None,
              output_tensors=None,
              input_arrays_with_shape=input_arrays_with_shape,
              output_arrays=output_arrays)
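The bytes-versus-text fallback above, isolated from TensorFlow; coerce_for_text_parser is an invented name and the protobuf text parsing itself is omitted.

import six

def coerce_for_text_parser(file_content):
    # Text-format parsers want the native str type: bytes on Python 2 and
    # text on Python 3, which is exactly what the two six helpers provide.
    if not isinstance(file_content, str):
        if six.PY2:
            file_content = six.ensure_binary(file_content, 'utf-8')
        else:
            file_content = six.ensure_text(file_content, 'utf-8')
    return file_content

print(type(coerce_for_text_parser(b"node { name: 'x' }")))  # <class 'str'> on Python 3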
Example No. 39
0
def extract_item_info(item, gui_options):

    item_details = ItemDetails()

    item_details.id = item.get("Id")
    item_details.etag = item.get("Etag")
    item_details.is_folder = item.get("IsFolder")
    item_details.item_type = item.get("Type")
    item_details.location_type = item.get("LocationType")
    item_details.name = item.get("Name")
    item_details.sort_name = item.get("SortName")
    item_details.original_title = item_details.name

    if item_details.item_type == "Episode":
        item_details.episode_number = item.get("IndexNumber")
        item_details.season_number = item.get("ParentIndexNumber")
        item_details.series_id = item.get("SeriesId")

        if item_details.season_number != 0:
            item_details.season_sort_number = item_details.season_number
            item_details.episode_sort_number = item_details.episode_number
        else:
            special_after_season = item.get("AirsAfterSeasonNumber")
            special_before_season = item.get("AirsBeforeSeasonNumber")
            special_before_episode = item.get("AirsBeforeEpisodeNumber")

            if special_after_season:
                item_details.season_sort_number = special_after_season + 1
            elif special_before_season:
                item_details.season_sort_number = special_before_season - 1

            if special_before_episode:
                item_details.episode_sort_number = special_before_episode - 1

    elif item_details.item_type == "Season":
        item_details.season_number = item.get("IndexNumber")
        item_details.series_id = item.get("SeriesId")

    elif item_details.item_type == "Series":
        item_details.status = item.get("Status")

    elif item_details.item_type == "Audio":
        item_details.track_number = item.get("IndexNumber")
        item_details.album_name = item.get("Album")
        artists = item.get("Artists", [])
        if artists:
            item_details.song_artist = artists[0]  # get first artist

    elif item_details.item_type == "MusicAlbum":
        item_details.album_artist = item.get("AlbumArtist")
        item_details.album_name = item_details.name

    if item_details.season_number is None:
        item_details.season_number = 0
    if item_details.episode_number is None:
        item_details.episode_number = 0

    if item.get("Taglines", []):
        item_details.tagline = item.get("Taglines")[0]

    item_details.tags = []
    if item.get("TagItems", []):
        for tag_info in item.get("TagItems"):
            item_details.tags.append(tag_info.get("Name"))

    # set the item name
    # override with name format string from request
    name_format = gui_options.get("name_format")
    name_format_type = gui_options.get("name_format_type")

    if name_format is not None and item_details.item_type == name_format_type:
        name_info = {}
        name_info["ItemName"] = item.get("Name")
        season_name = item.get("SeriesName")
        if season_name:
            name_info["SeriesName"] = season_name
        else:
            name_info["SeriesName"] = ""
        name_info["SeasonIndex"] = u"%02d" % item_details.season_number
        name_info["EpisodeIndex"] = u"%02d" % item_details.episode_number
        log.debug("FormatName: {0} | {1}".format(name_format, name_info))
        item_details.name = ensure_text(name_format).format(
            **name_info).strip()

    year = item.get("ProductionYear")
    prem_date = item.get("PremiereDate")

    if year is not None:
        item_details.year = year
    elif item_details.year is None and prem_date is not None:
        item_details.year = int(prem_date[:4])

    if prem_date is not None:
        tokens = prem_date.split("T")
        item_details.premiere_date = tokens[0]

    create_date = item.get("DateCreated")
    if create_date:
        item_details.date_added = create_date.split('.')[0].replace('T', " ")

    # add the premiered date for Upcoming TV
    if item_details.location_type == "Virtual":
        airtime = item.get("AirTime")
        item_details.name = item_details.name + ' - ' + item_details.premiere_date + ' - ' + str(
            airtime)

    if item_details.item_type == "Program":
        item_details.program_channel_name = item.get("ChannelName")
        item_details.program_start_date = item.get("StartDate")
        item_details.program_end_date = item.get("EndDate")

    # Process MediaStreams
    media_streams = item.get("MediaStreams", [])
    if media_streams:
        media_info_list = []
        for mediaStream in media_streams:
            stream_type = mediaStream.get("Type")
            if stream_type == "Video":
                media_info = {}
                media_info["type"] = "video"
                media_info["codec"] = mediaStream.get("Codec")
                media_info["height"] = mediaStream.get("Height")
                media_info["width"] = mediaStream.get("Width")
                aspect_ratio = mediaStream.get("AspectRatio")
                media_info["apect"] = aspect_ratio
                if aspect_ratio and len(aspect_ratio) >= 3:
                    try:
                        aspect_width, aspect_height = aspect_ratio.split(':')
                        media_info["apect_ratio"] = float(
                            aspect_width) / float(aspect_height)
                    except:
                        media_info["apect_ratio"] = 1.85
                else:
                    media_info["apect_ratio"] = 1.85
                media_info_list.append(media_info)
            if stream_type == "Audio":
                media_info = {}
                media_info["type"] = "audio"
                media_info["codec"] = mediaStream.get("Codec")
                media_info["channels"] = mediaStream.get("Channels")
                media_info["language"] = mediaStream.get("Language")
                media_info_list.append(media_info)
            if stream_type == "Subtitle":
                item_details.subtitle_available = True
                media_info = {}
                media_info["type"] = "sub"
                media_info["language"] = mediaStream.get("Language", '')
                media_info_list.append(media_info)

        item_details.media_streams = media_info_list

    # Process People
    people = item.get("People", [])
    if people is not None:
        cast = []
        for person in people:
            person_type = person.get("Type")
            if person_type == "Director":
                item_details.director = item_details.director + person.get(
                    "Name") + ' '
            elif person_type == "Writing":
                item_details.writer = person["Name"]
            elif person_type == "Actor":
                person_name = person.get("Name")
                person_role = person.get("Role")
                person_id = person.get("Id")
                person_tag = person.get("PrimaryImageTag")
                if person_tag:
                    person_thumbnail = download_utils.image_url(
                        person_id,
                        "Primary",
                        0,
                        400,
                        400,
                        person_tag,
                        server=gui_options["server"])
                else:
                    person_thumbnail = ""
                person = {
                    "name": person_name,
                    "role": person_role,
                    "thumbnail": person_thumbnail
                }
                cast.append(person)
        item_details.cast = cast

    # Process Studios
    studios = item.get("Studios", [])
    if studios is not None:
        for studio in studios:
            if item_details.studio is None:  # Just take the first one
                studio_name = studio.get("Name")
                item_details.studio = studio_name
                break

    # production location
    prod_location = item.get("ProductionLocations", [])
    if prod_location:
        item_details.production_location = prod_location[0]

    # Process Genres
    genres = item.get("Genres", [])
    if genres:
        item_details.genres = genres

    # Process UserData
    user_data = item.get("UserData", {})

    if user_data.get("Played"):
        item_details.overlay = "6"
        item_details.play_count = 1
    else:
        item_details.overlay = "7"
        item_details.play_count = 0

    if user_data.get("IsFavorite"):
        item_details.overlay = "5"
        item_details.favorite = "true"
    else:
        item_details.favorite = "false"

    reasonable_ticks = user_data.get("PlaybackPositionTicks", 0)
    if reasonable_ticks:
        reasonable_ticks = int(reasonable_ticks) / 1000
        item_details.resume_time = int(reasonable_ticks / 10000)

    item_details.series_name = item.get("SeriesName", '')
    item_details.plot = item.get("Overview", '')

    runtime = item.get("RunTimeTicks")
    if item_details.is_folder is False and runtime:
        item_details.duration = runtime / 10000000

    child_count = item.get("ChildCount")
    if child_count:
        item_details.total_seasons = child_count

    recursive_item_count = item.get("RecursiveItemCount")
    if recursive_item_count:
        item_details.total_episodes = recursive_item_count

    unplayed_item_count = user_data.get("UnplayedItemCount")
    if unplayed_item_count is not None:
        item_details.unwatched_episodes = unplayed_item_count
        item_details.watched_episodes = item_details.total_episodes - unplayed_item_count

    item_details.number_episodes = item_details.total_episodes

    item_details.art = get_art(item, gui_options["server"])
    item_details.rating = item.get("OfficialRating")
    item_details.mpaa = item.get("OfficialRating")

    item_details.community_rating = item.get("CommunityRating")
    if not item_details.community_rating:
        item_details.community_rating = 0.0

    item_details.critic_rating = item.get("CriticRating")
    if not item_details.critic_rating:
        item_details.critic_rating = 0.0

    item_details.location_type = item.get("LocationType")
    item_details.recursive_item_count = item.get("RecursiveItemCount")
    item_details.recursive_unplayed_items_count = user_data.get(
        "UnplayedItemCount")

    item_details.mode = "GET_CONTENT"

    return item_details
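The name-format step above, reduced to a runnable sketch; the format string and values are invented, whereas in the addon they come from gui_options and the server item.

from six import ensure_text

name_format = b'{SeriesName} - S{SeasonIndex}E{EpisodeIndex} - {ItemName}'
name_info = {
    'SeriesName': u'Some Show',
    'SeasonIndex': u'%02d' % 1,
    'EpisodeIndex': u'%02d' % 5,
    'ItemName': u'Pilot',
}
print(ensure_text(name_format).format(**name_info).strip())
# Some Show - S01E05 - Pilot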
Example No. 40
0
def _serialize_report(user, problem_instances, test_groups):
    """Generates a dictionary representing a single report.


    :param request: Django request
    :type user: :cls:`django.contrib.auth.User`
    :param user: user to generate the report for
    :type problem_instances: list of
                              :cls:`oioioi.contests.ProblemInstance`
    :param problem_instances: problem instances to include in the report
    :type test_groups: dict(:cls:`oioioi.contests.ProblemInstance`
                        -> list of str)
    :param test_groups: dictionary mapping problem instances into lists
                        of names of test groups to include
    """

    resultsets = []
    total_score = None

    results = UserResultForProblem.objects.filter(
        user=user,
        problem_instance__in=list(problem_instances),
        submission_report__isnull=False,
    )
    for r in results:
        problem_instance = r.problem_instance
        submission_report = r.submission_report
        submission = submission_report.submission
        source_file = submission.programsubmission.source_file
        groups = list(test_groups[problem_instance])

        try:
            compilation_report = CompilationReport.objects.get(
                submission_report=submission_report
            )
        except CompilationReport.DoesNotExist:
            compilation_report = None

        try:
            test_reports = (
                TestReport.objects.filter(submission_report__submission=submission)
                .filter(submission_report__status='ACTIVE')
                .filter(submission_report__kind__in=['INITIAL', 'NORMAL'])
                .filter(test_group__in=groups)
                .order_by('test__kind', 'test__order', 'test_name')
            )
        except TestReport.DoesNotExist:
            test_reports = []

        group_reports = (
            GroupReport.objects.filter(submission_report__submission=submission)
            .filter(submission_report__status='ACTIVE')
            .filter(submission_report__kind__in=['INITIAL', 'NORMAL'])
            .filter(group__in=groups)
        )
        group_reports = {g.group: g for g in group_reports}
        groups = []
        for group_name, tests in itertools.groupby(
            test_reports, attrgetter('test_group')
        ):
            groups.append({'tests': list(tests), 'report': group_reports[group_name]})

        problem_score = None
        max_problem_score = None
        for group in groups:
            group_score = group['report'].score
            group_max_score = group['report'].max_score

            if problem_score is None:
                problem_score = group_score
            elif group_score is not None:
                problem_score += group_score

            if max_problem_score is None:
                max_problem_score = group_max_score
            elif group_max_score is not None:
                max_problem_score += group_max_score

        resultsets.append(
            dict(
                result=r,
                score=problem_score,
                max_score=max_problem_score,
                compilation_report=compilation_report,
                groups=groups,
                code=six.ensure_text(source_file.read(), errors="replace"),
                codefile=source_file.file.name,
            )
        )
        source_file.close()
        if total_score is None:
            total_score = problem_score
        elif problem_score is not None:
            total_score += problem_score
    return {
        'user': user,
        'resultsets': resultsets,
        'sum': total_score,
    }
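
The score aggregation in this example treats None as "no score yet": the first value seen seeds the running total, and later None values are skipped instead of poisoning the sum. A standalone sketch of that pattern follows, with a hypothetical helper name and plain integers standing in for oioioi score objects.

def accumulate_scores(scores):
    # Mirrors how problem_score and total_score are built above:
    # the first value initialises the total, later None values are ignored.
    total = None
    for score in scores:
        if total is None:
            total = score
        elif score is not None:
            total += score
    return total

assert accumulate_scores([None, 3, None, 4]) == 7
assert accumulate_scores([None, None]) is None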
Example No. 41
0
    def wpt_to_gecko_commits(self, dependencies=None):
        # type: (Optional[List[WptCommit]]) -> None
        """Create a patch based on wpt branch, apply it to corresponding gecko branch.

        If there is a commit with wpt-type metadata, this function will remove it. The
        sha1 will be stashed in self.data["metadata-commit"] so it can be restored next time
        we call ensure_metadata_commit()
        """
        # The logic here is that we can retain any dependent commits as long as we have
        # at least the set in the dependencies array. Those are followed by the gecko
        # commits created from the wpt_commits, interspersed with any number of manifest
        # commits, and finally by zero or one metadata commits.

        if dependencies:
            expected_commits = [
                (item.sha1, item, True) for item in dependencies
            ]  # type: List[Tuple[Text, Optional[WptCommit], bool]]
        else:
            # If no dependencies are supplied, retain the ones that we already have, if any
            expected_commits = []
            for commit in self.gecko_commits:
                assert isinstance(commit, sync_commit.GeckoCommit)
                if commit.metadata.get("wpt-type") == "dependency":
                    expected_commits.append(
                        (commit.metadata["wpt-commit"], None, True))
                else:
                    break

        # Expect all the new commits
        for commit in self.wpt_commits:
            assert isinstance(commit, WptCommit)
            if not commit.is_merge:
                expected_commits.append((commit.sha1, commit, False))

        existing = [
            commit for commit in self.gecko_commits
            if commit.metadata.get("wpt-commit")
            and commit.metadata.get("wpt-type") in ("dependency", None)
        ]
        if MYPY:
            existing_commits = cast(List[GeckoCommit], existing)
        else:
            existing_commits = existing

        retain_commits = 0
        for gecko_commit, (wpt_sha1, _, _) in zip(existing_commits,
                                                  expected_commits):
            if gecko_commit.metadata.get("wpt-commit") != wpt_sha1:
                break
            retain_commits += 1

        keep_commits = existing_commits[:retain_commits]
        maybe_add_commits = expected_commits[retain_commits:]

        # Strip out any leading commits that come from currently applied dependencies that are
        # not being retained
        strip_count = 0
        for _, wpt_commit, _ in maybe_add_commits:
            if wpt_commit is not None:
                break
            strip_count += 1
        add_commits = maybe_add_commits[strip_count:]

        if len(keep_commits) == len(existing_commits) and not add_commits:
            logger.info("Commits did not change")
            return

        logger.info("Keeping %i existing commits; adding %i new commits" %
                    (len(keep_commits), len(add_commits)))

        if self.metadata_commit:
            # If we have a metadata commit, store it in self.data["metadata-commit"],
            # remove it when updating commits, and reapply it when we next call
            # ensure_metadata_commit().
            self.data["metadata-commit"] = self.metadata_commit.sha1

        reset_head = None
        if not keep_commits:
            reset_head = self.data["gecko-base"]
        elif len(keep_commits) < len(existing_commits):
            reset_head = keep_commits[-1]
        elif ("metadata-commit" in self.data and
              self.gecko_commits[-1].metadata.get("wpt-type") == "metadata"):
            reset_head = self.gecko_commits[-2]

        # Clear the set of affected tests since there are updates
        del self.data["affected-tests"]

        gecko_work = self.gecko_worktree.get()

        if reset_head:
            self.gecko_commits.head = reset_head  # type: ignore
        gecko_work.git.reset(hard=True)

        for _, wpt_commit, is_dependency in add_commits:
            assert wpt_commit is not None
            logger.info("Moving commit %s" % wpt_commit.sha1)
            if is_dependency:
                metadata = {
                    u"wpt-type": u"dependency",
                    u"wpt-commit": wpt_commit.sha1
                }
                msg_filter = None
            else:
                metadata = {
                    u"wpt-pr": six.ensure_text(str(self.pr)),
                    u"wpt-commit": wpt_commit.sha1
                }
                msg_filter = self.message_filter

            wpt_commit.move(gecko_work,
                            dest_prefix=env.config["gecko"]["path"]["wpt"],
                            msg_filter=msg_filter,
                            metadata=metadata,
                            patch_fallback=True)
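
The retention step above is essentially a longest-matching-prefix comparison between the gecko commits that already exist and the commits expected from the wpt side, keyed by wpt sha1. A simplified sketch of that idea follows, using hypothetical sha1 strings rather than the sync tool's commit objects.

def split_retained(existing_sha1s, expected_sha1s):
    # Keep the longest prefix of existing commits whose wpt sha1s line up with
    # the expected list; everything past that point has to be recreated.
    retained = 0
    for have, want in zip(existing_sha1s, expected_sha1s):
        if have != want:
            break
        retained += 1
    return existing_sha1s[:retained], expected_sha1s[retained:]

keep, add = split_retained(["a1", "b2", "c3"], ["a1", "b2", "d4", "e5"])
assert keep == ["a1", "b2"]
assert add == ["d4", "e5"]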