Example #1
def correct_blinking(tracks, temporal, spatial):
    # `distance` and `join_tracks` are project helpers defined elsewhere.
    from functools import reduce  # required on Python 3

    # For every track, collect the indices of later tracks that start within
    # `temporal` frames of its end and lie within `spatial` distance.
    new_tracks = []
    for init in range(len(tracks)):
        cur = {init}
        tr1 = tracks[init]
        for i in range(init + 1, len(tracks)):
            tr2 = tracks[i]
            if min(tr2['frame']) - max(tr1['frame']) > temporal:
                break
            elif distance((tr1['xmean'], tr1['ymean']), (tr2['xmean'], tr2['ymean'])) <= spatial:
                cur.add(i)
        new_tracks.append(cur)

    # Merge overlapping index sets until a fixed point is reached. The maps
    # are materialized as lists so the comparison works on Python 3.
    merge = lambda groups: [a | reduce(lambda b, c: b | c,
                                       filter(lambda d: d & a, groups))
                            for a in groups]
    first = new_tracks[::-1]
    second = merge(first)
    while second != first:
        first = second
        second = merge(first)
    # Deduplicate the merged groups; sorting each group makes equal sets
    # compare equal regardless of set iteration order.
    links_groups = sorted(set(tuple(sorted(a)) for a in second))
    tracks = [join_tracks([tracks[i] for i in links]) for links in links_groups]
    # Debug dump of one merged track (assumes at least 101 tracks).
    with open('outfile.txt', 'w') as outf:
        outf.write('\n\n'.join("%s:\t%s" % (k, v) for k, v in tracks[100].items()))
    return tracks
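The merging loop above computes a fixed point: every set of track indices absorbs all sets it overlaps until nothing changes. A minimal self-contained sketch of the same idea, with toy index sets in place of real tracks:

from functools import reduce

def merge_overlapping(groups):
    # Union each group with every group it intersects, until stable.
    first = list(groups)
    while True:
        second = [a | reduce(lambda b, c: b | c,
                             filter(lambda d: d & a, first))
                  for a in first]
        if second == first:
            # Deduplicate: sort each group so equal sets compare equal.
            return sorted(set(tuple(sorted(g)) for g in second))
        first = second

print(merge_overlapping([{0, 1}, {1, 2}, {3}]))  # [(0, 1, 2), (3,)]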
Example #2
def partition_t(wrap, func):
    return Transformation(
        'partition({0})'.format(name(func)),
        lambda sequence: wrap(
            (wrap(filter(func, sequence)), wrap(filter(lambda val: not func(val), sequence)))
        ),
        None
    )
Example #3
def partition_t(wrap, func):
    """
    Transformation for Sequence.partition
    :param wrap: wrap children values with this
    :param func: partition function
    :return: transformation
    """
    return Transformation(
        'partition({0})'.format(name(func)),
        lambda sequence: wrap(
            (wrap(filter(func, sequence)), wrap(filter(lambda val: not func(val), sequence)))
        ),
        None
    )
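For comparison, the same two-way split with plain built-ins; note that filter is lazy in Python 3, so partitioning a one-shot iterator this way would need itertools.tee first (a list, as here, is safe):

nums = list(range(10))
evens = list(filter(lambda x: x % 2 == 0, nums))
odds = list(filter(lambda x: x % 2 != 0, nums))
print(evens, odds)  # [0, 2, 4, 6, 8] [1, 3, 5, 7, 9]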
Example #4
def update_contacts(contacts):
    contacts = map(_transform_contact_data, contacts)

    # Filter contact data using whitelist
    if settings.EMARSYS_RECIPIENT_WHITELIST is not None:
        contacts = filter(lambda contact: contact[3]  # 3=email
                          in settings.EMARSYS_RECIPIENT_WHITELIST, contacts)

    contacts = list(contacts)

    assert len(contacts) <= BATCH_SIZE

    if not contacts:
        return 0, [], []

    num_successful, errors = _update_contacts(contacts)

    missing_contacts = [email
                        for email, error_dict in errors.items()
                        if '2008' in error_dict]
    failed_contacts = [(email, error_dict)
                       for email, error_dict in errors.items()
                       if '2008' not in error_dict]

    return num_successful, missing_contacts, failed_contacts
Example #5
 def get_content_models(cls):
     """
     Return all Page subclasses.
     """
     def is_content_model(m):
         return m is not Page and issubclass(m, Page) and not m._meta.proxy
     return list(filter(is_content_model, apps.get_models()))
Example #6
  def _CombineParenthesis(self):
    for i in range(len(self.stack) - 2):
      if (self.stack[i] == "(" and self.stack[i + 2] == ")" and
          isinstance(self.stack[i + 1], Expression)):
        self.stack[i] = None
        self.stack[i + 2] = None

    self.stack = list(filter(None, self.stack))
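The closing list(filter(None, self.stack)) is the idiom these parser examples keep returning to: null out elements in place, then compact the list in one pass (falsy survivors would be dropped too, which is safe for a stack of tokens and Expression objects). A standalone illustration:

stack = ["(", "expr", ")", "and", "other"]
stack[0] = stack[2] = None           # parentheses combined into "expr"
print(list(filter(None, stack)))     # ['expr', 'and', 'other']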
Example #7
  def _CombineContext(self):
    # Context can merge from item 0
    for i in range(len(self.stack) - 1, 0, -1):
      item = self.stack[i - 1]
      if (isinstance(item, ContextExpression) and
          isinstance(self.stack[i], lexer.Expression)):
        expression = self.stack[i]
        item.SetExpression(expression)
        self.stack[i] = None

    self.stack = list(filter(None, self.stack))
Example #8
  def Split(self, count=None):
    """Returns all the path components.

    Args:
      count: If count is specified, the output will be exactly this many path
        components, possibly extended with the empty string. This is useful for
        tuple assignments without worrying about ValueErrors:  namespace, path =
          urn.Split(2)

    Returns:
      A list of path components of this URN.
    """
    if count:
      result = list(filter(None, self._string_urn.split("/", count)))
      while len(result) < count:
        result.append("")

      return result

    else:
      return list(filter(None, self._string_urn.split("/")))
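A sketch of the same padding trick outside the class (the URN string is made up), showing why the tuple assignment from the docstring cannot raise ValueError:

def split_padded(s, count):
    result = list(filter(None, s.split("/", count)))
    while len(result) < count:
        result.append("")
    return result

namespace, path = split_padded("aff4:/C.1234", 2)
print((namespace, path))  # ('aff4:', 'C.1234')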
Example #9
File: io.py Project: greole/owls
def extract(line, keys):
    """Return the matching column names and all numbers on the line as floats,
    e.g. "ExecutionTime": [0, 1]."""
    import re
    for key, col_names in keys.items():
        if re.search(key, line):
            return col_names, list(map(float, filter(None, re.findall(
                r"[0-9]+[.]?[0-9]*[e]?[\-]?[0-9]*", line))))
    return None, None
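A usage sketch with a made-up log line (keys maps a regex to the column names to return; every number on a matching line comes back as a float):

line = "ExecutionTime = 0.42 s  ClockTime = 1 s"
keys = {"ExecutionTime": ["exec", "clock"]}
print(extract(line, keys))  # (['exec', 'clock'], [0.42, 1.0])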
Example #10
  def Parse(self, stat, file_obj, knowledge_base):

    del knowledge_base  # Unused.

    lines = set([l.strip() for l in file_obj.read().splitlines()])

    users = list(filter(None, lines))

    filename = stat.pathspec.path
    cfg = {"filename": filename, "users": users}

    yield rdf_protodict.AttributedDict(**cfg)
Example #11
  def _CombineBinaryExpressions(self, operator):
    for i in range(1, len(self.stack) - 1):
      item = self.stack[i]
      if (isinstance(item, BinaryExpression) and item.operator == operator and
          isinstance(self.stack[i - 1], Expression) and
          isinstance(self.stack[i + 1], Expression)):
        lhs = self.stack[i - 1]
        rhs = self.stack[i + 1]

        item.AddOperands(lhs, rhs)
        self.stack[i - 1] = None
        self.stack[i + 1] = None

    self.stack = list(filter(None, self.stack))
Example #12
  def _CombineBinaryExpressions(self, operator):
    for i in range(1, len(self.stack) - 1):
      item = self.stack[i]
      if (isinstance(item, lexer.BinaryExpression) and
          item.operator.lower() == operator.lower() and
          isinstance(self.stack[i - 1], lexer.Expression) and
          isinstance(self.stack[i + 1], lexer.Expression)):
        lhs = self.stack[i - 1]
        rhs = self.stack[i + 1]

        self.stack[i].AddOperands(lhs, rhs)  # pytype: disable=attribute-error
        self.stack[i - 1] = None
        self.stack[i + 1] = None

    self.stack = list(filter(None, self.stack))
Example #13
  def ParseMultiple(self, stats, file_objects, knowledge_base):

    del knowledge_base  # Unused.

    lines = set()
    for file_obj in file_objects:
      lines.update(set(l.strip() for l in file_obj.read().splitlines()))

    users = list(filter(None, lines))

    for stat in stats:
      filename = stat.pathspec.path
      cfg = {"filename": filename, "users": users}

      yield rdf_protodict.AttributedDict(**cfg)
Example #14
 def _parseParentChildRelsR(self, grp):
     levels=None
     if grp.tag=='{{{ns0}}}orthologGroup'.format(**self.parser.ns):
         levels = [l.get('value') for l in grp.findall('./{{{ns0}}}property[@name="TaxRange"]'
             .format(**self.parser.ns))]
     children = filter( lambda x:x.tag in 
         {"{{{ns0}}}orthologGroup".format(**self.parser.ns), 
          "{{{ns0}}}paralogGroup".format(**self.parser.ns)},
         list(grp))
     subLevs = reduce( set.union, map(self._parseParentChildRelsR, children), set())
     if levels is not None:
         for parent in levels:
             for child in subLevs:
                 self.adj.add((parent,child))
         subLevs = set(levels)
     return subLevs
Example #15
  def Request(self):
    """Create the Approval object and notify the Approval Granter."""

    approval_id = "approval:%X" % random.UInt32()
    approval_urn = self.BuildApprovalUrn(approval_id)

    email_msg_id = email.utils.make_msgid()

    with aff4.FACTORY.Create(
        approval_urn, self.approval_type, mode="w",
        token=self.token) as approval_request:
      approval_request.Set(approval_request.Schema.SUBJECT(self.subject_urn))
      approval_request.Set(
          approval_request.Schema.REQUESTOR(self.token.username))
      approval_request.Set(approval_request.Schema.REASON(self.reason))
      approval_request.Set(approval_request.Schema.EMAIL_MSG_ID(email_msg_id))

      cc_addresses = (self.email_cc_address,
                      config.CONFIG.Get("Email.approval_cc_address"))
      email_cc = ",".join(filter(None, cc_addresses))

      # When we reply with the approval we want to cc all the people to whom the
      # original approval was sent, to avoid people approving stuff that was
      # already approved.
      if email_cc:
        reply_cc = ",".join((self.approver, email_cc))
      else:
        reply_cc = self.approver

      approval_request.Set(approval_request.Schema.EMAIL_CC(reply_cc))

      approval_request.Set(
          approval_request.Schema.NOTIFIED_USERS(self.approver))

      # We add ourselves as an approver as well (The requirement is that we have
      # 2 approvers, so the requester is automatically an approver).
      approval_request.AddAttribute(
          approval_request.Schema.APPROVER(self.token.username))

    approval_link_urns = self.BuildApprovalSymlinksUrns(approval_id)
    for link_urn in approval_link_urns:
      with aff4.FACTORY.Create(
          link_urn, aff4.AFF4Symlink, mode="w", token=self.token) as link:
        link.Set(link.Schema.SYMLINK_TARGET(approval_urn))

    return approval_urn
Example #17
    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.

        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored.  (XXX They should
        probably be diagnosed.)

        """
        # abandon query parameters
        path = urllib.parse.urlparse(path)[2]
        path = os.path.normpath(urllib.parse.unquote(path))
        words = path.split('/')
        words = filter(None, words)
        path = self.root
        for word in words:
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            path = os.path.join(path, word)
        return path
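The filter(None, words) step drops the empty strings produced by leading, doubled, or trailing slashes, and the splitdrive/split calls discard anything that could escape the root. The core effect, condensed (POSIX paths assumed):

import os
raw = "//static//../css/style.css"
words = filter(None, os.path.normpath(raw).split("/"))
print(list(words))  # ['css', 'style.css']  ('..' collapsed by normpath)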
Example #19
    def _LastEntryTimestamp(dct, upper_bound_timestamp):
        """Searches for greatest timestamp lower than the specified one.

        Args:
          dct: A dictionary from timestamps to some items.
          upper_bound_timestamp: An upper bound for timestamp to be returned.

        Returns:
          Greatest timestamp that is lower than the specified one. If no such
          value exists, `None` is returned.
        """
        if upper_bound_timestamp is None:
            upper_bound = lambda _: True
        else:
            upper_bound = lambda key: key <= upper_bound_timestamp

        try:
            return max(filter(upper_bound, iterkeys(dct)))
        except ValueError:  # Thrown if `max` input (result of filtering) is empty.
            return None
Example #20
  def _LastEntryTimestamp(dct, upper_bound_timestamp):
    """Searches for greatest timestamp lower than the specified one.

    Args:
      dct: A dictionary from timestamps to some items.
      upper_bound_timestamp: An upper bound for timestamp to be returned.

    Returns:
      Greatest timestamp that is lower than the specified one. If no such value
      exists, `None` is returned.
    """
    if upper_bound_timestamp is None:
      upper_bound = lambda _: True
    else:
      upper_bound = lambda key: key <= upper_bound_timestamp

    try:
      return max(filter(upper_bound, iterkeys(dct)))
    except ValueError:  # Thrown if `max` input (result of filtering) is empty.
      return None
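A toy invocation of the same logic as a standalone function (plain dict iteration instead of iterkeys):

def last_entry_timestamp(dct, upper_bound_timestamp=None):
    bound = ((lambda _: True) if upper_bound_timestamp is None
             else (lambda key: key <= upper_bound_timestamp))
    try:
        return max(filter(bound, dct))
    except ValueError:  # empty after filtering
        return None

print(last_entry_timestamp({10: "a", 20: "b", 30: "c"}, 25))  # 20
print(last_entry_timestamp({10: "a"}, 5))                     # None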
Example #21
 def _parseParentChildRelsR(self, grp):
     levels = None
     if grp.tag == '{{{ns0}}}orthologGroup'.format(**self.parser.ns):
         levels = [
             l.get('value')
             for l in grp.findall('./{{{ns0}}}property[@name="TaxRange"]'.
                                  format(**self.parser.ns))
         ]
     children = filter(
         lambda x: x.tag in {
             "{{{ns0}}}orthologGroup".format(**self.parser.ns),
             "{{{ns0}}}paralogGroup".format(**self.parser.ns)
         }, list(grp))
     subLevs = reduce(set.union, map(self._parseParentChildRelsR, children),
                      set())
     if levels is not None:
         for parent in levels:
             for child in subLevs:
                 self.adj.add((parent, child))
         subLevs = set(levels)
     return subLevs
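The reduce(set.union, map(...), set()) pattern used for subLevs is a compact way to union many sets (the empty-set initializer keeps it safe when there are no children). A standalone sketch:

from functools import reduce

levels = [{"Mammalia"}, {"Aves", "Reptilia"}, set()]
print(reduce(set.union, levels, set()))
# {'Mammalia', 'Aves', 'Reptilia'} (order varies)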
Example #22
def render_skeleton(skeleton='skeleton-rb', data=None):
	# Guard against the None default; callers must still supply data['name'].
	if data is None:
		data = {}
	data.update({'skeletondir': os.path.join(SCRIPTPARENT, skeleton)})
	start_dir = os.path.join(data['skeletondir'], '{{name}}')
	
	files_skel = map(lambda p: os.path.relpath(p, start=start_dir), 
		filter(lambda p: os.path.isfile(p), glob.glob(start_dir + '/**/*',
		recursive=True)  + glob.glob(start_dir + '/**/.*', recursive=True)))
	inouts = {}
	for skelX in files_skel:
		inouts[skelX] = re.sub(r'\.mustache$', '', pystache.render(
			os.path.join(CUR_DIR, data['name'], skelX), data))
	print('... {0} files processing ...'.format(len(inouts)))
	
	for dirX in [os.path.dirname(pathX) for pathX in set(inouts.values())]:
		if not os.path.exists(os.path.join(CUR_DIR, data['name'], dirX)):
			os.makedirs(os.path.join(CUR_DIR, data['name'], dirX))
	for src, dst in inouts.items():
		with open(os.path.join(CUR_DIR, data['name'], dst), 'w+') as fOut, open(os.path.join(start_dir, src)) as fIn:
			fOut.write(pystache.render(fIn.read(), data))
	
	print('Post rendering message')
	os.chdir(data['name'])
	os.system('python choices/post_render.py')
Example #23
 def write(self):
     print('\nFamily Analysis:')
     for species in self.species:
         gids = sorted(
             filter(lambda gid: self.parser.mapGeneToSpecies(gid) == species,
                    self.parser.getGeneIds()),
             # Python 3: list.sort() no longer accepts cmp; use a key function.
             key=lambda x: len(self._gene2copies[species][x]))
         coveredFams = set(map(lambda x: self._gene2fam.get(x,None), gids))
         print("{} - {} of {} sub-families covered".
             format(species, len(coveredFams), 
                 len(coveredFams)+ len(self._famWhereLost[species])))
         for gid in gids:
             if len(self._gene2copies[species][gid])<=0:
                 print(" {}: n/a (singleton not in any family)".format(
                     self.parser.mapGeneToXRef(gid, self.XRefTag)))
             else:
                 args = dict(gXref=self.parser.mapGeneToXRef(gid,self.XRefTag),
                     famId=self._gene2fam[gid],
                     cnt=len(self._gene2copies[species][gid]),
                     sib=";".join([self.parser.mapGeneToXRef(z,self.XRefTag) 
                         for z in self._gene2copies[species][gid]]))
                 print(" {gXref}: {famId} ({cnt}): {sib}".format(**args))
         for fam in self._famWhereLost[species]:
             print(" n/a: {} (0) no member in subfamily".format(fam))
Example #24
 def MultiResponseParsers(self):
   return filter(self._IsSupported, MULTI_RESPONSE_PARSER_FACTORY.CreateAll())
Example #25
 def MultiFileParsers(self):
   return filter(self._IsSupported, MULTI_FILE_PARSER_FACTORY.CreateAll())
Example #26
 def SingleFileParsers(self):
   return filter(self._IsSupported, SINGLE_FILE_PARSER_FACTORY.CreateAll())
Example #27
 def get_content_models(cls):
     """
     Return all Page subclasses.
     """
     is_content_model = lambda m: m is not Page and issubclass(m, Page)
     return list(filter(is_content_model, models.get_models()))
Example #28
 def SingleResponseParsers(self):
     # TODO: Apparently, pytype does not understand that we use
     # `filter` from the `future` package (which returns an iterator), instead of
     # builtin one which in Python 2 returns lists.
     return filter(self._IsSupported,
                   SINGLE_RESPONSE_PARSER_FACTORY.CreateAll())  # pytype: disable=bad-return-type
Example #29
    def __init__(self,
                 base_fd,
                 handlers,
                 pathspec=None,
                 progress_callback=None):
        super(RegistryFile, self).__init__(base_fd,
                                           handlers=handlers,
                                           pathspec=pathspec,
                                           progress_callback=progress_callback)

        self.value = None
        self.value_type = winreg.REG_NONE
        self.hive = None
        self.hive_name = None
        self.local_path = None
        self.last_modified = 0
        self.is_directory = True
        self.fd = None

        if base_fd is None:
            self.pathspec.Append(pathspec)
        elif base_fd.IsDirectory():
            self.pathspec.last.path = utils.JoinPath(self.pathspec.last.path,
                                                     pathspec.path)
        else:
            raise IOError(
                "Registry handler can not be stacked on another handler.")

        path_components = list(filter(None,
                                      self.pathspec.last.path.split("/")))
        try:
            # The first component MUST be a hive
            self.hive_name = path_components[0]
            self.hive = KeyHandle(getattr(winreg, self.hive_name))
        except AttributeError:
            raise IOError("Unknown hive name %s" % self.hive_name)
        except IndexError:
            # A hive is not specified, we just list all the hives.
            return

        # Normalize the path casing if needed
        self.key_name = "/".join(path_components[1:])
        self.local_path = CanonicalPathToLocalPath(self.key_name)

        try:
            # Maybe its a value
            key_name, value_name = os.path.split(self.local_path)
            with OpenKey(self.hive, key_name) as key:
                self.value, self.value_type = QueryValueEx(key, value_name)

            # TODO: Registry-VFS has issues when keys and values of the
            # same name exist. ListNames() does not work for a key, if a value of the
            # same name exists. The original assumption was: "We are a value and
            # therefore not a directory". This is false, since the Registry can have
            # a key and a value of the same name in the same parent key.
            self.is_directory = False
        except OSError:
            try:
                # Try to get the default value for this key
                with OpenKey(self.hive, self.local_path) as key:

                    # Check for default value.
                    try:
                        self.value, self.value_type = QueryValueEx(key, "")
                    except OSError:
                        # Empty default value
                        self.value = ""
                        self.value_type = winreg.REG_NONE

            except OSError:
                raise IOError("Unable to open key %s" % self.key_name)
Example #30
def partition_t(wrap, func):
    return Transformation(
        'partition({0})'.format(name(func)), lambda sequence: wrap(
            (wrap(filter(func, sequence)),
             wrap(filter(lambda val: not func(val), sequence)))), None)
Example #31
File: base.py Project: google/grr
  def Open(cls, fd, component, handlers, pathspec=None, progress_callback=None):
    """Try to correct the casing of component.

    This method is called when we failed to open the component directly. We try
    to transform the component into something which is likely to work.

    In this implementation, we correct the case of the component until we can
    not open the path any more.

    Args:
      fd: The base fd we will use.
      component: The component we should open.
      handlers: A mapping from rdf_paths.PathSpec.PathType to classes
        implementing VFSHandler.
      pathspec: The rest of the pathspec object.
      progress_callback: A callback to indicate that the open call is still
        working but needs more time.

    Returns:
      A file object.

    Raises:
      IOError: If nothing could be opened still.
    """
    # The handler for this component
    try:
      handler = handlers[component.pathtype]
    except KeyError:
      raise UnsupportedHandlerError(component.pathtype)

    # We will not do any case folding unless requested.
    if component.path_options == rdf_paths.PathSpec.Options.CASE_LITERAL:
      return handler(base_fd=fd, pathspec=component, handlers=handlers)

    path_components = client_utils.LocalPathToCanonicalPath(component.path)
    path_components = ["/"] + list(filter(None, path_components.split("/")))
    for i, path_component in enumerate(path_components):
      try:
        if fd:
          new_pathspec = fd.MatchBestComponentName(path_component)
        else:
          new_pathspec = component
          new_pathspec.path = path_component

        # The handler for this component
        try:
          handler = handlers[new_pathspec.pathtype]
        except KeyError:
          raise UnsupportedHandlerError(new_pathspec.pathtype)

        fd = handler(
            base_fd=fd,
            handlers=handlers,
            pathspec=new_pathspec,
            progress_callback=progress_callback)
      except IOError as e:
        # Can not open the first component, we must raise here.
        if i <= 1:
          raise IOError("File not found: {}".format(component))

        # Do not try to use TSK to open a not-found registry entry, fail
        # instead. Using TSK would lead to confusing error messages, hiding
        # the fact that the Registry entry is simply not there.
        if component.pathtype == rdf_paths.PathSpec.PathType.REGISTRY:
          raise IOError("Registry entry not found: {}".format(e))

        # Insert the remaining path at the front of the pathspec.
        pathspec.Insert(
            0,
            path=utils.JoinPath(*path_components[i:]),
            pathtype=rdf_paths.PathSpec.PathType.TSK)
        break

    return fd
Example #32
 def SingleResponseParsers(self):
     return filter(self._IsSupported,
                   SINGLE_RESPONSE_PARSER_FACTORY.CreateAll())
Example #33
 def collect_listenable_properties(dct):
     return list(filter(lambda item: isinstance(item[1], listenable_property_base), iteritems(dct)))
Example #34
def sync_contacts(contacts, create_missing=True, quiet=True):
    """
    contacts is a list of dictionaries like this:
        [{
            u'E-Mail': u'*****@*****.**',
            u'Gender': 2,
            u'First Name': u'Admin',
            u'Last Name': u'von Total Berlin',
            ...
        }, ...]

    The dictionary keys are mapped to emarsys field ids using
    settings.EMARSYS_FIELDS, which can be generated with `get_fields()`.
    Fields in settings.EMARSYS_CREATE_ONLY_FIELDS are not sent when updating a
    contact.
    """

    def log_debug(message):
        if not quiet:
            print("{}\n".format(message))

    def chunked(it, n):
        """
        From http://stackoverflow.com/a/8991553 (uses itertools.islice,
        assumed imported alongside `tee`; the builtin `slice` is not iterable).
        """
        it = iter(it)
        while True:
            chunk = tuple(islice(it, n))
            if not chunk:
                return
            yield chunk

    total_updated = 0
    total_created = 0

    # emails of contacts that couldn't be updated because they don't exist at
    # emarsys
    missing_contacts = []

    # emails of contacts that couldn't be updated or created due to an error at
    # emarsys
    failed_contacts = []

    contacts = map(_transform_contact_data, contacts)

    # Filter contact data using whitelist
    if settings.EMARSYS_RECIPIENT_WHITELIST is not None:
        contacts = filter(lambda contact: contact[3]  # 3=email
                          in settings.EMARSYS_RECIPIENT_WHITELIST, contacts)

    update_contacts, create_contacts = tee(contacts, 2)

    # Filter out fields in create_only_fields for updating
    create_only_field_ids = [settings.EMARSYS_FIELDS[field_name][0]
                             for field_name in
                             settings.EMARSYS_CREATE_ONLY_FIELDS]
    update_contacts = [{k: v for k, v in contact.items()
                        if k not in create_only_field_ids}
                       for contact in update_contacts]

    # Update contacts
    for chunk_of_contacts in chunked(update_contacts, BATCH_SIZE):
        log_debug("Updating a chunk of {} users."
                  .format(len(chunk_of_contacts)))

        num_successful, errors = _update_contacts(chunk_of_contacts)
        log_debug('{} users updated, {} users errored.'
                  .format(num_successful, len(errors)))

        total_updated += num_successful

        missing_contacts.extend(email
                                for email, error_dict in errors.items()
                                if '2008' in error_dict)
        failed_contacts.extend((email, error_dict)
                               for email, error_dict in errors.items()
                               if '2008' not in error_dict)

    if create_missing:
        # Find contacts to create in original contact list
        create_contacts = filter(lambda contact: contact[3] in
                                 missing_contacts, create_contacts)

        # Create contacts
        for chunk_of_contacts in chunked(create_contacts, BATCH_SIZE):
            log_debug("Creating a chunk of {} users."
                      .format(len(chunk_of_contacts)))

            num_successful, errors = _create_contacts(chunk_of_contacts)
            log_debug('{} users created, {} users errored.'
                      .format(num_successful, len(errors)))

            total_created += num_successful

            failed_contacts.extend((email, error_dict)
                                   for email, error_dict in errors.items())

        # All contacts were either updated or the update or create failed.
        missing_contacts = []

    return total_updated, total_created, missing_contacts, failed_contacts
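A quick standalone check of the chunked helper's behavior (with the itertools.islice fix above):

from itertools import islice

def chunked(it, n):
    it = iter(it)
    while True:
        chunk = tuple(islice(it, n))
        if not chunk:
            return
        yield chunk

print(list(chunked(range(7), 3)))  # [(0, 1, 2), (3, 4, 5), (6,)]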
Example #35
 def translate2jp(actress):
     return list(filter(lambda x: x, spawn_many(
         [Task(source.translate2jp, actress) for source in ActressTranslate.sources_en2jp]
     ).wait_for_one_finished()))[0]
Example #36
File: vfs.py Project: x35029/grr
    def Open(cls, fd, component, pathspec=None, progress_callback=None):
        """Try to correct the casing of component.

    This method is called when we failed to open the component directly. We try
    to transform the component into something which is likely to work.

    In this implementation, we correct the case of the component until we can
    not open the path any more.

    Args:
      fd: The base fd we will use.
      component: The component we should open.
      pathspec: The rest of the pathspec object.
      progress_callback: A callback to indicate that the open call is still
        working but needs more time.

    Returns:
      A file object.

    Raises:
      IOError: If nothing could be opened still.
    """
        # The handler for this component
        try:
            handler = VFS_HANDLERS[component.pathtype]
        except KeyError:
            raise IOError("VFS handler %d not supported." % component.pathtype)

        # We will not do any case folding unless requested.
        if component.path_options == rdf_paths.PathSpec.Options.CASE_LITERAL:
            return handler(base_fd=fd, pathspec=component)

        path_components = client_utils.LocalPathToCanonicalPath(component.path)
        path_components = ["/"] + list(filter(None,
                                              path_components.split("/")))
        for i, path_component in enumerate(path_components):
            try:
                if fd:
                    new_pathspec = fd.MatchBestComponentName(path_component)
                else:
                    new_pathspec = component
                    new_pathspec.path = path_component

                # The handler for this component
                try:
                    handler = VFS_HANDLERS[new_pathspec.pathtype]
                except KeyError:
                    raise IOError("VFS handler %d not supported." %
                                  new_pathspec.pathtype)

                fd = handler(base_fd=fd,
                             pathspec=new_pathspec,
                             progress_callback=progress_callback)
            except IOError as e:
                # Can not open the first component, we must raise here.
                if i <= 1:
                    raise IOError("File not found: {}".format(component))

                # Do not try to use TSK to open a not-found registry entry, fail
                # instead. Using TSK would lead to confusing error messages, hiding
                # the fact that the Registry entry is simply not there.
                if component.pathtype == rdf_paths.PathSpec.PathType.REGISTRY:
                    raise IOError("Registry entry not found: {}".format(e))

                # Insert the remaining path at the front of the pathspec.
                pathspec.Insert(0,
                                path=utils.JoinPath(*path_components[i:]),
                                pathtype=rdf_paths.PathSpec.PathType.TSK)
                break

        return fd
Example #37
 def getUbiquitusFamilies(self, minCoverage=.5):
     families = self.getToplevelGroups()
     return filter(
         lambda x: len(self.getGenesPerSpeciesInFam(x)) >= minCoverage *
         len(self.getSpeciesSet()), families)
Example #38
    def __init__(self, request, step, data=None, initial=None, errors=None):
        """
        Setup for each order form step which does a few things:

        - Calls OrderForm.preprocess on posted data
        - Sets up any custom checkout errors
        - Hides the discount code field if applicable
        - Hides sets of fields based on the checkout step
        - Sets year choices for cc expiry field based on current date
        """

        # ``data`` is usually the POST attribute of a Request object,
        # which is an immutable QueryDict. We want to modify it, so we
        # need to make a copy.
        data = copy(data)

        # Force the specified step in the posted data, which is
        # required to allow moving backwards in steps. Also handle any
        # data pre-processing, which subclasses may override.
        if data is not None:
            data["step"] = step
            data = self.preprocess(data)
        if initial is not None:
            initial["step"] = step

        super(OrderForm, self).__init__(request, data=data, initial=initial)
        self._checkout_errors = errors

        # Hide discount code field if it shouldn't appear in checkout,
        # or if no discount codes are active.
        settings.use_editable()
        if not (settings.SHOP_DISCOUNT_FIELD_IN_CHECKOUT and
                DiscountCode.objects.active().exists()):
            self.fields["discount_code"].widget = forms.HiddenInput()

        # Determine which sets of fields to hide for each checkout step.
        # A ``hidden_filter`` function is defined that's used for
        # filtering out the fields to hide.
        is_first_step = step == checkout.CHECKOUT_STEP_FIRST
        is_last_step = step == checkout.CHECKOUT_STEP_LAST
        is_payment_step = step == checkout.CHECKOUT_STEP_PAYMENT
        hidden_filter = lambda f: False
        if settings.SHOP_CHECKOUT_STEPS_SPLIT:
            if is_first_step:
                # Hide cc fields for billing/shipping if steps are split.
                hidden_filter = lambda f: f.startswith("card_")
            elif is_payment_step:
                # Hide non-cc fields for payment if steps are split.
                hidden_filter = lambda f: not f.startswith("card_")
        elif not settings.SHOP_PAYMENT_STEP_ENABLED:
            # Hide all cc fields if payment step is not enabled.
            hidden_filter = lambda f: f.startswith("card_")
        if settings.SHOP_CHECKOUT_STEPS_CONFIRMATION and is_last_step:
            # Hide all fields for the confirmation step.
            hidden_filter = lambda f: True
        for field in filter(hidden_filter, self.fields):
            self.fields[field].widget = forms.HiddenInput()
            self.fields[field].required = False

        # Set year choices for cc expiry, relative to the current year.
        year = now().year
        choices = make_choices(list(range(year, year + 21)))
        self.fields["card_expiry_year"].choices = choices
Example #39
 def MultiResponseParsers(self):
     return filter(self._IsSupported,
                   MULTI_RESPONSE_PARSER_FACTORY.CreateAll())
Example #40
 def SingleResponseParsers(self):
   return filter(self._IsSupported, SINGLE_RESPONSE_PARSER_FACTORY.CreateAll())
Example #41
 def SingleFileParsers(self):
     return filter(self._IsSupported,
                   SINGLE_FILE_PARSER_FACTORY.CreateAll())
Example #42
 def Filter(self, objects):
   """Returns a list of objects that pass the filter."""
   return list(filter(self.Matches, objects))
Example #43
 def MultiFileParsers(self):
     return filter(self._IsSupported, MULTI_FILE_PARSER_FACTORY.CreateAll())
Example #44
 def hasInts(self):
     """Return the number of items in this sequence that are numbers."""
     return len(list(filter(isInt, self._seq)))
Example #45
    def Handle(self, args, token=None):
        if not args.file_path or args.file_path == "/":
            return self._GetRootChildren(args, token=token)

        if args.file_path == "fs":
            return self._GetFilesystemChildren(args)

        path_type, components = rdf_objects.ParseCategorizedPath(
            args.file_path)

        child_path_infos = data_store.REL_DB.ListChildPathInfos(
            client_id=args.client_id.ToString(),
            path_type=path_type,
            components=components,
            timestamp=args.timestamp)

        items = []

        for child_path_info in child_path_infos:
            if args.directories_only and not child_path_info.directory:
                continue

            child_item = ApiFile()
            child_item.name = child_path_info.basename

            if path_type == rdf_objects.PathInfo.PathType.OS:
                prefix = "fs/os/"
            elif path_type == rdf_objects.PathInfo.PathType.TSK:
                prefix = "fs/tsk/"
            elif path_type == rdf_objects.PathInfo.PathType.REGISTRY:
                prefix = "registry/"
            elif path_type == rdf_objects.PathInfo.PathType.TEMP:
                prefix = "temp/"

            child_item.path = prefix + "/".join(child_path_info.components)

            # TODO(hanuszczak): `PathInfo#directory` tells us whether given path has
            # ever been observed as a directory. Is this what we want here or should
            # we use `st_mode` information instead?
            child_item.is_directory = child_path_info.directory
            if child_path_info.stat_entry:
                child_item.stat = child_path_info.stat_entry
            child_item.age = child_path_info.timestamp

            if child_path_info.last_hash_entry_timestamp:
                child_item.last_collected = child_path_info.last_hash_entry_timestamp
                child_item.last_collected_size = child_path_info.hash_entry.num_bytes

            items.append(child_item)

        # TODO(hanuszczak): Instead of getting the whole list from the database and
        # then filtering the results we should do the filtering directly in the
        # database query.
        if args.filter:
            pattern = re.compile(args.filter, re.IGNORECASE)
            is_matching = lambda item: pattern.search(item.name)
            items = list(filter(is_matching, items))

        items.sort(key=lambda item: item.path)

        if args.count:
            items = items[args.offset:args.offset + args.count]
        else:
            items = items[args.offset:]

        return ApiListFilesResult(items=items)
Example #47
 def getUbiquitusFamilies(self, minCoverage=.5):
     families = self.getToplevelGroups()
     return filter(
         lambda x: len(self.getGenesPerSpeciesInFam(x)) >= minCoverage *
         len(self.getSpeciesSet()),
         families)
Example #48
    def __init__(self, request, step, data=None, initial=None, errors=None):
        """
        Setup for each order form step which does a few things:

        - Calls OrderForm.preprocess on posted data
        - Sets up any custom checkout errors
        - Hides the discount code field if applicable
        - Hides sets of fields based on the checkout step
        - Sets year choices for cc expiry field based on current date
        """

        # ``data`` is usually the POST attribute of a Request object,
        # which is an immutable QueryDict. We want to modify it, so we
        # need to make a copy.
        data = copy(data)

        # Force the specified step in the posted data, which is
        # required to allow moving backwards in steps. Also handle any
        # data pre-processing, which subclasses may override.
        if data is not None:
            data["step"] = step
            data = self.preprocess(data)
        if initial is not None:
            initial["step"] = step

        super(OrderForm, self).__init__(request, data=data, initial=initial)
        self._checkout_errors = errors

        # Hide discount code field if it shouldn't appear in checkout,
        # or if no discount codes are active.
        settings.use_editable()
        if not (settings.SHOP_DISCOUNT_FIELD_IN_CHECKOUT
                and DiscountCode.objects.active().exists()):
            self.fields["discount_code"].widget = forms.HiddenInput()

        # Determine which sets of fields to hide for each checkout step.
        # A ``hidden_filter`` function is defined that's used for
        # filtering out the fields to hide.
        is_first_step = step == checkout.CHECKOUT_STEP_FIRST
        is_last_step = step == checkout.CHECKOUT_STEP_LAST
        is_payment_step = step == checkout.CHECKOUT_STEP_PAYMENT
        hidden_filter = lambda f: False
        if settings.SHOP_CHECKOUT_STEPS_SPLIT:
            if is_first_step:
                # Hide cc fields for billing/shipping if steps are split.
                hidden_filter = lambda f: f.startswith("card_")
            elif is_payment_step:
                # Hide non-cc fields for payment if steps are split.
                hidden_filter = lambda f: not f.startswith("card_")
        elif not settings.SHOP_PAYMENT_STEP_ENABLED:
            # Hide all cc fields if payment step is not enabled.
            hidden_filter = lambda f: f.startswith("card_")
        if settings.SHOP_CHECKOUT_STEPS_CONFIRMATION and is_last_step:
            # Hide all fields for the confirmation step.
            hidden_filter = lambda f: True
        for field in filter(hidden_filter, self.fields):
            self.fields[field].widget = forms.HiddenInput()
            self.fields[field].required = False
        if settings.SHOP_ALWAYS_SAME_BILLING_SHIPPING:
            for field in self.fields:
                if field == 'same_billing_shipping' or field.startswith(
                        'shipping_'):
                    self.fields[field].widget = forms.HiddenInput()
                    self.fields[field].required = False

        # Set year choices for cc expiry, relative to the current year.
        year = now().year
        choices = make_choices(list(range(year, year + 21)))
        self.fields["card_expiry_year"].choices = choices
Example #49
 def Filter(self, objects):
     """Returns a list of objects that pass the filter."""
     return list(filter(self.Matches, objects))
Example #50
 def MultiFileParsers(self):
     # TODO: See above.
     return filter(self._IsSupported, MULTI_FILE_PARSER_FACTORY.CreateAll())  # pytype: disable=bad-return-type
Example #51
File: ppo.py Project: lns/dapo
    def __init__(self, base_net, ob_space, ac_space, device, build_actor_net,
                 input_data):
        print('adv_est: %s' % str(FLAGS.adv_est))
        print('adv_coef: %s' % str(FLAGS.adv_coef))
        print('adv_off: %s' % str(FLAGS.adv_off))
        print('reg_coef: %s' % str(FLAGS.reg_coef))
        print('ent_coef: %s' % str(FLAGS.ent_coef))
        print('cbarD: %s' % str(FLAGS.cbarD))
        print('vf_coef: %s' % str(FLAGS.vf_coef))
        print('max_grad_norm: %s' % str(FLAGS.max_grad_norm))
        print('rnn: %s' % str(FLAGS.rnn))
        print('value_loss: %s' % str(FLAGS.value_loss))
        print('gamma: %s' % str(FLAGS.gamma))
        print('batch_size: %s' % str(FLAGS.batch_size))
        print('nlstm: %s' % str(FLAGS.nlstm))

        rollout_len = FLAGS.rollout_len
        reg_coef = FLAGS.reg_coef
        batch_size = FLAGS.batch_size

        if FLAGS.rnn:
            if build_actor_net:
                actor_net = LSTMNet(ob_space=ob_space,
                                    nbatch=1,
                                    base_net=base_net,
                                    input_data=None,
                                    reuse=False,
                                    rollout_len=1,
                                    nlstm=FLAGS.nlstm)
            with tf.device(device):
                train_net = LSTMNet(ob_space=ob_space,
                                    nbatch=batch_size,
                                    base_net=base_net,
                                    input_data=input_data,
                                    reuse=True,
                                    rollout_len=rollout_len,
                                    nlstm=FLAGS.nlstm)
        else:
            if build_actor_net:
                actor_net = FFNet(ob_space=ob_space,
                                  nbatch=1,
                                  base_net=base_net,
                                  input_data=None,
                                  reuse=False)
            with tf.device(device):
                train_net = FFNet(ob_space=ob_space,
                                  nbatch=batch_size,
                                  base_net=base_net,
                                  input_data=input_data,
                                  reuse=True)
        if build_actor_net:
            actor_head = PGHead(ac_space, n_v=1, reuse=False)
            actor_model = make_policy(actor_net, actor_head)
        with tf.device(device):
            train_head = PGHead(ac_space, n_v=1, reuse=True)
            train_model = make_policy(train_net, train_head)

            if FLAGS.rnn:
                X, A, ADV, R, OLDVPRED, RWD, OLDNEGLOGPAC, WEIGHT, M, S = input_data
            else:
                X, A, ADV, R, OLDVPRED, RWD, OLDNEGLOGPAC, WEIGHT = input_data
            W = tf.maximum(WEIGHT, 1e-2)

            # Placeholders
            #LR = tf.placeholder(tf.float32, [])
            CLIPRANGE = tf.placeholder(tf.float32, [])
            # Placeholders feeder
            #lr = lambda f: (FLAGS.final_lr + f * (FLAGS.base_lr - FLAGS.final_lr))
            #cliprange = lambda f: f * 0.2
            cliprange = 0.2
            #lr = as_func(lr)
            cliprange = as_func(cliprange)

            mean_return = tf.reduce_mean(R / W)
            neglogpac = train_model.head.pd.neglogp(A)
            entropy = tf.reduce_mean(train_model.head.pd.entropy() / W)
            ratio = tf.exp(
                tf.clip_by_value(OLDNEGLOGPAC - neglogpac, -10.0, 10.0))
            static_ratio = tf.minimum(tf.stop_gradient(ratio), 3.0)
            assert batch_size % rollout_len == 0
            nrollout = batch_size // rollout_len
            seq_ratio = batch_to_seq(ratio, nrollout, rollout_len, flat=True)
            if FLAGS.rnn:
                MS = train_net.ms  # TODO: This is a hack!

            # Preprocess adv
            # TODO: These operations can be further moved to get_data() to speed up the training.
            if FLAGS.rnn:
                V, Q = vtrace(batch_to_seq(RWD,
                                           nrollout,
                                           rollout_len,
                                           flat=True),
                              batch_to_seq(train_model.head.vf,
                                           nrollout,
                                           rollout_len,
                                           flat=True),
                              seq_ratio,
                              MS,
                              gam=FLAGS.gamma,
                              cbar=1.0,
                              rhobar=1.0,
                              input_R=batch_to_seq(R,
                                                   nrollout,
                                                   rollout_len,
                                                   flat=True))
                V = tf.stop_gradient(seq_to_batch(V, flat=True))
                Q = tf.stop_gradient(seq_to_batch(Q, flat=True))
            else:
                V = (R - ADV) + static_ratio * ADV  # V_t + rho * Adv
                Q = R

            vpred = train_model.head.vf

            # normalize ADV
            if FLAGS.adv_est == 'off':
                adv = ADV / (tf.sqrt(
                    tf.maximum(tf.reduce_mean(tf.square(ADV)), 1e-8)) + 1e-4)
                adv = tf.stop_gradient(FLAGS.adv_coef * (adv - FLAGS.adv_off))
            elif FLAGS.adv_est == 'on':
                adv = (Q - vpred) / (tf.sqrt(
                    tf.maximum(tf.reduce_mean(tf.square(Q - vpred)), 1e-8)) +
                                     1e-4)
                adv = tf.stop_gradient(FLAGS.adv_coef * (adv - FLAGS.adv_off))
            else:
                raise NotImplementedError

            if FLAGS.value_loss == 'vanilla':
                vf_losses = tf.square(vpred - R)
                vf_loss = .5 * tf.reduce_mean(static_ratio * vf_losses / W)
            elif FLAGS.value_loss == 'clipped':
                vpredclipped = OLDVPRED + tf.clip_by_value(
                    train_model.head.vf - OLDVPRED, -CLIPRANGE, CLIPRANGE)
                vf_losses1 = tf.square(vpred - R)
                vf_losses2 = tf.square(vpredclipped - R)
                vf_loss = .5 * tf.reduce_mean(
                    static_ratio * tf.maximum(vf_losses1, vf_losses2) / W)
            elif FLAGS.value_loss == 'vtrace':
                vf_losses = tf.square(vpred - V)
                vf_loss = .5 * tf.reduce_mean(static_ratio * vf_losses / W)
            else:
                raise RuntimeError("Unknown value_loss: '%s'" %
                                   FLAGS.value_loss)

            if FLAGS.policy_loss == 'pg':
                pg_losses = -adv * (-neglogpac)
            elif FLAGS.policy_loss == 'pgis':
                pg_losses = -adv * ratio
            elif FLAGS.policy_loss == 'ppo':
                pg_losses1 = -adv * ratio
                pg_losses2 = -adv * tf.clip_by_value(ratio, 1.0 - CLIPRANGE,
                                                     1.0 + CLIPRANGE)
                pg_losses = tf.maximum(pg_losses1, pg_losses2)
            elif FLAGS.policy_loss == 'ppg':
                pg_losses = 0
            elif FLAGS.policy_loss == 'acer':
                pg_losses1 = -adv * tf.stop_gradient(
                    tf.minimum(ratio, FLAGS.acer_c)) * (-neglogpac)
                pg_losses2 = -adv * tf.stop_gradient(
                    tf.maximum(0.0,
                               (ratio - FLAGS.acer_c) / ratio)) * (-neglogpac)
                pg_losses = pg_losses1 + pg_losses2
            elif FLAGS.policy_loss == 'acerg':
                pg_losses = 0
            elif FLAGS.policy_loss == 'sil':
                pg_losses = tf.maximum(adv, 0.0) * neglogpac
            elif FLAGS.policy_loss == 'marwil':
                pg_losses = tf.exp(tf.clip_by_value(adv, -3.0,
                                                    3.0)) * neglogpac
            elif FLAGS.policy_loss == 'vrmarwil':
                pg_losses = tf.exp(tf.clip_by_value(adv, -3.0,
                                                    3.0)) * neglogpac + ratio
            else:
                raise RuntimeError("Unknown policy_loss: '%s'" %
                                   FLAGS.policy_loss)

            # \nabla_\theta D(\mu_\pi, \mu_t) = reg * \nabla_\theta \mu_\pi
            if FLAGS.reg == 'Entropy':
                reg = -neglogpac
            elif FLAGS.reg == 'KL':
                reg = OLDNEGLOGPAC - neglogpac
            elif FLAGS.reg == 'rKL':  # reverse KL
                # We can add a constant 1 here, because ratio's gradient's expectation is zero
                reg = 1 - tf.exp(
                    tf.clip_by_value(neglogpac - OLDNEGLOGPAC, -3.0, 3.0))
            elif FLAGS.reg == 'INF':  # Implicitly Normalized Forecaster
                reg = tf.exp(tf.clip_by_value(
                    0.5 * OLDNEGLOGPAC, -3.0, 3.0)) - tf.exp(
                        tf.clip_by_value(0.5 * neglogpac, -3.0, 3.0))
            elif FLAGS.reg == 'Hellinger':  # Hellinger distance. TODO: Check this
                reg = 1 - tf.exp(
                    tf.clip_by_value(0.5 *
                                     (neglogpac - OLDNEGLOGPAC), -3.0, 3.0))
            elif FLAGS.reg == 'TV':  # Total Variance
                reg = 0.5 * tf.exp(tf.clip_by_value(-OLDNEGLOGPAC, -3.0,
                                                    3.0)) * tf.sign(ratio - 1)
            else:
                raise RuntimeError("Unknown reg: '%s'" % FLAGS.reg)
            if FLAGS.rnn:
                reg = truncIS(batch_to_seq(reg,
                                           nrollout,
                                           rollout_len,
                                           flat=True),
                              seq_ratio,
                              MS,
                              gam=FLAGS.gamma,
                              cbar=FLAGS.cbarD,
                              rhobar=1.0)
                reg = seq_to_batch(reg, flat=True)
            # TODO: This is a hack
            if FLAGS.policy_loss == 'ppg':
                augadv = adv - FLAGS.reg_coef * tf.stop_gradient(reg)
                reg_losses1 = -augadv * ratio
                reg_losses2 = -augadv * tf.clip_by_value(
                    ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
                reg_losses = tf.maximum(reg_losses1, reg_losses2)
            elif FLAGS.policy_loss == 'acerg':
                augadv = adv - FLAGS.reg_coef * tf.stop_gradient(reg)
                reg_losses1 = -augadv * tf.stop_gradient(
                    tf.minimum(ratio, FLAGS.acer_c)) * (-neglogpac)
                reg_losses2 = -augadv * tf.stop_gradient(
                    tf.maximum(0.0,
                               (ratio - FLAGS.acer_c) / ratio)) * (-neglogpac)
                reg_losses = reg_losses1 + reg_losses2
            else:
                reg_losses = FLAGS.reg_coef * ratio * tf.stop_gradient(reg)
            #
            pi_loss = tf.reduce_mean((pg_losses + reg_losses) / W)
            approxkl = .5 * tf.reduce_mean(
                tf.square(neglogpac - OLDNEGLOGPAC) / W)
            #clipfrac = tf.reduce_mean(tf.to_float(tf.greater((ratio - 1.0)*tf.sign(ADV), CLIPRANGE)))
            loss = pi_loss - entropy * FLAGS.ent_coef + vf_loss * FLAGS.vf_coef
            with tf.variable_scope('model'):
                params = tf.trainable_variables()
            grads = tf.gradients(loss, params)
            if FLAGS.max_grad_norm is not None:
                grads, _grad_norm = tf.clip_by_global_norm(
                    grads, FLAGS.max_grad_norm)
            #grads = group_allreduce(grads, group_key=0, merge_op='Add', final_op='Div')
            # Remove none due to unconnected graph
            self.grads_v = list(
                filter(lambda x: (x[0] is not None), zip(grads, params)))
            #self.grads_v = list(zip(grads, params))
            #trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
            #self.train_op = trainer.apply_gradients(self.grads_v)

        # Stats
        self.loss_vars = [pi_loss, vf_loss, entropy, approxkl, mean_return]
        self.loss_names = [
            'policy_loss', 'value_loss', 'policy_entropy', 'approxkl',
            'mean_return'
        ]

        def feeds(update, nupdates):
            frac = 1.0 - (update - 1.0) / nupdates
            cliprangenow = cliprange(frac)
            td_map = {CLIPRANGE: cliprangenow}
            return td_map

        if build_actor_net:
            self.names = actor_model.names
        self.feeds = feeds