def merge_txt(raw_ann: AnnotationData, no_type: AnnType) -> AnnotationData:
    """Return a normalized (lines merged) version of ``raw_ann``:
    '\n\n' in *.txt are the real lines, and are parsed as ''.
    So to create real lines, we merge all the strings between ''.
    """

    merged_ann: AnnotationData = AnnotationData(
        annotation_difficulty=raw_ann.annotation_difficulty,
        annotations=[])
    types: Set = set()
    lines: List[str] = []
    i: int = 0

    for virtual_line in raw_ann.annotations:
        if virtual_line.text == '':
            i += 1
            # remove no_type from the virtual lines when part of the line was
            # annotated
            if len(types) > 1:
                types.discard(no_type)
            merged_ann.annotations.append(AnnotatedLine(line_num=i,
                                                        types=types,
                                                        text=' '.join(lines)))
            types = set()
            lines = []
        else:
            types |= virtual_line.types
            lines.append(virtual_line.text)

    return merged_ann
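
A minimal usage sketch for the function above, assuming ``AnnotationData``/``AnnotatedLine`` are plain containers and ``AnnType`` is string-like (hypothetical stand-ins for the real types):

from dataclasses import dataclass, field
from typing import List, Set

@dataclass
class AnnotatedLine:
    line_num: int
    types: Set[str]
    text: str

@dataclass
class AnnotationData:
    annotation_difficulty: int
    annotations: List[AnnotatedLine] = field(default_factory=list)

raw = AnnotationData(1, [
    AnnotatedLine(0, {'no_type'}, 'first half'),
    AnnotatedLine(0, {'code'}, 'second half'),
    AnnotatedLine(0, set(), ''),  # '' marks a real line break
])
merged = merge_txt(raw, no_type='no_type')
# -> annotations == [AnnotatedLine(line_num=1, types={'code'},
#                                  text='first half second half')]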
Example #2
 def __init__(self):
     self.field_s: Set[FieldModel] = set()
     # Related fields that shouldn't be targeted for randomization
     self.dnr_field_s: Set[FieldModel] = set()
     self.constraint_s: Set[ConstraintModel] = set()
     self.field_l = None
     self.all_field_l = None
Example #3
def findAthletesToInsert(resultsFile, urlAthleteCheck):
    table = []
    with open(resultsFile, mode="r") as f:
        for line in f:
            table.append(line.split(";"))

    firstNameId = table[0].index("FirstName")
    lastNameId = table[0].index("LastName")
    birthYearId = table[0].index("Yob")
    clubCodeId = table[0].index("ClubCode")
    clubNameId = table[0].index("ClubName")
    genderId = table[0].index("Gender")
    notExistingAthletes = set()
    existingAthletes = set()
    for l in table[1:10000]:
        if l[clubCodeId] == tvuID or l[clubNameId] == tvuName:
            fullName = l[firstNameId] + " " + l[lastNameId]
            birthYear = l[birthYearId]
            if (fullName, birthYear) not in notExistingAthletes and (
                    fullName, birthYear) not in existingAthletes:
                if not checkAthleteExists(urlAthleteCheck, fullName,
                                          birthYear):
                    notExistingAthletes.add((fullName, birthYear, l[genderId]))
                else:
                    existingAthletes.add((fullName, birthYear))
    exportCSV(
        notExistingAthletes, "notExistingAthletes{}.csv".format(
            datetime.datetime.now().strftime("%Y%m%d%H%M")))
    if len(notExistingAthletes) == 0:
        print("\n\nReady For Insertation. All Athletes are in the DB\n\n")
    return notExistingAthletes
Example #4
 def __init__(self, *args, **kwargs):
     fields = kwargs.pop('fields', None)
     super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
     if fields is not None:
         allowed = set(fields)
         existing = set(self.fields)
         for field_name in existing - allowed:
             self.fields.pop(field_name)
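
A usage sketch for the mixin above, following the common DRF dynamic-fields pattern (the ``UserSerializer``/``User`` names are hypothetical):

class UserSerializer(DynamicFieldsModelSerializer):
    class Meta:
        model = User
        fields = ('id', 'username', 'email')

# Only 'id' and 'username' are serialized; 'email' is popped in __init__.
UserSerializer(user, fields=('id', 'username'))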
Example #5
 def post(self, request):
     leaf_names = request.POST.getlist('leaf_name')
     results = []
     for leaf_name in leaf_names:
         result = Plant.objects.filter(leafchars__chars=leaf_name)
         if len(results):
             results = list(set(result) & set(results))
         else:
             results = result
     print(results)
     return render(request, 'index.html', context={'results': results})
Example #6
def load_all_fields(db, month: int, year: int, required_fields, **kwargs):
    assert len(required_fields) > 0
    db.market_quote_cache.create_index([('tag', pymongo.ASCENDING)], unique=True)
    uber_df = None
    impute_fields = kwargs.get('impute_fields', set())
    market = kwargs.get('market', 'asx')
    for field_name in required_fields:
        print("Constructing matrix: {} {}-{}".format(field_name, month, year))
        rows = load_field_dataframe(db, field_name, month, year)
        if len(rows) == 0:
            continue
        df = pd.DataFrame.from_records(rows)
        df = df.pivot(index='fetch_date', columns='asx_code', values='field_value')
        if field_name in impute_fields: # some fields will break the viewer if too many NaN
            before = df.isnull().sum().sum()
            print(f"Imputing {field_name} - before {before} missing values")
            df = df.fillna(method='pad').fillna(method='backfill')
            after = df.isnull().sum().sum()
            print(f"After imputation {field_name} - now {after} missing values")
        rows = []
        for asx_code, series in df.iteritems():
            for fetch_date, field_value in series.iteritems():
                rows.append({ 'asx_code': asx_code, 'fetch_date': fetch_date, 'field_value': field_value, 'field_name': field_name})
        #print(rows)
        
        if uber_df is None:
            uber_df = pd.DataFrame.from_records(rows)
        else:
            uber_df = uber_df.append(rows)
        if df.isnull().values.any():
            dates_with_missing = {the_date for the_date in df.index
                                  if df.loc[the_date].isnull().any()}
            today = datetime.strftime(datetime.today(), "%Y-%m-%d")
            if today in dates_with_missing:
                print(f"WARNING: today's {field_name} matrix contains missing data! Continuing anyway.")
                print(df.loc[today].isnull().values.any())
                # FALLTHRU...
        tag = "{}-{:02d}-{}-{}".format(field_name, month, year, market)
        #save_dataframe(db, df, tag, field_name, status, market, scope, compression='gzip', n_days=len(df), n_stocks=len(df.columns))

    all_stocks = set(uber_df['asx_code'])
    print("Detected {} stocks during month ({} datapoints total)".format(len(all_stocks), len(uber_df)))
    uber_tag = "uber-{:02d}-{}-{}".format(month, year, market)
    all_fields = set(uber_df['field_name'])
    # TODO FIXME: should we drop missing values from the uber parquet? might save space... since we will get them back on load
    #print(all_fields)
    #print(required_fields)
    assert all_fields == set(required_fields)

    print("% missing values: ", ((uber_df.isnull() | uber_df.isna()).sum() * 100 / uber_df.index.size).round(2))
    n_stocks = uber_df['asx_code'].nunique()
    n_days = uber_df['fetch_date'].nunique()
    print(uber_df)
    save_dataframe(db, uber_df, uber_tag, 'uber', n_days=n_days, n_stocks=n_stocks, **kwargs)
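
The nested ``iteritems`` loops above rebuild long-format records from the pivoted frame; a sketch of the same reshaping with ``melt`` (assuming a recent pandas, where ``iteritems`` is also spelled ``items``):

long_df = (df.reset_index()
             .melt(id_vars='fetch_date', var_name='asx_code',
                   value_name='field_value'))
long_df['field_name'] = field_name
rows = long_df.to_dict('records')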
Example #7
    def run_game(self):
        # run rounds until there is a winner
        while (len(self.players) - len(self.inactive_players)) > 1:
            self.run_round()
            self.current_round += 1
        # If there is only one player left, do some formatting
        if (len(self.players) - len(self.inactive_players)) == 1:
            winner = list(set(self.players) - set(self.inactive_players))
            self.eliminated_players.insert(0, [winner[0].get_name()])

        # sort list of disqualified players
        self.disqualified_players.sort()
        return self.eliminated_players, self.disqualified_players
Example #8
 def __init__(self, rng):
     # TODO: need access to the random state
     super().__init__()
     self._pass = 0
     self._field_s = set()
     self._active_constraint = None
     self._active_randset = None
     self._randset_s: Set[RandSet] = set()
     self._randset_field_m: Dict[FieldModel,
                                 RandSet] = {}  # map<field,randset>
     self._constraint_s: List[ConstraintModel] = []
     self._used_rand = True
     self._in_generator = False
     self.active_cp = None
     self._rng = rng
Example #9
  def test_filtering_no_tags(self):
    a = self.make_target('a', tags=['tag1'])
    b = self.make_target('b', tags=['tag1', 'tag2'])
    c = self.make_target('c', tags=['tag2'])

    filtered_targets = TargetFiltering(set()).apply_tag_blacklist([a, b, c])
    self.assertEqual([a, b, c], filtered_targets)
Example #10
def handle_nick(data, match, client, channels):
    """
    When a user changes their nick, tell everyone in all the channels they're in
    about it.

    '^NICK (?P<nick>.*)'

    :type data: str
    :type match: dict
    :type client: Client
    :type channels: list
    """

    newnick = match['nick']

    logger.info("Set new user's nick to '{newnick}'", newnick=newnick)

    client.send(Protocol.Nick.response(client.nick, newnick))

    announce = Protocol.Nick.announce(client, newnick)
    for cl in set(chain.from_iterable(chan.clients for chan in client.channels)):
        if cl is not client:
            cl.send(announce)

    client.nick = newnick
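
The ``set(chain.from_iterable(...))`` idiom above flattens the per-channel member lists and drops clients that appear in several channels; in isolation:

from itertools import chain

channels = [["alice", "bob"], ["bob", "carol"]]
unique = set(chain.from_iterable(channels))  # {'alice', 'bob', 'carol'}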
Example #11
    def __init__(self, rng):
        # TODO: need access to the random state
        super().__init__()
        self._pass = 0
        self._field_m = {}
        self._field_l = []
        self._active_constraint = None
        self._active_randset = None

        # Tracks the randsets added due to
        # dependencies against ordered fields
        self._active_order_randset_s = set()

        self._randset_m: Dict[RandSet, int] = {}
        self._randset_l = []
        self._randset_field_m: Dict[FieldModel,
                                    RandSet] = {}  # map<field,randset>
        self._constraint_s: List[ConstraintModel] = []
        self._soft_priority = 0
        self._used_rand = True
        self._in_generator = False
        self.active_cp = None
        self._rng = rng

        self._order_m = {}
        self._expr2fm = Expr2FieldVisitor()
Example #12
    def FindWordWithMaxFrequency(self):
        # convert all text to lowercase
        doc_text = self.DataSourcetext.lower()

        # remove numbers
        doc_text = re.sub(r'\d+', '', doc_text)

        # remove punctuation, special characters and whitespace
        doc_text = re.sub(r'\W+', ' ', doc_text)

        # remove stopwords and tokenize
        stop_words = set(stopwords.words('english'))
        tokens = word_tokenize(doc_text)
        result = [i for i in tokens if i not in stop_words]

        wordlist = []

        for word in result:
            if len(word) > 2:
                wordlist.append(word)

        wordfreq = []
        for w in wordlist:
            wordfreq.append(wordlist.count(w))

        return max(list(zip(wordlist, wordfreq)), key=itemgetter(1))[0]
Example #13
def set(arg, default=None):
    # ``list`` is presumably this module's own helper (it shadows the
    # builtin, just as this ``set`` shadows the builtin set), hence the
    # ``default`` keyword.
    val_list = list(arg, default=default)

    if val_list is not None:
        return builtins.set(val_list)
    else:
        return val_list
Example #14
def ListMethods(dev):
    """List user-callable methods for the device.

    Example:

    >>> ListMethods(phi)
    """
    dev = session.getDevice(dev, Device)
    items = []
    listed = builtins.set()

    def _list(cls):
        if cls in listed:
            return
        listed.add(cls)
        for name, (args, doc, mcls, is_user) in sorted(cls.methods.items()):
            if cls is mcls and is_user:
                items.append((dev.name + '.' + name + args, cls.__name__, doc))
        for base in cls.__bases__:
            if issubclass(base, (Device, DeviceMixinBase)):
                _list(base)

    _list(dev.__class__)
    dev.log.info('Device methods:')
    printTable(('method', 'from class', 'description'), items,
               session.log.info)
Example #15
 def __init__(self, global_context, raw_msg):
     """Parse the message, extracts and decode all headers and all
     text parts.
     """
     super(Message, self).__init__(global_context)
     self.raw_msg = self.translate_line_breaks(raw_msg)
     self.msg = email.message_from_string(self.raw_msg)
     self.headers = _Headers()
     self.raw_headers = _Headers()
     self.addr_headers = _Headers()
     self.name_headers = _Headers()
     self.mime_headers = _Headers()
     self.received_headers = _Headers()
     self.raw_mime_headers = _Headers()
     self.header_ips = _Headers()
     self.text = ""
     self.raw_text = ""
     self.uri_list = set()
     self.score = 0
     self.rules_checked = dict()
     self.interpolate_data = dict()
     self.plugin_tags = dict()
     # Data
     self.sender_address = ""
     self.hostname_with_ip = list()
     self._parse_message()
     self._hook_parsed_metadata()
Example #16
    def __init__(self, order=-1):
        self.order = order
        self.field_s: Set[FieldModel] = set()
        self.field_rand_l = []
        self.all_field_l = []

        self.constraint_s: Set[ConstraintModel] = set()
        self.constraint_l: List[ConstraintModel] = []
        self.soft_constraint_s: Set[ConstraintModel] = set()
        self.soft_constraint_l: List[ConstraintModel] = []
        self.soft_priority = 0
        self.dist_field_m = {}

        # List of fields in each ordered set
        # Only non-None if order constraints impact this randset
        self.rand_order_l = None
Example #17
 def __init__(self, global_context, raw_msg):
     """Parse the message, extracts and decode all headers and all
     text parts.
     """
     super(Message, self).__init__(global_context)
     self.raw_msg = self.translate_line_breaks(raw_msg)
     self.msg = email.message_from_string(self.raw_msg)
     self.headers = _Headers()
     self.raw_headers = _Headers()
     self.addr_headers = _Headers()
     self.name_headers = _Headers()
     self.mime_headers = _Headers()
     self.received_headers = list()
     self.raw_mime_headers = _Headers()
     self.header_ips = _Headers()
     self.text = ""
     self.raw_text = ""
     self.uri_list = set()
     self.score = 0
     self.rules_checked = dict()
     self.interpolate_data = dict()
     self.plugin_tags = dict()
     # Data
     self.sender_address = ""
     self.hostname_with_ip = list()
     self.internal_relays = []
     self.external_relays = []
     self.last_internal_relay_index = 0
     self.last_trusted_relay_index = 0
     self.trusted_relays = []
     self.untrusted_relays = []
     self._parse_message()
     self._hook_parsed_metadata()
Example #18
def maximumSum(a, m):
    #    logger.debug("a {} m {}".format(a, m))
    suma_mod = partial(lambda m, x, y: (x % m + y % m) % m, m)
    resta_mod = partial(lambda m, x, y: (x - y + m) % m, m)

    ord_set = OrderedSet()
    unord_set = set()
    acum = 0
    r = 0
    for n in a:
        acum = suma_mod(acum, n)
        logger.debug("acm {}".format(acum))
        sig = ord_set.find_gt(acum)
        optim = acum
        if sig:
            optim = resta_mod(acum, sig)
            logger.debug("optimus {}".format(optim))
        if optim > r:
            r = optim
        if acum not in unord_set:
            ord_set.add(acum)
            unord_set.add(acum)


#            todos = list(map(lambda n: n.key, ord_set))
#            todos_s = sorted(todos)
#            assert todos == todos_s, "expected {} got {}".format(todos_s, todos)
#        logger.debug("r is {} ord set {}".format(r, list(map(lambda n: n.key, ord_set))))

#    rt = fuerza_bruta(a, m)
#    assert r == rt, "expected {} actual {}".format(r, rt)
    return r
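
``find_gt`` belongs to the author's custom ``OrderedSet``; a self-contained sketch of the same prefix-sum trick using only the standard library's ``bisect`` on a sorted list (candidate answers are each prefix itself, or a prefix minus the smallest earlier prefix strictly greater than it):

import bisect

def maximum_sum(a, m):
    best, acc = 0, 0
    seen = []  # sorted prefix sums mod m
    for n in a:
        acc = (acc + n) % m
        best = max(best, acc)
        i = bisect.bisect_right(seen, acc)  # smallest earlier prefix > acc
        if i < len(seen):
            best = max(best, (acc - seen[i] + m) % m)
        bisect.insort(seen, acc)
    return best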
Example #19
  def _get_update_artifact_cache_work(self, vts_artifactfiles_pairs):
    """Create a Work instance to update an artifact cache, if we're configured to.

    vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
      - vts is single VersionedTargetSet.
      - artifactfiles is a list of paths to artifacts for the VersionedTargetSet.
    """
    cache = self._cache_factory.get_write_cache()
    if cache:
      if len(vts_artifactfiles_pairs) == 0:
        return None

      # Do some reporting.
      targets = set()
      for vts, _ in vts_artifactfiles_pairs:
        targets.update(vts.targets)

      self._report_targets(
        'Caching artifacts for ',
        list(targets),
        '.',
        logger=self.context.log.debug,
      )

      always_overwrite = self._cache_factory.overwrite()

      # Cache the artifacts.
      args_tuples = []
      for vts, artifactfiles in vts_artifactfiles_pairs:
        overwrite = always_overwrite or vts.cache_key in self._cache_key_errors
        args_tuples.append((cache, vts.cache_key, artifactfiles, overwrite))

      return Work(lambda x: self.context.subproc_map(call_insert, x), [(args_tuples,)], 'insert')
    else:
      return None
Example #20
def handle_nick(data, match, client, channels):
    """
    When a user changes their nick, tell everyone in all the channels they're in
    about it.

    '^NICK (?P<nick>.*)'

    :type data: str
    :type match: dict
    :type client: Client
    :type channels: list
    """

    newnick = match['nick']

    logger.info("Set new user's nick to '{newnick}'", newnick=newnick)

    client.send(Protocol.Nick.response(client.nick, newnick))

    announce = Protocol.Nick.announce(client, newnick)
    for cl in set(chain.from_iterable(chan.clients
                                      for chan in client.channels)):
        if cl is not client:
            cl.send(announce)

    client.nick = newnick
Example #21
    def __resolve_single(self, from_idtype, to_idtype, ids):
        from_mappings = self.mappers.get(from_idtype, {})
        to_mappings = from_mappings.get(to_idtype, [])
        if not to_mappings:
            _log.warn("cannot find mapping from %s to %s", from_idtype,
                      to_idtype)
            return [None for _ in ids]

        def apply_mapping(mapper, ids: List[str]):
            # Each mapper can define if it preserves the order of the incoming ids.
            if hasattr(mapper, "preserves_order") and mapper.preserves_order:
                return mapper(ids)
            else:
                # If this is not the case, we need to map every single id separately
                return [mapper([id])[0] for id in ids]

        if len(to_mappings) == 1:
            # single mapping no need for merging
            return apply_mapping(to_mappings[0], ids)

        # two way to preserve the order of the results
        r = [[] for _ in ids]
        rset = [set() for _ in ids]
        for mapper in to_mappings:
            mapped_ids = apply_mapping(mapper, ids)
            for mapped_id, rlist, rhash in zip(mapped_ids, r, rset):
                for id in mapped_id:
                    if id not in rhash:
                        rlist.append(id)
                        rhash.add(id)
        return r
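
The paired ``rlist``/``rhash`` structures above are the standard order-preserving deduplication idiom; in isolation:

def dedup_preserving_order(items):
    seen = set()
    out = []
    for item in items:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out

assert dedup_preserving_order(['b', 'a', 'b', 'c']) == ['b', 'a', 'c']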
Example #22
def run_election(vp_file: open) -> {str}:
    ballot_count = 1
    vd = read_voter_preferences(vp_file)
    print("Preferences: voter -> [candidates in order]")
    print(dict_as_str(vd))
    rc = {i for i in list(vd.values())[0]}
    while len(rc) != 1:
        if len(list(vd.values())[0]) <= ballot_count:
            break
        votes = evaluate_ballot(vd, rc)
        print(
            f'Vote count on ballot #{ballot_count}: candidates (sorted alphabetically) using only candidates in set {rc}'
        )
        for i, j in sorted(votes.items()):
            print(f'{i} -> {j}')
        print(
            f'Vote count on ballot #{ballot_count}: candidates (sorted numerically) using only candidates in set {rc}'
        )
        for i, j in sorted(votes.items(), key=(lambda x: x[1]), reverse=True):
            print(f'{i} -> {j}')
        rc = remaining_candidates(votes)
        ballot_count += 1
    print(f'Election winner is {list(rc)[0]}' if len(rc) == 1 else
          'Not any unique winner: all remaining candidates on the ballot tie')
    return rc if len(rc) == 1 else set()
Example #23
 def __init__(self, datasets):
     """
     Parameters
     ----------
     datasets: List[Dataset]
         List of Dataset objects. Currently restricted to those generated by 'absorbance', 'fluorescence'
         and 'luminescence' operations
     """
     operation_set = set([ds.operation for ds in datasets])
     if len(operation_set) > 1:
         raise RuntimeError("Input Datasets must all be of the same type.")
     self.operation = operation_set.pop()
     if self.operation not in ["absorbance", "fluorescence", "luminescence"]:
         raise RuntimeError("%s has to be of type absorbance, fluorescence or luminescence" % self.operation)
     super(Spectrophotometry, self).__init__(datasets)
     # Assume that well names are consistent across all runs
     ref_dataset = datasets[0]
     ref_container = ref_dataset.container
     # Check if well_map is defined
     if len(ref_container.well_map) != 0:
         self.properties = pd.DataFrame.from_dict(ref_container.well_map, orient='index')
     else:
         self.properties = pd.DataFrame.from_dict({ref_container.container_type.robotize(x): x
                                                   for x in ref_dataset.data.columns
                                                   if x not in ["GAIN"]},
                                                  orient='index')
     self.properties.columns = ['name']
     self.properties.insert(1, "column", (self.properties.index % ref_container.container_type.col_count))
     self.properties.insert(1, "row", (self.properties.index // ref_container.container_type.col_count))
     self.properties.row = self.properties.row.apply(lambda x: "ABCDEFGHIJKLMNOPQRSTUVWXYZ"[x])
     self.properties.index = [ref_container.container_type.humanize(int(x)) for x in list(self.properties.index)]
Example #24
 def hasCombat(self):
     playerset = set()
     for va in self.vaisseaux:
         playerset.add(va.player)
     if len(playerset) > 1:
         return True
     return False
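
The same check reads as a one-line set comprehension (equivalent sketch):

def hasCombat(self):
    return len({va.player for va in self.vaisseaux}) > 1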
Example #25
    def search(self, from_idtype, to_idtype, query, max_results=None):
        """
        Searches for matches in the names of the given idtype.
        This operation does not resolve transitive mappings.
        :param query:
        :param max_results:
        :return:
        """
        from_mappings = self.mappers.get(from_idtype, {})
        to_mappings = from_mappings.get(to_idtype, [])
        to_mappings = [m for m in to_mappings if hasattr(m, "search")]

        if not to_mappings:
            _log.warn("cannot find mapping from %s to %s", from_idtype,
                      to_idtype)
            return []

        if len(to_mappings) == 1:
            # single mapping no need for merging
            return to_mappings[0].search(query, max_results)

        rset = set()
        for mapper in to_mappings:
            results = mapper.search(query, max_results)
            for r in results:
                rset.add(r)
        return list(rset)
Example #26
    def test_filtering_no_tags(self):
        a = self.make_target('a', tags=['tag1'])
        b = self.make_target('b', tags=['tag1', 'tag2'])
        c = self.make_target('c', tags=['tag2'])

        filtered_targets = TargetFiltering(set()).apply_tag_blacklist(
            [a, b, c])
        self.assertEqual([a, b, c], filtered_targets)
Example #27
import builtins
from typing import Any, Optional, Set


def set(
    env_var: builtins.str,
    default: Optional[Set[Any]] = None,
) -> Optional[Set[Any]]:
    if env_var is None or env_var == "":
        return default

    return builtins.set(env_var.split(","))
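
A quick usage sketch for the function above (it deliberately shadows the builtin ``set``, returning via ``builtins.set``):

assert set("a,b,c") == {"a", "b", "c"}
assert set("") is None  # empty input falls back to the default
assert set("", default=builtins.set()) == builtins.set()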
Example #28
def dirty(f, *args, **kwargs):
    """ Dirty the cache key for the function invoked with the given parameters. """

    # delete the key containing the cached value for this function
    version = f.__version
    shard = f.__shard
    sorted_parameters = _sorted_parameters(f, *args, **kwargs)
    keys_to_delete = builtins.set([_cache_key(f, sorted_parameters, version=version)])

    # if we have keys to be excluded, then dirty all of those as well
    if hasattr(f, '__wildcard'):
        included_parameters = [e for e in sorted_parameters if e[0] not in f.__wildcard]
        included_keys_key = _cache_key(f, included_parameters, INCLUDED_KEYS_PREFIX, version=version)
        keys = get(included_keys_key, shard) or builtins.set()
        keys_to_delete |= keys

    return delete_multi(keys_to_delete, shard)
Example #29
def merge_txt(raw_ann: List[Annotated_line], no_type: AnnType,
              prelude_type: AnnType) -> List[Annotated_line]:
    """
    Return a normalized (lines merged) version of ``raw_ann``
    '\n\n' in *.txt are the real lines, and are parsed as ''.
    So we merge all the strings between ''
    """

    merged_ann: List[Annotated_line] = []
    types: Set = set()
    lines: List = []
    i: int = 0

    for virtual_line in raw_ann:
        if virtual_line.text == '':
            i += 1
            # remove no_type from the virtual lines when part of the line was
            # annotated
            if len(types) > 1:
                types.discard(no_type)
            merged_ann += [
                Annotated_line(line_num=i, types=types, text=' '.join(lines))
            ]
            types = set()
            lines = []
        else:
            types |= virtual_line.types
            lines.append(virtual_line.text)

    def has_no_type(ann_line: Annotated_line) -> bool:
        return {no_type} == ann_line.types

    # add the prelude_type to all the first untyped lines
    for_prelude_ann: List[Annotated_line] = list(
        itertools.takewhile(has_no_type, merged_ann))
    with_prelude_ann: List[Annotated_line] = list(
        map(
            lambda x: Annotated_line(
                line_num=x.line_num, types={prelude_type}, text=x.text),
            for_prelude_ann))
    tail_ann: List[Annotated_line] = list(
        itertools.dropwhile(has_no_type, merged_ann))
    merged_ann = list(with_prelude_ann) + (list(tail_ann))

    return merged_ann
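
The prelude logic relies on ``takewhile``/``dropwhile`` splitting a sequence at the first element that fails the predicate, so prelude plus tail reassemble the whole list; in isolation:

import itertools

nums = [0, 0, 3, 0, 5]
prelude = list(itertools.takewhile(lambda n: n == 0, nums))  # [0, 0]
tail = list(itertools.dropwhile(lambda n: n == 0, nums))     # [3, 0, 5]
assert prelude + tail == nums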
Example #30
def set_toadd(i, n):
    toAdd = set()
    for idx in n.machine.nodes:
        if idx != i:
            toAdd.add(idx)
    if not n.hidden:
        for idx in conf_type.pubNodes:
            if idx != i:
                toAdd.add(idx)
    return toAdd
Example #31
    def __init__(self, path, name):
        self.path = path
        self.name = name
        self.folders = set()
        self.toc = Toc()
        self.last_updated = 0
        self.ignore_updates = False

        # add the addon name as a primary folder
        self.folders.add(name)
Example #32
 def __init__(self, file):
     with open(file , "r") as f:
         config_json = json.load(f)
     base_stations = config_json.get("base_stations")
     if not base_stations:
         raise "No base stations defined"
     self._bssid_set = set(base_stations)
     self._known_addrs = config_json.get("known_addresses")
     if not self._known_addrs:
         self._known_addrs = {}
Example #33
  def _filter_targets(self, targets):
    included_targets = TargetFilter.scoped_instance(self).apply(targets)
    excluded_targets = set(targets).difference(included_targets)

    if excluded_targets:
      self.context.log.info("{} target(s) excluded".format(len(excluded_targets)))
      for target in excluded_targets:
        self.context.log.debug("{} excluded".format(target.address.spec))

    return included_targets
Example #34
 def if_row():
     checkRow = splitList(gridsL, 9)
     for i in range(9):
         poslist = checkRow[i]
         for j in range(9):
             poslist[j] = abs(int(poslist[j]))
         sl = list(set(poslist))
         sl += [0] * (poslist.count(0) - 1)
         if len(poslist) != len(sl):
             return False
     return True
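
Stripped of the grid plumbing, the test above reduces to "the non-zero entries of a row must be unique", which the set/list length comparison expresses; a minimal sketch:

def row_ok(values):
    filled = [abs(int(v)) for v in values if int(v) != 0]
    return len(filled) == len(set(filled))

assert row_ok([1, 2, 0, 0, 3])
assert not row_ok([1, 2, 2])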
Example #35
 def __init__(self, username, password, page_id, output):
     if USE_VIRTUAL_DISPLAY:
         self.display = Display(visible=0, size=(1366, 768))
         self.display.start()
     self.driver = get_driver(username, password)
     self.page_id = page_id
     self.output = output
     self.post_count = 0
     self.post_id_old = set()
     self.current_year = datetime.datetime.now().year
     self.scrap_page(FB_HOME + self.page_id)
Example #36
 def known_idtypes(self):
     """
     returns a set of all known id types in this mapping graph
     :return:
     """
     s = set()
     for from_, v in self.mappers.items():
         s.add(from_)
         for to_ in list(v.keys()):
             s.add(to_)
     return s
Example #37
        def wrapper(*args, **kwargs):
            if not hasattr(f, '__version'):
                f.__version = version

            if not hasattr(f, '__shard'):
                f.__shard = shard

            # check if value already exists in cache
            sorted_parameters = _sorted_parameters(f, *args, **kwargs)
            key = _cache_key(f, sorted_parameters, version=version)
            cached = get(key, shard)
            if cached is not None:
                return cached

            # value is not cached, so compute and store
            result = f(*args, **kwargs)
            set(key, result, shard, timeout=timeout)

            # if this function has specified a list of keys to wildcard, then we need to store a separate
            # list mapping a key without those parameters to a list of keys with those parameters
            included_keys_key = None
            if wildcard:
                # keep track of the excluded parameters so we have it when we dirty later
                wildcard_set = builtins.set(wildcard)
                if not hasattr(f, '__wildcard'):
                    f.__wildcard = wildcard_set

                # get a list of parameters without the keys designated excluded
                included_parameters = [e for e in sorted_parameters if e[0] not in wildcard_set]
                included_keys_key = _cache_key(f, included_parameters, INCLUDED_KEYS_PREFIX, version=version)
                included_keys = get(included_keys_key, shard)

                # add the current key to that set of keys
                if not included_keys or not isinstance(included_keys, builtins.set):
                    included_keys = builtins.set()
                included_keys.add(key)
                set(included_keys_key, included_keys, shard)

            return result
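
A conceptual sketch of the wildcard bookkeeping above, with a plain dict standing in for the ``get``/``set``/``delete_multi`` backend (all names here are illustrative, not the real API):

cache = {}

def _key(params):
    return repr(sorted(params.items()))

def store(params, value, wildcard=()):
    full = _key(params)
    cache[full] = value
    if wildcard:
        # index key built from the non-wildcard parameters only
        included = {k: v for k, v in params.items() if k not in wildcard}
        cache.setdefault("included:" + _key(included), set()).add(full)

def dirty(params, wildcard=()):
    doomed = {_key(params)}  # the exact key
    included = {k: v for k, v in params.items() if k not in wildcard}
    doomed |= cache.pop("included:" + _key(included), set())
    for k in doomed:
        cache.pop(k, None)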
Example #38
    def _parse_message(self):
        """Parse the message."""
        self._hook_check_start()
        # Dump the message raw headers
        for name, raw_value in self.msg._headers:
            self.raw_headers[name].append(raw_value)

        # XXX This is strange, but it's what SA does.
        # The body starts with the Subject header(s)
        body = list(self.get_decoded_header("Subject"))
        raw_body = list()
        for payload, part in self._iter_parts(self.msg):
            # Extract any MIME headers
            for name, raw_value in part._headers:
                self.raw_mime_headers[name].append(raw_value)
            text = None
            if payload is not None:
                # this must be a text part
                self.uri_list.update(set(URL_RE.findall(payload)))
                if part.get_content_subtype() == "html":
                    text = self.normalize_html_part(payload.replace("\n", " "))
                    text = " ".join(text)
                    body.append(text)
                    raw_body.append(payload)
                else:
                    text = payload.replace("\n", " ")
                    body.append(text)
                    raw_body.append(payload)
            self._hook_extract_metadata(payload, text, part)
        self.text = " ".join(body)
        self.raw_text = "\n".join(raw_body)
        self._parse_sender()
        received_headers = self.get_decoded_header("Received")
        for header in self.ctxt.conf["originating_ip_headers"]:
            headers = ["X-ORIGINATING-IP: %s" % x
                       for x in self.get_decoded_header(header)]
            received_headers.extend(headers)
        received_obj = ReceivedParser(received_headers)
        self.received_headers = received_obj.received
        self._parse_relays(self.received_headers)

        try:
            self._create_plugin_tags(self.received_headers[0])
        except IndexError:
            pass

        for header in self.received_headers:
            self.hostname_with_ip.append((header["rdns"], header["ip"]))
Example #39
def disconnect(devices=None):
	"""
	"""
	
	global targets
	remove_devices = devices or list(targets)
	targets = targets - builtins.set(remove_devices)
	
	# This means disconnect from all already established connections
	if not devices:
		global available_connections
		for k, v in available_connections.items():
			pass
			# close socket in available_connections[k]["socket"]

		available_connections.clear()
			
	# print("disconnect {}".format(connections))
Example #40
def modified(date=None, etag=None):
    """
    Checks to see if the page has been modified since the version in the
    requester's cache.
    
    When you publish pages, you can include `Last-Modified` and `ETag`
    with the date the page was last modified and an opaque token for
    the particular version, respectively. When readers reload the page, 
    the browser sends along the modification date and etag value for
    the version it has in its cache. If the page hasn't changed, 
    the server can just return `304 Not Modified` and not have to 
    send the whole page again.
    
    This function takes the last-modified date `date` and the ETag `etag`
    and checks the headers to see if they match. If they do, it returns 
    `True`, or otherwise it raises NotModified error. It also sets 
    `Last-Modified` and `ETag` output headers.
    """
    try:
        from builtins import set
    except ImportError:
        # for python 2.3
        from sets import Set as set

    n = set([x.strip('" ') for x in web.ctx.env.get('HTTP_IF_NONE_MATCH', '').split(',')])
    m = net.parsehttpdate(web.ctx.env.get('HTTP_IF_MODIFIED_SINCE', '').split(';')[0])
    validate = False
    if etag:
        if '*' in n or etag in n:
            validate = True
    if date and m:
        # we subtract a second because 
        # HTTP dates don't have sub-second precision
        if date-datetime.timedelta(seconds=1) <= m:
            validate = True
    
    if date: lastmodified(date)
    if etag: web.header('ETag', '"' + etag + '"')
    if validate:
        raise web.notmodified()
    else:
        return True
Example #41
    def _parse_message(self):
        """Parse the message."""
        self._hook_check_start()
        # Dump the message raw headers
        for name, raw_value in self.msg._headers:
            self.raw_headers[name].append(raw_value)

        # XXX This is strange, but it's what SA does.
        # The body starts with the Subject header(s)
        body = list(self.get_decoded_header("Subject"))
        raw_body = list()
        for payload, part in self._iter_parts(self.msg):
            # Extract any MIME headers
            for name, raw_value in part._headers:
                self.raw_mime_headers[name].append(raw_value)
            text = None
            if payload is not None:
                # this must be a text part
                self.uri_list.update(set(URL_RE.findall(payload)))
                if part.get_content_subtype() == "html":
                    text = self.normalize_html_part(payload.replace("\n", " "))
                    text = " ".join(text)
                    body.append(text)
                    raw_body.append(payload)
                else:
                    text = payload.replace("\n", " ")
                    body.append(text)
                    raw_body.append(payload)
            self._hook_extract_metadata(payload, text, part)
        self.text = " ".join(body)
        self.raw_text = "\n".join(raw_body)
        self._parse_sender()

        for value in self.get_received_headers("Received"):
            if 'from' in value:
                hostname = value.split(' ')[1]
                ip = IPFRE.search(value).group()
                clean_ip = ip.strip("[ ]();\n")
                try:
                    self.hostname_with_ip.append((hostname, clean_ip))
                except ValueError:
                    continue
Example #42
def disconnect(devices=None):
    """[summary]
    
    [description]
    
    Keyword Arguments:
        devices {[type]} -- [description] (default: {None})
    """
    
    # Disconnect from selected devices or all targets   
    global targets

    remove_devices = devices or list(targets)

    # Update target set for future operations
    targets = targets - builtins.set(remove_devices)
    
    # StreamWriters are closed only if no devices are specified (disconnect from all)
    if not devices:

        global async_event_loop
        global available_connections

        for gw in available_connections.keys():
            s = available_connections[gw]["streams"]

            try:
                async_event_loop.run_until_complete(_close_stream(s[1]))

            except Exception as e:
                print("Exception while closing streams: {}".format(e))

        available_connections.clear()

        
        try:
            async_event_loop.stop()
            async_event_loop.close()

        except Exception as e:
            print("Disconnect: {}".format(e))
Example #43
  def __init__(self, context, workdir):
    """Subclass __init__ methods, if defined, *must* follow this idiom:

    class MyTask(Task):
      def __init__(self, *args, **kwargs):
        super(MyTask, self).__init__(*args, **kwargs)
        ...

    This allows us to change Task.__init__()'s arguments without
    changing every subclass. If the subclass does not need its own
    initialization, this method can (and should) be omitted entirely.

    :API: public
    """
    super(TaskBase, self).__init__()
    self.context = context
    self._workdir = workdir

    self._task_name = type(self).__name__
    self._cache_key_errors = set()
    self._cache_factory = CacheSetup.create_cache_factory_for_task(self)
    self._force_invalidated = False
Example #44
    def _parse_message(self):
        """Parse the message."""
        self._hook_check_start()
        # Dump the message raw headers

        self.ctxt.log.debug("EMAIL %s", self.raw_msg)
        for line in self.raw_msg:
            self.ctxt.log.debug("LINE %s", line)
            if not email.feedparser.headerRE.match(line):
                # If we saw the RFC defined header/body separator
                # (i.e. newline), just throw it away. Otherwise the line is
                # part of the body so push it back.
                if not email.feedparser.NLCRE.match(line):
                    self.missing_header_body_separator = True
                break

        for name, raw_value in self.msg._headers:
            self.raw_headers[name].append(raw_value)

        # XXX This is strange, but it's what SA does.
        # The body starts with the Subject header(s)
        body = list(self.get_decoded_header("Subject"))
        raw_body = list()
        for payload, part in self._iter_parts(self.msg):
            if not part._headers:
                self.missing_boundary_header = True
            # Extract any MIME headers
            for name, raw_value in part._headers:
                self.raw_mime_headers[name].append(raw_value)
            text = None
            if payload is not None:
                # this must be a text part
                self.uri_list.update(set(URL_RE.findall(payload)))
                if part.get_content_subtype() == "html":
                    text = self.normalize_html_part(payload.replace("\n", " "))
                    text = " ".join(text)
                    body.append(text)
                    raw_body.append(payload)
                else:
                    text = payload.replace("\n", " ")
                    body.append(text)
                    raw_body.append(payload)
            self._hook_extract_metadata(payload, text, part)
        self.text = " ".join(body)
        self.raw_text = "\n".join(raw_body)

        received_headers = self.get_decoded_header("Received")
        for header in self.ctxt.conf["originating_ip_headers"]:
            headers = ["X-ORIGINATING-IP: %s" % x
                       for x in self.get_decoded_header(header)]
            received_headers.extend(headers)
        received_obj = ReceivedParser(received_headers)
        self.received_headers = received_obj.received
        self._parse_relays(self.received_headers)
        self._parse_sender()

        try:
            self._create_plugin_tags(self.received_headers[0])
        except IndexError:
            pass

        for header in self.received_headers:
            self.hostname_with_ip.append((header["rdns"], header["ip"]))
Example #45
import builtins
import itertools

from pyname import pyname

try:
	targets
except NameError:
	targets = builtins.set()

try:
	available_connections 
except NameError:
	available_connections = dict()


try:
	properties
except NameError:
	properties = dict()



def connect(devices, rbac_token, timeout_s):
	"""
	"""

	# Make a copy of the generator
	devices, devices_copy = itertools.tee(devices)

	global targets	
Example #46
 def apply(self, targets):
   exclude_tags = set(self.get_options().exclude_tags)
   return TargetFiltering(exclude_tags).apply_tag_blacklist(targets)
Example #47
    def plot(self, wells="*", groupby=None, title=None, xlabel=None, ylabel=None, max_legend_len=20):
        """
        This generates a plot of the kinetics curve. Note that this function is meant for use under a Jupyter notebook
        environment

        Example Usage:

        .. code-block:: python

            from transcriptic.analysis.kinetics import Spectrophotometry
            growth_curve = Spectrophotometry(myRun.data.Datasets)
            growth_curve.plot(wells=["A1", "A2", "B1", "B2"])
            growth_curve.plot(wells=["A1", "A2", "B1", "B2"], groupby="row", title="Row Groups")
            growth_curve.plot(wells=["A1", "A2", "B1", "B2"], groupby="name", ylabel="Absorbance Units")
            growth_curve.plot(groupby="name", max_legend_len=40)

        Parameters
        ----------
        wells: Optional[list or str]
            If not specified, this plots all the wells associated with the Datasets given. Otherwise, specify
            a list of well indices (["A1", "B1"]) or a specific well ("A1")
        groupby: Optional[str]
            When specified, this groups the wells with the same property value together. On the plot, each group will
            be represented by a single curve with the mean values and error bars of 1 std. dev. away from the mean
        title: Optional[str]
            Plot title. Default: "Kinectics Curve (`run-id`)"
        xlabel: Optional[str]
            Plot x-axis label. Default: "Time"
        ylabel: Optional[str]
            Plot y-axis label. Default: "`Operation` (`Wavelength`)"
        max_legend_len
            Maximum number of characters for the legend labels before truncating. Default: 20

        Returns
        -------
        IPlot
            Plotly iplot object. Will be rendered nicely in Jupyter notebook instance
        """
        # TODO: Shift init_notebook_mode() to start of notebook instance
        py.offline.init_notebook_mode()

        if isinstance(wells, str):
            if wells != "*":
                wells = [wells]
            else:
                well_readings = self.readings
                wells = list(self.properties.index)
        if isinstance(wells, list):
            well_readings = self.readings.loc[wells]

        if not groupby:
            traces = [go.Scatter(x=self.readings.columns, y=well_readings.loc[well],
                                 name=self.properties["name"].loc[well]) for well in wells]
        else:
            if groupby not in self.properties.columns:
                raise ValueError("\'%s\' not found in the properties table. Please specify a column which exists" %
                                 groupby)
            grouped = self.properties.groupby(groupby)
            index_list = [grouped.get_group(group).index for group in grouped.groups]
            reading_map = []
            for indx in index_list:
                common_set = set(well_readings.index).intersection(set(indx))
                if len(common_set) != 0:
                    reading_map.append(well_readings.loc[common_set])
            if len(reading_map) != 0:
                traces = [go.Scatter(x=self.readings.columns,
                                     y=reading.mean(),
                                     name=self._truncate_name(self.properties[groupby].loc[reading.iloc[0].name],
                                                              max_legend_len),
                                     error_y=dict(type='data', array=reading.std(), visible=True)
                                     )
                          for reading in reading_map]
            else:
                raise ValueError("No common groups found for specified groupby: %s" % groupby)

        # Assume all data is generated from the same run-id for now
        if not title:
            title = "Kinetics Curve (%s)" % self.datasets[0].attributes["instruction"]["run"]["id"]
        if not xlabel:
            xlabel = 'Time'
        if not ylabel:
            if self.operation == "absorbance":
                ylabel = "RAU (%s)" % self.datasets[0].attributes["instruction"]["operation"]["wavelength"]
            elif self.operation == "fluorescence":
                ylabel = "RFU (%s/%s)" % (self.datasets[0].attributes["instruction"]["operation"]["excitation"],
                                          self.datasets[0].attributes["instruction"]["operation"]["emission"])
            elif self.operation == "luminescence":
                ylabel = "Luminescence"

        layout = go.Layout(
            title=title,
            xaxis=dict(
                title=xlabel,
                titlefont=dict(
                    family='Courier New, monospace',
                    size=18,
                    color='#7f7f7f'
                )
            ),
            yaxis=dict(
                title=ylabel,
                titlefont=dict(
                    family='Courier New, monospace',
                    size=18,
                    color='#7f7f7f'
                )
            ),
            legend=dict(
                x=100,
                y=1
            )
        )

        fig = go.Figure(data=traces, layout=layout)
        return py.offline.iplot(fig)
Example #48
import builtins


def _filter_fields(fields):
    """ Filter out invalid field names. """

    exclude = builtins.set(['__created__'])
    return [e for e in fields if e not in exclude]
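
A quick usage sketch:

>>> _filter_fields(['name', '__created__', 'age'])
['name', 'age']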