Example no. 1
 def __next__(self):
     """
     Return the next error node
     """
     for child in self.children:
         yield child
     next(self.parent)
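Note that because this `__next__` contains `yield`, it is a generator function: calling `next(node)` returns a fresh generator object rather than the next error node, and `next(self.parent)` is never usefully reached. A minimal corrected sketch, assuming the intent is "exhaust this node's children, then fall back to the parent iterator" (the class name and the `_children` attribute are made up for illustration):

class ErrorNodeIterator:
    """Hypothetical sketch: walk the node's children, then defer to the parent."""

    def __init__(self, children, parent):
        self._children = iter(children)
        self.parent = parent  # assumed to be another iterator

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self._children)
        except StopIteration:
            # Our own children are exhausted; continue with the parent iterator.
            return next(self.parent)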
Example no. 2
def iterunique(source, key):
    # assume source is already sorted by the key
    it = iter(source)

    hdr = next(it)
    yield tuple(hdr)

    # convert field selection into field indices
    if key is None:
        indices = range(len(hdr))
    else:
        indices = asindices(hdr, key)

    # now use field indices to construct a _getkey function
    # N.B., this may raise an exception on short rows, depending on
    # the field selection
    getkey = operator.itemgetter(*indices)

    prev = next(it)
    prev_key = getkey(prev)
    prev_comp_ne = True

    for curr in it:
        curr_key = getkey(curr)
        curr_comp_ne = curr_key != prev_key
        if prev_comp_ne and curr_comp_ne:
            yield tuple(prev)
        prev = curr
        prev_key = curr_key
        prev_comp_ne = curr_comp_ne

    # last one?
    if prev_comp_ne:
        yield tuple(prev)
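A minimal usage sketch of `iterunique` (the function above needs `import operator`; passing key=None keeps the sketch self-contained, since the `asindices` helper is not shown). Note that it yields only rows whose key occurs exactly once, not just first occurrences:

import operator

table = [('id', 'colour'),
         (1, 'blue'),
         (1, 'blue'),   # duplicated row: dropped entirely
         (2, 'red')]

print(list(iterunique(table, key=None)))
# [('id', 'colour'), (2, 'red')]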
Example no. 3
def get_single_if_any(iterable,
                      exception_on_multiple=Exception('More than one value '
                                                      'not allowed.')):
    '''
    Get the single item of `iterable`, if any.
    
    If `iterable` has one item, return it. If it's empty, get `None`. If it has
    more than one item, raise an exception. (Unless
    `exception_on_multiple=None`.)
    '''
    assert isinstance(exception_on_multiple, Exception) or \
                                                  exception_on_multiple is None
    iterator = iter(iterable)
    try:
        first_item = next(iterator)
    except StopIteration:
        return None
    else:
        if exception_on_multiple:
            try:
                second_item = next(iterator)
            except StopIteration:
                return first_item
            else:
                raise exception_on_multiple
        else: # not exception_on_multiple
            return first_item
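A quick usage sketch covering the three cases described in the docstring:

print(get_single_if_any([42]))        # 42
print(get_single_if_any([]))          # None
try:
    get_single_if_any([1, 2])
except Exception as error:
    print(error)                      # More than one value not allowed.
print(get_single_if_any([1, 2], exception_on_multiple=None))  # 1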
Example no. 4
def test_bison_lalr_repr_automaton_lr0():
    ex = grammar_examples.lr0.ex_minimal1
    grammar = ex.grammar

    automaton = compute_automaton(grammar)
    assert repr(automaton) == '<Automaton with 4 states>'
    assert repr(automaton._data) == '''
[<StateData #0 with 1 actions, 1 gotos
  <bison.ItemSet #0, size 2
    < $accept → • Root $eof ∥ >
    < Root → • term ∥ >
>>, <StateData #1 with 1 actions, 0 gotos
  <bison.ItemSet #1, size 1
    < Root → term • ∥ { $eof } >
>>, <StateData #2 with 1 actions, 0 gotos
  <bison.ItemSet #2, size 1
    < $accept → Root • $eof ∥ >
>>, <StateData #3 with 2 actions, 0 gotos
  <bison.ItemSet #3, size 1
    < $accept → Root $eof • ∥ >
>>]
    '''.strip().replace('•', _mdot).replace('∥', _parallel)
    assert repr(automaton._data[0]._id) == '''
<StateId for <StateData #0 with 1 actions, 1 gotos
  <bison.ItemSet #0, size 2
    < $accept → • Root $eof ∥ >
    < Root → • term ∥ >
>>>
'''.strip().replace('•', _mdot).replace('∥', _parallel)
    assert repr(next(iter(automaton._data[0]._actions.values()))) == 'Shift(<state 1>)'
    assert repr(next(iter(automaton._data[1]._actions.values()))) == 'Reduce(<rule 1>)'
    assert repr(next(iter(automaton._data[0]._gotos.values()))) == 'Goto(<state 2>)'
Example no. 5
    def _sparql_query_pattern_part(
            self,
            bind=None,
            values=None,
            indent=' ',
    ):
        assert bind is None or isinstance(bind, dict)
        assert values is None or (
            isinstance(values, dict) and
            isinstance(next(six.iterkeys(values)), Iterable) and
            isinstance(next(six.itervalues(values)), Iterable)
        )

        res = ''
        if values:
            res = indent + self._sparql_values_part(values, indent) + '\n'
        res += indent + self._sparql_triples_part(indent) + '\n'
        if bind:
            res += '%sFILTER(\n%s\n%s)\n' % (
                indent,
                ' &&\n'.join([
                    '%s %s=%s' % (indent, k.n3(), self.curify(v))
                    for k, v in sorted(bind.items())
                    if k in self.vars_in_graph
                ]),
                indent,
            )
        return res
Example no. 6
def read_tss_anno(anno, gene_filter):
	positions = {}
	g_filter = []
	c = 0
	if gene_filter:
		with open(gene_filter) as f:
			for line in f:
				line = line.rstrip()
				word = line.split("\t")
				g_filter.append(word[0])
	with open(anno) as f:
		next(f)
		for line in f:
			line = line.rstrip()
			word = line.split("\t")
			if word[1] == "MT": chrom = "chrM"
			elif word[1] == "X": chrom = "chrX"
			elif word[1] == "Y": chrom = "chrY"
			elif word[1].isdigit(): chrom = "chr" + word[1]
			if word[4] == "1": 
				strand = "+"
			else: 
				strand = "-"
			if g_filter:
				if word[0] in g_filter:
					positions[word[0]] = (chrom, int(word[2]), strand)
					c +=  1
			else:
				positions[word[0]] = (chrom, int(word[2]), strand)
				c +=  1
	return positions
Example no. 7
def is_fastq( fqpath ):
    try:
        next( SeqIO.parse( fqpath, 'fastq' ) )
        return True
    except:
        print "{} is not a fastq file".format(fqpath)
        return False
Example no. 8
def window(iter, pre_size=1, post_size=1):
	"""
	Given an iterable, return a new iterable which yields triples of
	(pre, item, post), where pre and post are the items preceding and
	following the item (or None if no such item is appropriate). pre
	and post will always be pre_size and post_size in length.

	>>> example = window(range(10), pre_size=2)
	>>> pre, item, post = next(example)
	>>> pre
	(None, None)
	>>> post
	(1,)
	>>> next(example)
	((None, 0), 1, (2,))
	>>> list(example)[-1]
	((7, 8), 9, (None,))
	"""
	pre_iter, iter = itertools.tee(iter)
	pre_iter = itertools.chain((None,) * pre_size, pre_iter)
	pre_iter = nwise(pre_iter, pre_size)
	post_iter, iter = itertools.tee(iter)
	post_iter = itertools.chain(post_iter, (None,) * post_size)
	post_iter = nwise(post_iter, post_size)
	next(post_iter, None)
	return six.moves.zip(pre_iter, iter, post_iter)
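The snippet relies on a helper `nwise` that is not shown (overlapping n-tuples, in the spirit of the itertools `pairwise` recipe). A sketch of a compatible implementation, judging from the doctest above:

import itertools

def nwise(iterable, n=2):
    """Yield overlapping n-tuples: s -> (s0,..,s[n-1]), (s1,..,s[n]), ..."""
    iterators = itertools.tee(iterable, n)
    for offset, iterator in enumerate(iterators):
        for _ in range(offset):        # advance each copy by its offset
            next(iterator, None)
    return zip(*iterators)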
Example no. 9
def autoscale_rule_create(cmd, client, autoscale_name, resource_group_name, condition,
                          scale, profile_name=DEFAULT_PROFILE_NAME, cooldown=5, source=None,
                          timegrain="avg 1m"):
    from azure.mgmt.monitor.models import ScaleRule, ScaleAction, ScaleDirection
    autoscale_settings = client.get(resource_group_name, autoscale_name)
    profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
    condition.metric_resource_uri = source or autoscale_settings.target_resource_uri
    condition.statistic = timegrain.statistic
    condition.time_grain = timegrain.time_grain
    rule = ScaleRule(
        metric_trigger=condition,
        scale_action=ScaleAction(
            direction=scale.direction,
            type=scale.type,
            cooldown='PT{}M'.format(cooldown),
            value=scale.value)
    )
    profile.rules.append(rule)
    autoscale_settings = client.create_or_update(resource_group_name, autoscale_name, autoscale_settings)
    profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)

    # determine if there are unbalanced rules
    scale_out_rule_count = len([x for x in profile.rules if x.scale_action.direction == ScaleDirection.increase])
    scale_in_rule_count = len([x for x in profile.rules if x.scale_action.direction == ScaleDirection.decrease])
    if scale_out_rule_count and not scale_in_rule_count:
        logger.warning("Profile '%s' has rules to scale out but none to scale in. "
                       "Recommend creating at least 1 scale in rule.", profile_name)
    elif scale_in_rule_count and not scale_out_rule_count:
        logger.warning("Profile '%s' has rules to scale in but none to scale out. "
                       "Recommend creating at least 1 scale out rule.", profile_name)
    return rule
Example no. 10
def test_pqueue_by_servicebus_client_iter_messages_simple(live_servicebus_config, partitioned_queue):
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=True)

    queue_client = client.get_queue(partitioned_queue)
    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:

        with queue_client.get_sender() as sender:
            for i in range(10):
                message = Message("Iter message no. {}".format(i))
                sender.send(message)

        count = 0
        for message in receiver:
            print_message(message)
            message.complete()
            with pytest.raises(MessageAlreadySettled):
                message.complete()
            with pytest.raises(MessageAlreadySettled):
                message.renew_lock()
            count += 1

        with pytest.raises(InvalidHandlerState):
            next(receiver)
    assert count == 10
Example no. 11
    def test_select_best_intent_with_domain(self):
        """Test to make sure that best intent is working with domains."""
        self.engine.register_domain('Domain1')
        self.engine.register_domain('Domain2')

        # Creating first intent domain
        parser1 = IntentBuilder("Parser1").require("Entity1").build()
        self.engine.register_intent_parser(parser1, domain='Domain1')
        self.engine.register_entity("tree", "Entity1", domain='Domain1')

        # Creating second intent domain
        parser2 = IntentBuilder("Parser1").require("Entity2").build()
        self.engine.register_intent_parser(parser2, domain="Domain2")
        self.engine.register_entity("house", "Entity2", domain="Domain2")

        utterance = "Entity1 Entity2 go to the tree house"
        intents = self.engine.determine_intent(utterance, 2)

        intent = next(intents)
        assert intent
        self.assertEqual(intent['intent_type'], 'Parser1')

        intent = next(intents)
        assert intent
        self.assertEqual(intent['intent_type'], 'Parser1')
Example no. 12
    def test_post_process_extensions_generator_response(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants

        controller = Controller()
        resource = wsgi.Resource(controller)

        called = []

        def extension1(req):
            yield
            called.append(1)

        def extension2(req):
            yield
            called.append(2)
            yield 'foo'

        ext1 = extension1(None)
        next(ext1)
        ext2 = extension2(None)
        next(ext2)

        response = resource.post_process_extensions([ext2, ext1],
                                                    None, None, {})

        self.assertEqual(called, [2])
        self.assertEqual(response, 'foo')
Example no. 13
def _remove_sig(signature):
    """
    Remove the signature node from payload keeping any tail element.
    This is needed for eneveloped signatures.

    :param payload: Payload to remove signature from
    :type data: XML ElementTree Element
    :param signature: Signature to remove from payload
    :type digest_algorithm: XML ElementTree Element
    """
    try:
        signaturep = next(signature.iterancestors())
    except StopIteration:
        raise ValueError("Can't remove the root signature node")
    if signature.tail is not None:
        try:
            signatures = next(signature.itersiblings(preceding=True))
        except StopIteration:
            if signaturep.text is not None:
                signaturep.text = signaturep.text + signature.tail
            else:
                signaturep.text = signature.tail
        else:
            if signatures.tail is not None:
                signatures.tail = signatures.tail + signature.tail
            else:
                signatures.tail = signature.tail
    signaturep.remove(signature)
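A small usage sketch with lxml (whose `iterancestors`/`itersiblings` APIs the snippet depends on); the element names are made up:

from lxml import etree

doc = etree.fromstring(
    '<root><data>payload</data><Signature>sig</Signature>tail text</root>')
_remove_sig(doc.find('Signature'))
print(etree.tostring(doc))
# b'<root><data>payload</data>tail text</root>'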
Example no. 14
    def select_map(self, latitude, longitude):
        """
        Find and display a nearby track by latitude / longitude
        The selection will favor a previously selected track in the nearby area
        :param latitude
        :type  latitude float
        :param longitude
        :type longitude float
        :returns the selected track, or None if there are no nearby tracks
        :type Track 
        """

        if not latitude or not longitude:
            return None

        point = GeoPoint.fromPoint(latitude, longitude)
        nearby_tracks = self.track_manager.find_nearby_tracks(point)

        saved_tracks = self.get_pref_track_selections()

        saved_nearby_tracks = [t for t in nearby_tracks if t.track_id in saved_tracks]

        # select the saved nearby track or just a nearby track
        track = next(iter(saved_nearby_tracks), None)
        track = next(iter(nearby_tracks), None) if track is None else track

        if self.track != track:
            # only update the trackmap if it's changing
            self._select_track(track)
        return track
Example no. 15
    def __memory_info(self):
        # Get all memory info and get details with 20 depth
        size_obj = asizeof.asized(self._node, detail=20)
        whole_size = size_obj.size
        size_obj = next(r for r in size_obj.refs if r.name == '__dict__')
        size_dict = dict()
        # Sort in descending order to select most 'heavy' collections
        for num, sub_obj in enumerate(sorted(size_obj.refs, key=lambda v: v.size, reverse=True)):
            if num > 5:
                break
            size_dict[sub_obj.name] = dict()
            size_dict[sub_obj.name]['size'] = sub_obj.size

            # Check whether this is a plain object (whose refs are just __dict__ and __class__) or an iterable (dict, list, etc.)
            if len(sub_obj.refs) <= 2 and any(r.name == '__dict__' for r in sub_obj.refs):
                sub_obj_ref = next(r for r in sub_obj.refs if r.name == '__dict__')
            else:
                sub_obj_ref = sub_obj

            for num, sub_sub_obj in enumerate(sorted(sub_obj_ref.refs, key=lambda v: v.size, reverse=True)):
                if num > 5:
                    break
                size_dict[sub_obj.name][sub_sub_obj.name] = sub_sub_obj.size

        result_dict = {"whole_node_size": whole_size}
        result_dict.update(size_dict)
        return {
            'Memory': result_dict
        }
Example no. 16
def decode_item(next, token):
    if token == b'i':
        # integer: "i" value "e"
        data = int(next())
        if next() != b'e':
            raise ValueError
    elif token == b's':
        # string: "s" value (virtual tokens)
        data = next()
        # Strings in torrent file are defined as utf-8 encoded
        try:
            data = data.decode('utf-8')
        except UnicodeDecodeError:
            # The pieces field is a byte string, and should be left as such.
            pass
    elif token == b'l' or token == b'd':
        # container: "l" (or "d") values "e"
        data = []
        tok = next()
        while tok != b'e':
            data.append(decode_item(next, tok))
            tok = next()
        if token == b'd':
            data = dict(list(zip(data[0::2], data[1::2])))
    else:
        raise ValueError
    return data
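`decode_item` expects a zero-argument callable producing the next token, so it is normally driven by a small tokenizer. A hedged companion sketch (the `tokenize` and `decode` helpers below are assumptions, not part of the original snippet):

import re

def tokenize(data):
    # Emit the "virtual tokens" decode_item expects from a bencoded byte
    # string: the markers b'i', b'l', b'd', b'e', integer payloads, and
    # string payloads preceded by a synthetic b's' marker.
    matcher = re.compile(rb"([idel])|(\d+):|(-?\d+)").match
    pos = 0
    while pos < len(data):
        m = matcher(data, pos)
        if m is None:
            raise ValueError("invalid bencode at byte %d" % pos)
        token = m.group(m.lastindex)
        pos = m.end()
        if m.lastindex == 2:                  # length-prefixed string
            length = int(token)
            yield b"s"
            yield data[pos:pos + length]
            pos += length
        else:
            yield token

def decode(data):
    tokens = tokenize(data)
    return decode_item(tokens.__next__, next(tokens))

print(decode(b"d3:cow3:moo4:spaml1:a1:bee"))
# {'cow': 'moo', 'spam': ['a', 'b']}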
Example no. 17
def test_stratified_kfold_no_shuffle():
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    X, y = np.ones(4), [1, 1, 0, 0]
    splits = StratifiedKFold(2).split(X, y)
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])

    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])

    X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
    splits = StratifiedKFold(2).split(X, y)
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])

    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])

    # Check if get_n_splits returns the number of folds
    assert_equal(5, StratifiedKFold(5).get_n_splits(X, y))
Example no. 18
 def run(self, infile, label, outfile, existing=None):
     existing_rows = {}
     if existing:
         with open(existing, 'rU') as csvfile:
             reader = csv.reader(csvfile)
             existing_rows = dict([(x[0], x[1:]) for x in reader])
     with open(infile, 'rU') as csvfile:
         reader = csv.reader(csvfile)
         # Ignore the header
         next(reader)
         for file_name, true_val, pred_val in reader:
             if existing and file_name not in existing_rows:
                 raise Exception(
                     "The file name ({0}) doesn't exist in the existing file.".format(file_name))
             if not existing:
                 existing_rows[file_name] = ['', '']
             existing_rows[file_name] = (
                 _combine_values(existing_rows[file_name][0], true_val, label),
                 _combine_values(existing_rows[file_name][1], pred_val, label)
             )
     with open(outfile, 'w') as csvfile:
         writer = csv.writer(csvfile)
         writer.writerow(['file_name', 'true', 'predicted'])
         for k, v in existing_rows.items():
             writer.writerow((k, *v))
Example no. 19
def load_athletes(filename):
    """
    Loads athlete data from 'filename' into a list of tuples.

    Returns a list of tuples of each athlete's attributes, where
      the last element of each tuple is a list of events the athlete
      competed in.

    Hint: Use the csv module to read quoted fields correctly.

    1. The header line is skipped
    2. Rows are removed if missing a value for the age, height, or weight.
    3. All trailing events are placed in a list in position EVENTS_COL

    For example:
    [...,
     ['Zhaoxu Zhang', "People's Republic of China",
      '24', '221', '110', 'M', '11/18/1987',
      '', '0', '0', '0', '0',
      ['Basketball', "Men's Basketball"]],
     ...
    ]
    """
    assert(type(filename) == str and len(filename) > 0)
    athletes = []
    with open(filename) as csvfile:
        athlete_data = csv.reader(csvfile)
        next(csvfile, None)
        for row in athlete_data:
            if row[2] and row[3] and row[4]:
                row[12] = [row[12], row[13].split(', ')]
                del row[-1]
                athletes.append(row)

    return athletes
Example no. 20
    def get_cols(self, lines):
        """Initialize the header Column objects from the table ``lines``.

        Based on the previously set Header attributes find or create the column names.
        Sets ``self.cols`` with the list of Columns.  This list only includes the actual
        requested columns after filtering by the include_names and exclude_names
        attributes.  See ``self.names`` for the full list.

        :param lines: list of table lines
        :returns: None
        """

        start_line = _get_line_index(self.start_line, self.process_lines(lines))
        if start_line is None:
            # No header line so auto-generate names from n_data_cols
            if self.names is None:
                # Get the data values from the first line of table data to determine n_data_cols
                try:
                    first_data_vals = next(self.data.get_str_vals())
                except StopIteration:
                    raise InconsistentTableError('No data lines found so cannot autogenerate column names')
                n_data_cols = len(first_data_vals)
                self.names = [self.auto_format % i for i in range(1, n_data_cols+1)]

        elif self.names is None:
            # No column names supplied so read them from header line in table.
            for i, line in enumerate(self.process_lines(lines)):
                if i == start_line:
                    break
            else: # No header line matching
                raise ValueError('No header line found in table')

            self.names = next(self.splitter([line]))
        
        self._set_cols_from_names()
Example no. 21
def get_fluence(e_0=100.0):
    """
    Returns a function representing the electron fluence with the distance in CSDA units.

    Args:
        e_0 (float): The kinetic energy whose CSDA range is used to scale the distances.

    Returns:
        A function representing fluence(x,u) with x in CSDA units.

    """
    # List of available energies
    e0_str_list = list(map(lambda x: (os.path.split(x)[1]).split(".csv")[
        0], glob(os.path.join(data_path, "fluence", "*.csv"))))
    e0_list = sorted(list(map(int, list(filter(str.isdigit, e0_str_list)))))

    e_closest = min(e0_list, key=lambda x: abs(x - e_0))

    with open(os.path.join(data_path, "fluence/grid.csv"), 'r') as csvfile:
        r = csv.reader(csvfile, delimiter=' ', quotechar='|',
                       quoting=csv.QUOTE_MINIMAL)
        t = next(r)
        x = np.array([float(a) for a in t[0].split(",")])
        t = next(r)
        u = np.array([float(a) for a in t[0].split(",")])
    t = []
    with open(os.path.join(data_path, "fluence", "".join([str(e_closest), ".csv"])), 'r') as csvfile:
        r = csv.reader(csvfile, delimiter=' ', quotechar='|',
                       quoting=csv.QUOTE_MINIMAL)
        for row in r:
            t.append([float(a) for a in row[0].split(",")])
    t = np.array(t)
    f = interpolate.RectBivariateSpline(x, u, t, kx=1, ky=1)
    # Note f is returning numpy 1x1 arrays
    return f
Example no. 22
def get_cs(e_0=100, z=74):
    """
    Returns a function representing the scaled bremsstrahlung cross_section.

    Args:
        e_0 (float): The electron kinetic energy, used to scale u=e_e/e_0.
        z (int): Atomic number of the material.

    Returns:
        A function representing cross_section(e_g,u) in mb/keV, with e_g in keV.

    """
    # NOTE: Data is given for E0>1keV. CS values below this level should be used with caution.
    # The default behaviour is to keep it constant
    with open(os.path.join(data_path, "cs/grid.csv"), 'r') as csvfile:
        r = csv.reader(csvfile, delimiter=' ', quotechar='|',
                       quoting=csv.QUOTE_MINIMAL)
        t = next(r)
        e_e = np.array([float(a) for a in t[0].split(",")])
        log_e_e = np.log10(e_e)
        t = next(r)
        k = np.array([float(a) for a in t[0].split(",")])
    t = []
    with open(os.path.join(data_path, "cs/%d.csv" % z), 'r') as csvfile:
        r = csv.reader(csvfile, delimiter=' ', quotechar='|',
                       quoting=csv.QUOTE_MINIMAL)
        for row in r:
            t.append([float(a) for a in row[0].split(",")])
    t = np.array(t)
    scaled = interpolate.RectBivariateSpline(log_e_e, k, t, kx=3, ky=1)
    m_electron = 511
    z2 = z * z
    return lambda e_g, u: (u * e_0 + m_electron) ** 2 * z2 / (u * e_0 * e_g * (u * e_0 + 2 * m_electron)) * (
        scaled(np.log10(u * e_0), e_g / (u * e_0)))
Example no. 23
 def unmunch_all(self):
     self.parse_aff()
     with codecs.open(self.dic_path, "r", "utf-8") as fp:
         next(fp)  # Skip first line.
         for line in fp:
             line = line.split(u" ")[0]
             self.apply_suffix(u"", line)
Example no. 24
    def getContents(self, group):
        if not self._checkFsEndpointPath():
            return

        logger.debug("Scanning for posts (shallow) in: %s" %
                     self.fs_endpoint_path)
        year_pattern = ShallowPostsSource.YEAR_PATTERN
        file_pattern = ShallowPostsSource.FILE_PATTERN
        _, year_dirs, __ = next(osutil.walk(self.fs_endpoint_path))
        year_dirs = [d for d in year_dirs if year_pattern.match(d)]
        for yd in year_dirs:
            if year_pattern.match(yd) is None:
                logger.warning(
                    "'%s' is not formatted as 'YYYY' and will be ignored. "
                    "Is that a typo?", yd)
                continue
            year = int(yd)
            year_dir = os.path.join(self.fs_endpoint_path, yd)

            _, __, filenames = next(osutil.walk(year_dir))
            for f in filenames:
                match = file_pattern.match(f)
                if match is None:
                    name, ext = os.path.splitext(f)
                    logger.warning(
                        "'%s' is not formatted as 'MM-DD_slug-title.%s' "
                        "and will be ignored. Is that a typo?" % (f, ext))
                    continue
                yield self._makeContentItem(
                    os.path.join(yd, f),
                    match.group(3),
                    year,
                    int(match.group(1)),
                    int(match.group(2)))
Example no. 25
    def test_pubsub(self):

        check_pubsub = self.redis.pubsub()
        check_pubsub.psubscribe("gottwall:*")
        next(check_pubsub.listen())

        cli = RedisClient(
            private_key=private_key,
            public_key=public_key,
            project=project,
            host=HOST)
        ts = datetime.utcnow()

        self.assertEquals("gottwall:{0}:{1}:{2}".format(project, public_key, private_key), cli.channel)
        cli.incr(name="orders", value=2, timestamp=ts, filters={"current_status": "Completed"})

        message = next(check_pubsub.listen())

        self.assertEquals(message['channel'], 'gottwall:{0}:{1}:{2}'.format(project, public_key, private_key))

        notification_message = json.loads(message['data'])
        self.assertEquals(notification_message['type'], 'notification')

        data_dict = json.loads(self.redis.spop(cli.data_key))
        self.assertTrue(data_dict['name'], 'orders')
        self.assertTrue(data_dict['timestamp'], ts.strftime("%Y-%m-%dT%H:%M:%S"))
        self.assertTrue(data_dict['filters']['current_status'], 'Completed')

        self.assertEquals(self.redis.scard(cli.data_key), 0)
Example no. 26
 def load_transactions_mock(input_file, **kwargs):
     """ Mock for apyori.load_transactions. """
     eq_(kwargs['delimiter'], delimiter)
     eq_(next(input_file), inputs[0])
     yield iter(input_transactions[0])
     eq_(next(input_file), inputs[1])
     yield iter(input_transactions[1])
Example no. 27
def get_apreture_dic():
    ffline = ff.FortranRecordReader('(3X,a16,3I4,F17.4,F17.10,2F12.3,I4,2x,a16)')
    dic = {}
    with open(MIX_FILE) as f:
        for _ in range(31):
            next(f)
        # for i in range(LAST_RING_ELEMENT):
        while True:
            try:
                line = f.readline()
                if not line:
                    break
                hh = ffline.read(line)
                device_type = int(hh[2])
                if device_type == 2:
                    # this is a drift space, ignore it
                    continue
                    # name = 'DRIFT'
                else:
                    name = hh[0].strip()
                aperture = hh[6]
                if name not in dic:
                    dic.update({name: aperture})
                    # print('{},\t{},\t,{}\n'.format(name, device_type, aperture))
                    # if aperture == 0:
                    #    print(name)
            except:
                pass
    return dic
Example no. 28
def sepLine2List(file2sep, sep="-"):
    """

    :param file:
    default:combine
    """
    try:
        fhIn = open(file2sep,'r')
        all = set()
        for line in fhIn:
            if line == '':
                continue
            line = line.replace(sep,'\n').rstrip()
            line = line.split('\n')
            for i in line:
                all.add(i)
        fhIn.close()

        fhOut = open(file2sep+'_sep','w')
        for i in all:
            fhOut.write(i+'\n')
        fhOut.close()
        print("Entry number left: %s" % len(all))
    except:
        print "error in sepLine2List"
        return False
Example no. 29
    async def async_step_import(self, user_input):
        """Import a config entry."""
        if self.hass.config_entries.async_entries(DOMAIN):
            return self.async_abort(reason='already_setup')

        self._scan_interval = user_input[KEY_SCAN_INTERVAL]
        if user_input[CONF_HOST] != DOMAIN:
            self._hosts.append(user_input[CONF_HOST])

        if not await self.hass.async_add_executor_job(
                os.path.isfile, self.hass.config.path(TELLDUS_CONFIG_FILE)):
            return await self.async_step_user()

        conf = await self.hass.async_add_executor_job(
            load_json, self.hass.config.path(TELLDUS_CONFIG_FILE))
        host = next(iter(conf))

        if user_input[CONF_HOST] != host:
            return await self.async_step_user()

        host = CLOUD_NAME if host == 'tellduslive' else host
        return self.async_create_entry(
            title=host,
            data={
                CONF_HOST: host,
                KEY_SCAN_INTERVAL: self._scan_interval.seconds,
                KEY_SESSION: next(iter(conf.values())),
            })
Example no. 30
	def readFile(self):
		file = "/Users/molgenis/Documents/graduation_project/mendelianViolationFiles/mendelian_violation_allDiploid_Xadjusted.txt"
		
		mother_DP = []
		father_DP = []
		child_DP = []
		transmissionProbability = []

		with open(file) as f:
			next(f)
			for line in f:
				if line != "":
					columns = line.split('\t')
					
					mother_DP.append(columns[6])
					father_DP.append(columns[10])
					child_DP.append(columns[14])
					transmissionProbability.append(columns[4])
				else:
					continue
					
		self.countThresholdMother(mother_DP)
		self.countThresholdFather(father_DP)
		self.countThresholdChild(child_DP)
		self.countTP(transmissionProbability)
Example no. 31
 def to_dict(klass, user=None, **kw):
     try:
         slug = next(PatientList.for_user(user)).get_slug()
     except StopIteration:  # No lists for this user
         slug = ''
     return {klass.slug: slug}
Example no. 32
import csv


def get_top10(data1, key):
    top10 = []
    for i in range(10):

        d = data1[0]
        for j in data1:
            if key(j) > key(d):

                d = j
        top10.append(d)
        data1.remove(d)
    return top10


with open('quake.csv') as file:
    data = []
    reader = csv.reader(file)
    next(reader)
    for row in reader:
        data.append([float(i) for i in row])
# c1 = 0
# for i in data:
# 	if i[3]>5:
# 		c1 += 1
# print(c1)

print(get_top10(data, lambda x: x[0]))
print(get_top10(data, lambda x: x[3]))

east = [i for i in data if i[2] > 0]
print(get_top10(east, lambda x: x[0]))
print(get_top10(east, lambda x: x[3]))
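For reference, the hand-rolled selection in `get_top10` above is what `heapq.nlargest` does in a single call (with the difference that the original also removes the selected rows from `data1`, which this alternative does not):

import heapq

def get_top10_alt(data, key):
    # Same "ten largest by key" selection, without mutating `data`.
    return heapq.nlargest(10, data, key=key)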
Example no. 33
 def __next__(self):
     return next(self._names)
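This `__next__` simply forwards to an internal iterator. A self-contained sketch of the wrapper pattern (the class name and the `itertools.cycle` source are illustrative assumptions):

import itertools

class NamePool:
    """Hands out names by delegating to an internal, here cycling, iterator."""

    def __init__(self, names=('alpha', 'beta', 'gamma')):
        self._names = itertools.cycle(names)

    def __iter__(self):
        return self

    def __next__(self):
        return next(self._names)

pool = NamePool()
print(next(pool), next(pool))  # alpha beta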
Example no. 34
import os
import csv

# read in CSV file
pybank_csv = os.path.join("..", "Resources", "budget_data.csv")
#total = 0

totalprofits = 0
totalmonths = 0
totalchange = 0
previous = 0
maxchange = 0
maxmonth = ""
minchange = 9999999999
minmonth = ""
with open(pybank_csv) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')

    header = next(csvreader, None)
    print(f"Header: {header}")

    for row in csvreader:
        #print(row)

        totalmonths = totalmonths + 1

        totalprofits = totalprofits + int(row[1])

        current = int(row[1])
        change = current - previous

        if change > maxchange:
            maxchange = change
            maxmonth = row[0]
Example no. 35
File: bolt.py Project: s0md3v/Bolt
        url = list(insecureForm.keys())[0]
        action = list(insecureForm.values())[0]['action']
        form = action.replace(target, '')
        if form:
            print ('%s %s %s[%s%s%s]%s' %
                   (bad, url, green, end, form, green, end))

print (' %s Phase: Comparing %s[%s3/6%s]%s' %
       (lightning, green, end, green, end))
uniqueTokens = set(allTokens)
if len(uniqueTokens) < len(allTokens):
    print ('%s Potential Replay Attack condition found' % good)
    print ('%s Verifying and looking for the cause' % run)
    replay = False
    for each in tokenDatabase:
        url, token = next(iter(each.keys())), next(iter(each.values()))
        for each2 in tokenDatabase:
            url2, token2 = next(iter(each2.keys())), next(iter(each2.values()))
            if token == token2 and url != url2:
                print ('%s The same token was used on %s%s%s and %s%s%s' %
                       (good, green, url, end, green, url2, end))
                replay = True
    if not replay:
        print ('%s Further investigation shows that it was a false positive.')

p = Path(__file__).parent.joinpath('db/hashes.json')
with p.open('r') as f:
    hashPatterns = json.load(f)

if not allTokens:
    print ('%s No CSRF protection to test' % bad)
Example no. 36
    callbacks=[history, terminate_on_nan, Checkpoint]
    return callbacks

callbacks_dict = {
        path_to_tensorboard is None                         : default_callback,
        path_to_tensorboard is not None                     : tensorboard_callback,
        }

def switch_case_callbacks(x):
    return callbacks_dict[x]()

with open(sample,'rt') as x, open(encoded_output,'rt') as y:
    row_count = sum(1 for row in open(sample))
    input_reader = csv.reader(x)
    output_reader = csv.reader(y)
    first_input = next(input_reader)
    first_output = next(output_reader)
    input_dim = len(first_input)
    y_categories = len(first_output)
        
    #https://keras.io/models/sequential/#fit_generator
    def generator(encoded_input,encoded_output):
        while True:
            with open(encoded_input,'rt') as x, open(encoded_output,'rt') as y:
                for a,b in zip(x,y):
                    inputa = [int(i) for i in a.split(",")]
                    outputb = [int(j) for j in b.split(",")]
                    X_train = np.array(inputa).reshape(1,-1)
                    Y_train = np.array(outputb).reshape(1,-1)
                    yield (X_train,Y_train)
            x.close()
Example no. 37
import os
import sys

import arcpy

fgdb = sys.argv[0]  #full path to existing file geodatabase
pgdb_dir = sys.argv[1]  #folder to hold personal geodatabase
pgdb_name = sys.argv[2]  #name of personal geodatabase, excluding extension

pgdb = arcpy.CreatePersonalGDB_management(pgdb_dir, pgdb_name)
pgdb = pgdb.__str__()  #extract geodatabase string value for os.path.join

gdb, fds, fcs = next(arcpy.da.Walk(fgdb))
for i in fds + fcs:
    arcpy.Copy_management(os.path.join(gdb, i), os.path.join(pgdb, i))
Example no. 38
    def init_hidden(self, batch_size):
        weight = next(self.parameters()).data
        hidden = (weight.new(self.layers, batch_size, self.hidden_dim).zero_(),
                  weight.new(self.layers, batch_size, self.hidden_dim).zero_())

        return hidden
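Here `next(self.parameters())` is a common PyTorch idiom for grabbing any parameter tensor so the fresh hidden state is created with the model's dtype and device. A minimal sketch of the kind of module this method would live in (layer sizes are illustrative assumptions):

import torch.nn as nn

class CharLSTM(nn.Module):
    def __init__(self, input_dim=32, hidden_dim=128, layers=2):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.layers = layers
        self.lstm = nn.LSTM(input_dim, hidden_dim, layers, batch_first=True)

    def init_hidden(self, batch_size):
        # Any parameter works; we only need its dtype/device via .new().
        weight = next(self.parameters()).data
        return (weight.new(self.layers, batch_size, self.hidden_dim).zero_(),
                weight.new(self.layers, batch_size, self.hidden_dim).zero_())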
Example no. 39
    def testIteration(self):
        # Test the complex interaction when mixing file-iteration and the
        # various read* methods.
        dataoffset = 16384
        filler = b"ham\n"
        assert not dataoffset % len(filler), \
            "dataoffset must be multiple of len(filler)"
        nchunks = dataoffset // len(filler)
        testlines = [
            b"spam, spam and eggs\n",
            b"eggs, spam, ham and spam\n",
            b"saussages, spam, spam and eggs\n",
            b"spam, ham, spam and eggs\n",
            b"spam, spam, spam, spam, spam, ham, spam\n",
            b"wonderful spaaaaaam.\n"
        ]
        methods = [("readline", ()), ("read", ()), ("readlines", ()),
                   ("readinto", (array("b", b" "*100),))]

        # Prepare the testfile
        bag = self.open(TESTFN, "wb")
        bag.write(filler * nchunks)
        bag.writelines(testlines)
        bag.close()
        # Test for appropriate errors mixing read* and iteration
        for methodname, args in methods:
            f = self.open(TESTFN, 'rb')
            self.assertEqual(next(f), filler)
            meth = getattr(f, methodname)
            meth(*args)  # This simply shouldn't fail
            f.close()

        # Test to see if harmless (by accident) mixing of read* and
        # iteration still works. This depends on the size of the internal
        # iteration buffer (currently 8192,) but we can test it in a
        # flexible manner.  Each line in the bag o' ham is 4 bytes
        # ("h", "a", "m", "\n"), so 4096 lines of that should get us
        # exactly on the buffer boundary for any power-of-2 buffersize
        # between 4 and 16384 (inclusive).
        f = self.open(TESTFN, 'rb')
        for i in range(nchunks):
            next(f)
        testline = testlines.pop(0)
        try:
            line = f.readline()
        except ValueError:
            self.fail("readline() after next() with supposedly empty "
                        "iteration-buffer failed anyway")
        if line != testline:
            self.fail("readline() after next() with empty buffer "
                        "failed. Got %r, expected %r" % (line, testline))
        testline = testlines.pop(0)
        buf = array("b", b"\x00" * len(testline))
        try:
            f.readinto(buf)
        except ValueError:
            self.fail("readinto() after next() with supposedly empty "
                        "iteration-buffer failed anyway")
        line = buf.tobytes()
        if line != testline:
            self.fail("readinto() after next() with empty buffer "
                        "failed. Got %r, expected %r" % (line, testline))

        testline = testlines.pop(0)
        try:
            line = f.read(len(testline))
        except ValueError:
            self.fail("read() after next() with supposedly empty "
                        "iteration-buffer failed anyway")
        if line != testline:
            self.fail("read() after next() with empty buffer "
                        "failed. Got %r, expected %r" % (line, testline))
        try:
            lines = f.readlines()
        except ValueError:
            self.fail("readlines() after next() with supposedly empty "
                        "iteration-buffer failed anyway")
        if lines != testlines:
            self.fail("readlines() after next() with empty buffer "
                        "failed. Got %r, expected %r" % (line, testline))
        f.close()

        # Reading after iteration hit EOF shouldn't hurt either
        f = self.open(TESTFN, 'rb')
        try:
            for line in f:
                pass
            try:
                f.readline()
                f.readinto(buf)
                f.read()
                f.readlines()
            except ValueError:
                self.fail("read* failed after next() consumed file")
        finally:
            f.close()
Example no. 40
def plot( data, **kwargs ):
    """
    Given open,high,low,close,volume data for a financial instrument (such as a stock, index,
    currency, future, option, etc.) plot the data.
    Available plots include ohlc bars, candlestick, and line plots.
    Also provide visually analysis in the form of common technical studies, such as:
    moving averages, macd, trading envelope, etc. 
    Also provide ability to plot trading signals, and/or addtional user-defined data.
    """

    dates,opens,highs,lows,closes,volumes = _check_and_prepare_data(data)

    config = _process_kwargs(kwargs, _valid_plot_kwargs())

    style = config['style']
    if isinstance(style,str):
        style = _styles._get_mpfstyle(style)

    if isinstance(style,dict):
        _styles._apply_mpfstyle(style)
    
    w,h = config['figratio']
    r = float(w)/float(h)
    if r < 0.25 or r > 4.0:
        raise ValueError('"figratio" (aspect ratio)  must be between 0.25 and 4.0 (but is '+str(r)+')')
    base      = (w,h)
    figscale  = config['figscale']
    fsize     = [d*figscale for d in base]
    
    fig = plt.figure()
    fig.set_size_inches(fsize)

    if config['volume'] and volumes is None:
        raise ValueError('Request for volume, but NO volume data.')

    # -------------------------------------------------------------
    # For now (06-Feb-2020) to keep the code somewhat simpler for
    # implementing `secondary_y` we are going to ALWAYS create
    # secondary (twinx) axes, whether we need them or not, and 
    # then they will be available to use later when we are plotting:
    # -------------------------------------------------------------

    need_lower_panel = False
    addplot = config['addplot']
    if addplot is not None:
        if isinstance(addplot,dict):
            addplot = [addplot,]   # make list of dict to be consistent
        elif not _list_of_dict(addplot):
            raise TypeError('addplot must be `dict`, or `list of dict`, NOT '+str(type(addplot)))
        for apdict in addplot:
            if apdict['panel'] == 'lower':
                need_lower_panel = True
                break

    #  fig.add_axes( [left, bottom, width, height] ) ... numbers are fraction of fig
    if need_lower_panel or config['volume']:
        ax1 = fig.add_axes( [0.15, 0.38, 0.70, 0.50] )
        ax2 = fig.add_axes( [0.15, 0.18, 0.70, 0.20], sharex=ax1 )
        plt.xticks(rotation=45) # must do this after creation of axis, and
                                # after `sharex`, but must be BEFORE any 'twinx()'
        ax2.set_axisbelow(True) # so grid does not show through volume bars.
        ax4 = ax2.twinx()
        ax4.grid(False)
    else:
        ax1 = fig.add_axes( [0.15, 0.18, 0.70, 0.70] )
        plt.xticks(rotation=45) # must do this after creation of axis, but before any 'twinx()'
        ax2 = None
        ax4 = None
    ax3 = ax1.twinx()
    ax3.grid(False)

    avg_days_between_points = (dates[-1] - dates[0]) / float(len(dates))

    # an average of 3 or more data points per day we will call intraday data:
    if avg_days_between_points < 0.33:  # intraday
        if mdates.num2date(dates[-1]).date() != mdates.num2date(dates[0]).date():
            # intraday data for more than one day:
            fmtstring = '%b %d, %H:%M'
        else:  # intraday data for a single day
            fmtstring = '%H:%M'
    else:  # 'daily' data (or could be weekly, etc.)
        if mdates.num2date(dates[-1]).date().year != mdates.num2date(dates[0]).date().year:
           fmtstring = '%Y-%b-%d'
        else:
           fmtstring = '%b %d'

    if config['show_nontrading']:
        formatter = mdates.DateFormatter(fmtstring)
        xdates = dates
    else:
        formatter = IntegerIndexDateTimeFormatter(dates, fmtstring)
        xdates = np.arange(len(dates))
    
    ax1.xaxis.set_major_formatter(formatter)

    ptype = config['type']

    collections = None
    if ptype == 'candle' or ptype == 'candlestick':
        collections = _construct_candlestick_collections(xdates, opens, highs, lows, closes,
                                                         marketcolors=style['marketcolors'] )
    elif ptype == 'ohlc' or ptype == 'bars' or ptype == 'ohlc_bars':
        collections = _construct_ohlc_collections(xdates, opens, highs, lows, closes,
                                                         marketcolors=style['marketcolors'] )
    elif ptype == 'line':
        ax1.plot(xdates, closes, color=config['linecolor'])
    else:
        raise ValueError('Unrecognized plot type = "'+ptype+'"')

    if collections is not None:
        for collection in collections:
            ax1.add_collection(collection)

    mavgs = config['mav']
    if mavgs is not None:
        if isinstance(mavgs,int):
            mavgs = mavgs,      # convert to tuple 
        if len(mavgs) > 7:
            mavgs = mavgs[0:7]  # take at most 7
     
        if style['mavcolors'] is not None:
            mavc = cycle(style['mavcolors'])
        else:
            mavc = None
            
        for mav in mavgs:
            mavprices = data['Close'].rolling(mav).mean().values 
            if mavc:
                ax1.plot(xdates, mavprices, color=next(mavc))
            else:
                ax1.plot(xdates, mavprices)

    avg_dist_between_points = (xdates[-1] - xdates[0]) / float(len(xdates))
    minx = xdates[0]  - avg_dist_between_points
    maxx = xdates[-1] + avg_dist_between_points
    miny = min([low for low in lows if low != -1])
    maxy = max([high for high in highs if high != -1])
    corners = (minx, miny), (maxx, maxy)
    ax1.update_datalim(corners)

    if config['volume']:
        vup,vdown = style['marketcolors']['volume'].values()
        #-- print('vup,vdown=',vup,vdown)
        vcolors = _updown_colors(vup, vdown, opens, closes, use_prev_close=style['marketcolors']['vcdopcod'])
        #-- print('len(vcolors),len(opens),len(closes)=',len(vcolors),len(opens),len(closes))
        #-- print('vcolors=',vcolors)
        width = 0.5*avg_dist_between_points
        ax2.bar(xdates,volumes,width=width,color=vcolors)
        miny = 0.3 * min(volumes)
        maxy = 1.1 * max(volumes)
        ax2.set_ylim( miny, maxy )
        ax2.xaxis.set_major_formatter(formatter)
    
    used_ax3 = False
    used_ax4 = False
    addplot = config['addplot']
    if addplot is not None:
        # Calculate the Order of Magnitude Range
        # If addplot['secondary_y'] == 'auto', then: If the addplot['data']
        # is out of the Order of Magnitude Range, then use secondary_y.
        # Calculate omrange for Main panel, and for Lower (volume) panel:
        lo = math.log(max(math.fabs(min(lows)),1e-7),10) - 0.5
        hi = math.log(max(math.fabs(max(highs)),1e-7),10) + 0.5
        omrange = {'main' :{'lo':lo,'hi':hi},
                   'lower':None             }
        if config['volume']:
            lo = math.log(max(math.fabs(min(volumes)),1e-7),10) - 0.5
            hi = math.log(max(math.fabs(max(volumes)),1e-7),10) + 0.5
            omrange.update(lower={'lo':lo,'hi':hi})

        if isinstance(addplot,dict):
            addplot = [addplot,]   # make list of dict to be consistent

        elif not _list_of_dict(addplot):
            raise TypeError('addplot must be `dict`, or `list of dict`, NOT '+str(type(addplot)))

        for apdict in addplot:
            apdata = apdict['data']
            if isinstance(apdata,list) and not isinstance(apdata[0],(float,int)):
                raise TypeError('apdata is list but NOT of float or int')
            if isinstance(apdata,pd.DataFrame): 
                havedf = True
            else:
                havedf = False      # must be a single series or array
                apdata = [apdata,]  # make it iterable

            for column in apdata:
                if havedf:
                    ydata = apdata.loc[:,column]
                else:
                    ydata = column
                yd = [y for y in ydata if not math.isnan(y)]
                ymhi = math.log(max(math.fabs(max(yd)),1e-7),10)
                ymlo = math.log(max(math.fabs(min(yd)),1e-7),10)
                secondary_y = False
                if apdict['secondary_y'] == 'auto':
                    if apdict['panel'] == 'lower':
                        # If omrange['lower'] is not yet set by volume,
                        # then set it here as this is the first ydata
                        # to be plotted on the lower panel, so consider
                        # it to be the 'primary' lower panel axis.
                        if omrange['lower'] is None:
                            omrange.update(lower={'lo':ymlo,'hi':ymhi})
                        elif ymlo < omrange['lower']['lo'] or ymhi > omrange['lower']['hi']:
                            secondary_y = True
                    elif ymlo < omrange['main']['lo'] or ymhi > omrange['main']['hi']:
                        secondary_y = True
                    #   if secondary_y:
                    #       print('auto says USE secondary_y')
                    #   else:
                    #       print('auto says do NOT use secondary_y')
                else:
                    secondary_y = apdict['secondary_y']
                    #   print("apdict['secondary_y'] says secondary_y is",secondary_y)

                if apdict['panel'] == 'lower':
                    ax = ax4 if secondary_y else ax2
                else:
                    ax = ax3 if secondary_y else ax1

                if ax == ax3:
                    used_ax3 = True
                if ax == ax4:
                    used_ax4 = True

                if apdict['scatter']:
                    size  = apdict['markersize']
                    mark  = apdict['marker']
                    color = apdict['color']
                    ax.scatter(xdates, ydata, s=size, marker=mark, color=color)
                else:
                    ls    = apdict['linestyle']
                    color = apdict['color']
                    ax.plot(xdates, ydata, linestyle=ls, color=color)

    # put the twinx() on the "other" side:
    if style['y_on_right']:
        ax1.yaxis.set_label_position('right')
        ax1.yaxis.tick_right()
        ax3.yaxis.set_label_position('left')
        ax3.yaxis.tick_left()
        if ax2 and ax4:
            ax2.yaxis.set_label_position('right')
            ax2.yaxis.tick_right()
            if ax4 != ax2:
                 ax4.yaxis.set_label_position('left')
                 ax4.yaxis.tick_left()
    else:
        ax1.yaxis.set_label_position('left')
        ax1.yaxis.tick_left()
        ax3.yaxis.set_label_position('right')
        ax3.yaxis.tick_right()
        if ax2 and ax4:
            ax2.yaxis.set_label_position('left')
            ax2.yaxis.tick_left()
            if ax4 != ax2:
                ax4.yaxis.set_label_position('right')
                ax4.yaxis.tick_right()

    if need_lower_panel or config['volume']:
        ax1.spines['bottom'].set_linewidth(0.25)
        ax2.spines['top'   ].set_linewidth(0.25)
        plt.setp(ax1.get_xticklabels(), visible=False)

    # TODO: ================================================================
    # TODO:  Investigate:
    # TODO:  ===========
    # TODO:  It appears to me that there may be some or significant overlap
    # TODO:  between what the following functions actually do:
    # TODO:  At the very least, all four of them appear to communicate 
    # TODO:  to matplotlib that the xaxis should be treated as dates:
    # TODO:   ->  'ax.autoscale_view()'
    # TODO:   ->  'ax.xaxis_dates()'
    # TODO:   ->  'plt.autofmt_xdates()'
    # TODO:   ->  'fig.autofmt_xdate()'
    # TODO: ================================================================
    

    #if config['autofmt_xdate']:
        #print('CALLING fig.autofmt_xdate()')
        #fig.autofmt_xdate()

    ax1.autoscale_view()  # Is this really necessary??

    ax1.set_ylabel(config['ylabel'])

    if config['volume']:
        ax2.figure.canvas.draw()  # This is needed to calculate offset
        offset = ax2.yaxis.get_major_formatter().get_offset()
        ax2.yaxis.offsetText.set_visible(False)
        if len(offset) > 0:
            offset = (' x '+offset)
        if config['ylabel_lower'] is None:
            vol_label = 'Volume'+offset
        else:
            if len(offset) > 0:
                offset = '\n'+offset
            vol_label = config['ylabel_lower'] + offset
        ax2.set_ylabel(vol_label)

    if config['title'] is not None:
        fig.suptitle(config['title'],size='x-large',weight='semibold')

    if not used_ax3 and ax3 is not None:
        ax3.get_yaxis().set_visible(False)

    if not used_ax4 and ax4 is not None:
        ax4.get_yaxis().set_visible(False)

    if config['savefig'] is not None:
        save = config['savefig']
        if isinstance(save,dict):
            plt.savefig(**save)
        else:
            plt.savefig(save)
    else:
        # https://stackoverflow.com/a/13361748/1639359 suggests plt.show(block=False)
        plt.show(block=config['block'])
Example no. 41
    mlayer['type'][0] = mtype
    mlayer['inputs'][0] = rowcell(inputs)
    mlayer['outputs'][0] = rowcell(outputs)
    mlayer['params'][0] = rowcell(params)
    mlayer['block'][0] = dict_to_struct_array(mopts)

    mnet['layers'] = np.append(mnet['layers'], mlayer)
    mnet['params'] = np.append(mnet['params'], mparam)

# --------------------------------------------------------------------
#                                                       Append softmax
# --------------------------------------------------------------------

for i, name in enumerate(args.append_softmax):
    # search for a layer of the specified name
    l = next((i for (i, l) in enumerate(layers) if l.name == name), None)
    if l is None:
        print 'Cannot append softmax to layer {} as no such layer could be found'.format(
            name)
        sys.exit(1)

    if len(args.append_softmax) > 1:
        layerName = 'softmax' + str(i + 1)
        outputs = ['prediction' + str(i + 1)]
    else:
        layerName = 'softmax'
        outputs = ['prediction']
    inputs = from_redirect(layers[l].top[0:1])

    print 'Appending softmax layer \'{}\' after layer \'{}\''.format(
        layerName, name)
Example no. 42
def main(csvname, module):
    print "Processing CSV file: {}".format(csvname)
    rename_module = None
    if module:
        try:
            rename_module = parse_module(module)
        except Exception as e:
            print "Error loading module {}: {}".format(module, e)
            return
    try:
        f = open(csvname, 'r')
    except Exception as e:
        print "Error opening file: {}".format(e)

    print "Parsing CSV file..."
    columns = []
    reader = csv.reader(f, delimiter=',', quotechar='\"')
    line = next(reader)
    for i, title in enumerate(line):
        columns.append(CSVColumn(title, i))
    for line in reader:
        for i, value in enumerate(line):
            columns[i].add_one(value.decode('utf-8'))
    f.close()

    print "\nStatistics of CSV file:"
    print "Columns: {}".format(len(columns))
    print "Rows: {}".format(columns[0].nr_rows)

    print "\nRe-formatting columns"
    reformatted = []
    for col in columns:
        print "\n" + "=" * 36
        col.show_info()

        yes = raw_input("\nAre you keeping this column(Y/N)? ")
        yes = yes.lower() in ('y', 'yes')
        if not yes:
            print "Skipping this column..."
            continue
        reformatted.append(col)

        yes = raw_input(
            "Keeping this column, are you renaming the values(Y/N)? ")
        yes = yes.lower() in ('y', 'yes')
        if not yes:
            print "Keeping the column values as they were..."
            continue
        elif not rename_module:
            print "Cannot rename columns because no module has been specified"
        else:
            print "Renaming the values..."
            col.rename(rename_module)

    print "\nWriting the re-formatted CSV"
    with open(csvname + '.reformat', 'w') as wf:
        writer = csv.writer(wf,
                            delimiter=',',
                            quotechar='\"',
                            quoting=csv.QUOTE_MINIMAL)
        with open(csvname, 'r') as f:
            reader = csv.reader(f, delimiter=',', quotechar='\"')
            next(reader)
            for line in reader:
                values = []
                for col in reformatted:
                    if col.rename_fun:
                        values.append(
                            col.rename_fun(line[col.index].decode('utf-8')))
                    else:
                        values.append(line[col.index].decode('utf-8'))
                writer.writerow([v.encode('utf-8') for v in values if v])
Example no. 43
 def reset(self):
     self.name = next(self._names)
Example no. 44
    def create_terms(self,
                     obo_path,
                     shortname,
                     attrs=["def", "synonym", "subset", "alt_id", "dbxref"]):
        self.cache = {}
        ids = []
        with open(
                obo_path
        ) as h:  # filter the ids because GODag iterates over alt_ids too
            for l in h.readlines():
                if l.startswith("id: " + shortname.upper() + ":"):
                    ids.append(l.split(" ")[1].strip())
        ids = set(ids)

        # For SO, use attrs = ["def", "subset", "dbxref", "alt_id"]

        go_dag = GODag(obo_path, load_obsolete=True, optional_attrs=attrs)

        finished = False
        pbar = iter(tqdm(ids))
        while not finished:
            with transaction.atomic():
                for _ in range(2000):
                    try:
                        go = next(pbar)
                        if go not in go_dag:
                            continue
                        term = go_dag[go]
                        if not Term.objects.filter(ontology=self.ontology,
                                                   identifier=go).exists():
                            dbTerm = Term(
                                name=term.name,
                                definition=term.defn
                                if hasattr(term, "defn") else "",
                                identifier=go,
                                is_obsolete="T" if term.is_obsolete else "F",
                                ontology=self.ontology)
                            dbTerm.save()
                            if term.namespace:
                                termdbref = TermDbxref(
                                    term=dbTerm,
                                    dbxref=Ontology.dbmap[term.namespace],
                                    rank=1)
                                termdbref.save()

                            for subset in term.subset:
                                if subset in Ontology.dbmap:
                                    termdbref = TermDbxref(
                                        term=dbTerm,
                                        dbxref=Ontology.dbmap[subset],
                                        rank=1)
                                    termdbref.save()
                            if hasattr(term, "synonym"):
                                for synonym in term.synonym:
                                    TermSynonym.objects.get_or_create(
                                        term=dbTerm, synonym=synonym[0][:255])

                            # alt_ids are plain id strings (unlike the synonym tuples above)
                            for alt_id in term.alt_ids:
                                TermSynonym.objects.get_or_create(
                                    term=dbTerm, synonym=alt_id[:255])

                            self.cache[go] = dbTerm
                        else:
                            self.cache[go] = Term.objects.filter(
                                ontology=self.ontology, identifier=go).get()
                            print("repeated: " + go)
                    except StopIteration:
                        finished = True
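The loop above drains an iterator in fixed-size chunks with next(), catching StopIteration so that each chunk of database writes can be wrapped in a single transaction.atomic() block. Stripped of the Django and goatools specifics, the pattern reduces to this sketch (the chunk size and data are illustrative):

it = iter(range(10))
finished = False
while not finished:
    chunk = []
    for _ in range(4):  # at most 4 items per chunk
        try:
            chunk.append(next(it))
        except StopIteration:
            finished = True
            break
    print(chunk)  # prints [0, 1, 2, 3], then [4, 5, 6, 7], then [8, 9]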
Esempio n. 45
0
LOGGING['loggers']['awx']['level'] = 'INFO'  # noqa

ALLOWED_HOSTS = ['*']

mimetypes.add_type("image/svg+xml", ".svg", True)
mimetypes.add_type("image/svg+xml", ".svgz", True)

# Allow sending session cookies over insecure connections (development override)
SESSION_COOKIE_SECURE = False

# Allow sending CSRF cookies over insecure connections (development override)
CSRF_COOKIE_SECURE = False

# Override django.template.loaders.cached.Loader in defaults.py
template = next(
    (tpl_backend
     for tpl_backend in TEMPLATES if tpl_backend['NAME'] == 'default'),
    None)  # noqa
template['OPTIONS']['loaders'] = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader')

CALLBACK_QUEUE = "callback_tasks"

# Enable dynamically pulling roles from a requirement.yml file
# when updating SCM projects
# Note: This setting may be overridden by database settings.
AWX_ROLES_ENABLED = True

# Disable Pendo on the UI for development/test.
# Note: This setting may be overridden by database settings.
PENDO_TRACKING_STATE = "off"
Esempio n. 46
0
#                       lr=BASE_LR,
#                       momentum=0.9,
#                       weight_decay=0.0005)
# print(torch_summarize(net))
# print(net)
if USE_GPU:
    net.cuda()
    # net = torch.nn.DataParallel(net.module, device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True

log = open("./log/" + MODEL_NAME + '.txt', 'a')
print("==> Preparing data...")
data_loader = DataLoader(data_dir=args.data,
                         image_size=IMAGE_SIZE,
                         batch_size=BATCH_SIZE)
inputs, classes = next(iter(data_loader.load_data()))
# out = torchvision.utils.make_grid(inputs)
# data_loader.show_image(out, title=[data_loader.data_classes[c] for c in classes])
train_loader = data_loader.load_data(data_set='train')
test_loader = data_loader.load_data(data_set='val')
criterion = WARPLoss()


# def one_hot_emb(batch, depth=NUM_CLASSES):
#     emb = nn.Embedding(depth, depth)
#     emb.weight.data = torch.eye(depth)
#     return emb(batch).data
def one_hot_emb(y, depth=NUM_CLASSES):
    y = y.view((-1, 1))
    one_hot = torch.FloatTensor(y.size(0), depth).zero_()
    one_hot.scatter_(1, y, 1)
    return one_hot
Esempio n. 47
0
 def __next__(self):
     with self.lock:
         return next(self.iterator)
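This __next__ comes from a thread-safe iterator wrapper; the rest of the class is not shown. A self-contained version of the same idea (the class name and constructor are assumptions) looks like this:

import threading


class ThreadSafeIterator:
    """Wrap an iterator so that concurrent next() calls are serialized by a lock."""

    def __init__(self, iterator):
        self.iterator = iterator
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        with self.lock:
            return next(self.iterator)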
Esempio n. 48
0
 def wrapper(*args, **kwargs):
     gen = fn(*args, **kwargs)
     next(gen)
     return gen
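The wrapper above is the body of a generator-priming decorator: it calls next() once so the returned generator is already paused at its first yield and can receive send() immediately. A self-contained sketch of the full pattern (the decorator and coroutine names are illustrative):

def coroutine(fn):
    def wrapper(*args, **kwargs):
        gen = fn(*args, **kwargs)
        next(gen)  # advance to the first yield
        return gen
    return wrapper


@coroutine
def printer():
    while True:
        item = yield
        print(item)


p = printer()
p.send('hello')  # works right away; no explicit next(p) needed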
Esempio n. 49
0
 def get_component(self, component_type):
     return next((component for component in self.components
                  if isinstance(component, component_type)), None)
Esempio n. 50
0
import pytest

def generator_1():
    i = 0
    while True:
        i = i + 1
        yield i * i


generator_obj = generator_1()

print("type of generator_obj = ", type(generator_obj))

while True:
    res = next(generator_obj)
    if res > 100:
        break
    print(f"type of res = {type(res)}")
    print(f"res = {res}")

generator_obj = generator_1()

for res in generator_1():
    print(f"type of res = {type(res)}")
    if res > 100:
        break
    print(f"res = {res}")


def generator_2():
    g = 555
Esempio n. 51
0
# 1. Define the input data and preprocess it
# First the audio has to be turned into a matrix form the network can read;
# Mel-frequency cepstral coefficient (MFCC) feature vectors are used for this.
import tflearn  # works under Python 3
import speech_data  # provides mfcc_batch_generator(); needed below
import tensorflow as tf

learning_rate = 0.0001
training_iters = 300000  # number of training iterations
batch_size = 64

width = 20    # MFCC features
height = 80   # maximum utterance length
classes = 10  # digit classes

batch = word_batch = speech_data.mfcc_batch_generator(batch_size)  # yields batches of MFCC features
X, Y = next(batch)
trainX, trainY = X, Y
testX, testY = X, Y

# 2. Define the network model
# With tflearn this is very concise: a long short-term memory (LSTM) model is
# defined in just four lines of code.
net = tflearn.input_data([None, width, height])
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, classes, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                         loss='categorical_crossentropy')

# 3. Train the model (p. 202)
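# The training code itself is not part of the snippet; a minimal sketch of the
# step the comment above refers to (the tflearn.DNN call and the hyperparameters
# are assumptions, not taken from the original source):
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX, trainY, n_epoch=10, validation_set=(testX, testY),
          show_metric=True, batch_size=batch_size)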


Esempio n. 52
0
f = open('datafile.txt')

print(next(f))

print(f.__next__())

print(f.__next__())

L = [1, 2, 3]

i = iter(L)

print(i.__next__())

print(next(i))

print(i.__next__())

print('\n\n')

r = range(5)

x = iter(r)

print(x.__next__())

print(next(x))



Esempio n. 53
0
def ObtainQuantity(
    unit: Union[str, None, List[UnitExponentTuple], Dict[str, Sequence[Union[str, int]]]],
    category: Optional[Union[Tuple[str, ...], str]] = None,
    unknown_unit_caption: Optional[str] = None,
) -> "Quantity":
    """
    :type unit: str or OrderedDict(str -> list(str, int))
    :param unit:
        Either the string representing the unit or an ordered dict with the composing unit
        information (if composing all the info, including the category will be received in this
        parameter).

    :param str category:
        The category for the quantity. If not given it's gotten based on the unit passed.

    :param str unknown_unit_caption:
        The caption for the unit (used if unknown).

    :rtype Quantity:
    """
    unit_database = UnitDatabase.GetSingleton()
    quantities_cache = unit_database.quantities_cache

    if isinstance(unit, (list, tuple)):
        # It may be a derived unit with list(tuple(str, int)) -- in which case the category
        # must also be a list (of the same size)
        if len(unit) == 1 and unit[0][1] == 1:
            # Although passed as composing, it's a simple case
            unit = unit[0][0]
            if isinstance(category, (list, tuple)):
                category = category[0]
        else:
            assert isinstance(category, (list, tuple))
            unit = OrderedDict((cat, unit_and_exp) for (cat, unit_and_exp) in zip(category, unit))
            category = None

    if isinstance(unit, dict):
        assert category is None
        if len(unit) == 1 and next(iter(unit.values()))[1] == 1:  # type:ignore[comparison-overlap]
            # Although passed as composing, it's a simple case
            category, (unit, _exp) = next(iter(unit.items()))  # type:ignore[assignment]
        else:
            key: List[Any] = [
                (category, tuple(unit_and_exp)) for (category, unit_and_exp) in unit.items()
            ]
            if unknown_unit_caption:
                key.append(unknown_unit_caption)
            try:
                return quantities_cache[tuple(key)]
            except KeyError:
                quantity = quantities_cache[tuple(key)] = Quantity(unit, None, unknown_unit_caption)
                return quantity

    key = (category, unit, unknown_unit_caption)  # type:ignore[assignment]
    try:
        return quantities_cache[key]
    except KeyError:
        pass  # Just go on with the regular flow.

    if not isinstance(unit, str):
        if category is None:
            raise AssertionError("Currently only supporting unit as a string.")
        else:
            # Unit is given by the category
            unit = unit_database.GetDefaultUnit(category)
        quantity = quantities_cache[key] = Quantity(category, unit, unknown_unit_caption)
        return quantity

    elif category is None:
        category = unit_database.GetDefaultCategory(unit)
        if not category:
            is_legacy, unit = FixUnitIfIsLegacy(unit)
            if is_legacy:
                category = unit_database.GetDefaultCategory(unit)
            else:
                raise UnitsError(f"Unable to get default category for: {unit}")

        key_with_resolved_category = (category, unit, unknown_unit_caption)
        try:
            return quantities_cache[key_with_resolved_category]
        except KeyError:
            quantity = quantities_cache[key_with_resolved_category] = Quantity(
                category, unit, unknown_unit_caption
            )
            # Cache it with None category too.
            quantities_cache[key] = quantity
            return quantity

    else:
        quantities_cache[key] = quantity = Quantity(category, unit, unknown_unit_caption)
        return quantity
Esempio n. 54
0
    def build_posterior(
        self,
        density_estimator: Optional[TorchModule] = None,
        sample_with: str = "mcmc",
        mcmc_method: str = "slice_np",
        mcmc_parameters: Optional[Dict[str, Any]] = None,
        rejection_sampling_parameters: Optional[Dict[str, Any]] = None,
    ) -> LikelihoodBasedPosterior:
        r"""
        Build posterior from the neural density estimator.

        SNLE trains a neural network to approximate the likelihood $p(x|\theta)$. The
        `LikelihoodBasedPosterior` class wraps the trained network such that one can
        directly evaluate the unnormalized posterior log probability
        $p(\theta|x) \propto p(x|\theta) \cdot p(\theta)$ and draw samples from the
        posterior with MCMC.

        Args:
            density_estimator: The density estimator that the posterior is based on.
                If `None`, use the latest neural density estimator that was trained.
            sample_with: Method to use for sampling from the posterior. Must be one of
                [`mcmc` | `rejection`].
            mcmc_method: Method used for MCMC sampling, one of `slice_np`, `slice`,
                `hmc`, `nuts`. Currently defaults to `slice_np` for a custom numpy
                implementation of slice sampling; select `hmc`, `nuts` or `slice` for
                Pyro-based sampling.
            mcmc_parameters: Dictionary overriding the default parameters for MCMC.
                The following parameters are supported: `thin` to set the thinning
                factor for the chain, `warmup_steps` to set the initial number of
                samples to discard, `num_chains` for the number of chains,
                `init_strategy` for the initialisation strategy for chains; `prior` will
                draw init locations from prior, whereas `sir` will use
                Sequential-Importance-Resampling using `init_strategy_num_candidates`
                to find init locations.
            rejection_sampling_parameters: Dictionary overriding the default parameters
                for rejection sampling. The following parameters are supported:
                `proposal` as the proposal distribution (default is the prior).
                `max_sampling_batch_size` as the batch size of samples being drawn from
                the proposal at every iteration. `num_samples_to_find_max` as the
                number of samples that are used to find the maximum of the
                `potential_fn / proposal` ratio. `num_iter_to_find_max` as the number
                of gradient ascent iterations to find the maximum of that ratio. `m` as
                multiplier to that ratio.

        Returns:
            Posterior $p(\theta|x)$  with `.sample()` and `.log_prob()` methods
            (the returned log-probability is unnormalized).
        """

        if density_estimator is None:
            density_estimator = self._neural_net
            # If internal net is used device is defined.
            device = self._device
        else:
            # Otherwise, infer it from the device of the net parameters.
            device = next(density_estimator.parameters()).device

        self._posterior = LikelihoodBasedPosterior(
            method_family="snle",
            neural_net=density_estimator,
            prior=self._prior,
            x_shape=self._x_shape,
            sample_with=sample_with,
            mcmc_method=mcmc_method,
            mcmc_parameters=mcmc_parameters,
            rejection_sampling_parameters=rejection_sampling_parameters,
            device=device,
        )

        self._posterior._num_trained_rounds = self._round + 1

        # Store models at end of each round.
        self._model_bank.append(deepcopy(self._posterior))
        self._model_bank[-1].net.eval()

        return deepcopy(self._posterior)
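A hedged usage sketch for the method above (the inference object and the observation x_o are illustrative; the docstring only promises that the returned posterior exposes .sample() and .log_prob()):

posterior = inference.build_posterior(sample_with="mcmc", mcmc_method="slice_np")
samples = posterior.sample((1000,), x=x_o)
log_probs = posterior.log_prob(samples, x=x_o)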
Esempio n. 55
0
 def remove_widget(self, widget):
     action = next(
         iter(a for a in self.actions()
              if self.widgetForAction(a) is widget), None)
     if action:
         self.removeAction(action)
Esempio n. 56
0
 def compute_delay_buffer(self) -> None:
     """
     Computes the delay buffer sizes in the graph by propagating all paths from the input arrays to the successors in
     topological order. Delay buffer entries should be of the format: kernel.input_paths:{
                                                                             "in1": [[a,b,c, pred1], [d,e,f, pred2],
                                                                             ...],
                                                                             "in2": [ ... ],
                                                                             ...
                                                                         }
     where inX are input arrays to the stencil chain and predY are the kernel predecessors/inputs
     """
     # get topological order for top-down walk through of the graph
     try:
         order = list(nx.topological_sort(self.graph))
     except nx.exception.NetworkXUnfeasible:
         cycle = next(nx.algorithms.cycles.simple_cycles(self.graph))
         raise ValueError("Cycle detected: {}".format(
             [c.name for c in cycle]))
     # go through all nodes
     for node in order:
         # process delay buffer (no additional delay buffer will appear because of the topological order)
         for inp in node.input_paths:
             # compute maximum delay size per input
             max_delay = max(node.input_paths[inp])
             max_delay[2] += 1  # add an extra delay cycle for the processing in the kernel node
             # loop over all inputs and set their size relative to the max size to have data ready at the exact
             # same time
             for entry in node.input_paths[inp]:
                 name = entry[-1]
                 max_size = stencilflow.convert_3d_to_1d(
                     dimensions=self.dimensions,
                     index=stencilflow.list_subtract_cwise(
                         max_delay[:-1], entry[:-1]))
                 node.delay_buffer[name] = BoundedQueue(name=name,
                                                        maxsize=max_size)
                 node.delay_buffer[name].import_data(
                     [None] * node.delay_buffer[name].maxsize)
         # set input node delay buffers to 1
         if isinstance(node, Input):
             node.delay_buffer = BoundedQueue(name=node.name,
                                              maxsize=1,
                                              collection=[None])
         # propagate the path lengths (from input arrays over all ways) to the successors
         for succ in self.graph.successors(node):
             # add input node to all as direct input (=0 delay buffer)
             if isinstance(node, Input):
                 # add empty list dictionary entry for enabling list append()
                 if node.name not in succ.input_paths:
                     succ.input_paths[node.name] = []
                 successor = [0] * len(self.dimensions)
                 successor = successor + [node.name]
                 succ.input_paths[node.name].append(successor)
             # add kernel node to all, but calculate the length first (predecessor + delay + internal, ..)
             elif isinstance(node, Kernel):  # add KERNEL
                 # add latency, internal_buffer, delay_buffer
                 internal_buffer = [0] * 3
                 for item in node.graph.accesses:
                     internal_buffer = max(
                         node.graph.accesses[item]
                     ) if KernelChainGraph.greater(
                         max(node.graph.accesses[item]),
                         internal_buffer) else internal_buffer
                 # latency
                 latency = self.kernel_nodes[node.name].graph.max_latency
                 # compute delay buffer and create entry
                 for entry in node.input_paths:
                     # the first entry has to initialize the structure
                     if entry not in succ.input_paths:
                         succ.input_paths[entry] = []
                     # compute the actual delay buffer
                     delay_buffer = max(node.input_paths[entry][:])
                     # merge them together
                     total = [
                         i + d if i is not None else d
                         for i, d in zip(internal_buffer, delay_buffer)
                     ]
                     # add the latency too
                     total[-1] += latency
                     total.append(node.name)
                     # add entry to paths
                     succ.input_paths[entry].append(total)
             else:  # NodeType.OUTPUT: do nothing
                 continue
Esempio n. 57
0
 def check_resources():
     target_node = next(node for node in ray.nodes()
                        if node["NodeID"] == target_node_id)
     resources = target_node["Resources"]
     return res_name not in resources
Esempio n. 58
0
def findMinFibonacciNumbers(k: int) -> int:
    fib_gen = fib()
    fib_nums = [next(fib_gen)]
    while fib_nums[-1] < k: fib_nums.append(next(fib_gen))
    min_nums = find_sum(fib_nums, k)
    return min_nums
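fib() and find_sum() are not part of the snippet; plausible implementations consistent with the usage above (assumptions: an infinite Fibonacci generator and a greedy largest-first count) would be:

def fib():
    a, b = 1, 1
    while True:
        yield a
        a, b = b, a + b


def find_sum(fib_nums, k):
    # greedy: repeatedly take the largest Fibonacci number not exceeding the remainder
    count = 0
    for n in reversed(fib_nums):
        if n <= k:
            k -= n
            count += 1
        if k == 0:
            break
    return count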
Esempio n. 59
0
    def check_resources():
        target_node = next(node for node in ray.nodes()
                           if node["NodeID"] == target_node_id)
        resources = target_node["Resources"]

        return (res_name in resources and resources[res_name] == res_capacity)
Esempio n. 60
0
 def preppend_widget(self, widget):
     first_action = next(iter(self.actions()), None)
     if first_action:
         self.insertWidget(first_action, widget)
     else:
         self.addWidget(widget)