def test_admin_setup(self):
    # PUTs for account and 16 .hash's
    self.test_origin.app = FakeApp(iter(
        [('204 No Content', {}, '') for i in xrange(102)]))
    resp = Request.blank('/origin/.prep',
        environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-Origin-Admin-User': '.origin_admin',
                 'X-Origin-Admin-Key': 'unittest'}).get_response(
        self.test_origin)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(self.test_origin.app.calls, 101)

    self.test_origin.app = FakeApp(iter(
        [('404 Not Found', {}, '')]))
    req = Request.blank('/origin/.prep',
        environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-Origin-Admin-User': '.origin_admin',
                 'X-Origin-Admin-Key': 'unittest'})
    self.assertRaises(Exception, req.get_response, self.test_origin)

    self.test_origin.app = FakeApp(iter(
        [('204 No Content', {}, ''), ('404 Not Found', {}, '')]))
    req = Request.blank('/origin/.prep',
        environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-Origin-Admin-User': '.origin_admin',
                 'X-Origin-Admin-Key': 'unittest'})
    self.assertRaises(Exception, req.get_response, self.test_origin)
def select_map(self, latitude, longitude):
    """
    Find and display a nearby track by latitude / longitude.
    The selection will favor a previously selected track in the nearby area.

    :param latitude:
    :type latitude: float
    :param longitude:
    :type longitude: float
    :returns: the selected track, or None if there are no nearby tracks
    :rtype: Track
    """
    if not latitude or not longitude:
        return None

    point = GeoPoint.fromPoint(latitude, longitude)
    nearby_tracks = self.track_manager.find_nearby_tracks(point)
    saved_tracks = self.get_pref_track_selections()

    saved_nearby_tracks = [t for t in nearby_tracks if t.track_id in saved_tracks]

    # select the saved nearby track, or fall back to any nearby track
    track = next(iter(saved_nearby_tracks), None)
    track = next(iter(nearby_tracks), None) if track is None else track

    if self.track != track:
        # only update the trackmap if it's changing
        self._select_track(track)

    return track
def test_bison_lalr_repr_automaton_lr0():
    ex = grammar_examples.lr0.ex_minimal1
    grammar = ex.grammar

    automaton = compute_automaton(grammar)
    assert repr(automaton) == '<Automaton with 4 states>'
    assert repr(automaton._data) == '''
[<StateData #0 with 1 actions, 1 gotos
  <bison.ItemSet #0, size 2
    < $accept → • Root $eof ∥ >
    < Root → • term ∥ >
  >>,
 <StateData #1 with 1 actions, 0 gotos
  <bison.ItemSet #1, size 1
    < Root → term • ∥ { $eof } >
  >>,
 <StateData #2 with 1 actions, 0 gotos
  <bison.ItemSet #2, size 1
    < $accept → Root • $eof ∥ >
  >>,
 <StateData #3 with 2 actions, 0 gotos
  <bison.ItemSet #3, size 1
    < $accept → Root $eof • ∥ >
  >>]
'''.strip().replace('•', _mdot).replace('∥', _parallel)
    assert repr(automaton._data[0]._id) == '''
<StateId for <StateData #0 with 1 actions, 1 gotos
  <bison.ItemSet #0, size 2
    < $accept → • Root $eof ∥ >
    < Root → • term ∥ >
  >>>
'''.strip().replace('•', _mdot).replace('∥', _parallel)
    assert repr(next(iter(automaton._data[0]._actions.values()))) == 'Shift(<state 1>)'
    assert repr(next(iter(automaton._data[1]._actions.values()))) == 'Reduce(<rule 1>)'
    assert repr(next(iter(automaton._data[0]._gotos.values()))) == 'Goto(<state 2>)'
def __reduce__(self):
    return (__newobj__,                               # callable
            (PickleProtocol2ReduceNewobj, 'yam', 1),  # args
            None,                                     # state
            iter([]),                                 # listitems
            iter([]),                                 # dictitems
            )
def __reduce__(self):
    return (PickleProtocol2ReduceTuple,  # callable
            ('yam', 1),                  # args
            {'foo': 1},                  # state
            iter([]),                    # listitems
            iter([]),                    # dictitems
            )
def test_count():
    assert count((1, 2, 3)) == 3
    assert count([]) == 0
    assert count(iter((1, 2, 3, 4))) == 4
    assert count("hello") == 5
    assert count(iter("hello")) == 5
def train(self, inp, out, training_weight=1.):
    inp = np.mat(inp).T
    out = np.mat(out).T
    deriv = []
    val = inp
    vals = [val]
    # forward calculation of activations and derivatives
    for weight, bias in self.__weights:
        val = weight * val
        val += bias
        deriv.append(self.__derivative(val))
        vals.append(self.__activation(val))
    deriv = iter(reversed(deriv))
    weights = iter(reversed(self.__weights))
    errs = []
    errs.append(np.multiply(vals[-1] - out, next(deriv)))
    # backwards propagation of errors
    for (w, b), d in zip(weights, deriv):
        errs.append(np.multiply(np.dot(w.T, errs[-1]), d))
    weights = iter(self.__weights)
    for (w, b), v, e in zip(self.__weights, vals, reversed(errs)):
        e *= self.__learning_rate * training_weight
        w -= e * v.T
        b -= e
    tmp = vals[-1] - out
    return np.dot(tmp[0].T, tmp[0]) * .5 * training_weight
def tag_info_chart(self):
    """ Make the taginfo.txt plot """
    ## TODO: human chrs on hg19. How will this work with GRCh genome or other, non human, genomes?
    # nice if they are ordered by size
    # note: list.append() returns None, so build the full lists with concatenation
    ucsc = ["chr" + str(i) for i in range(1, 23)] + ["chrX", "chrY", "chrM"]
    ensembl = list(range(1, 23)) + ["X", "Y", "MT"]
    pconfig = {
        'id': 'tagInfo',
        'title': 'Homer: Tag Info Distribution',
        'ylab': 'Tags',
        'cpswitch_counts_label': 'Number of Tags'
    }

    ## check if chromosomes start with "chr" (UCSC) or not (Ensembl)
    sample1 = next(iter(self.tagdir_data['taginfo_total']))
    chrFormat = next(iter(self.tagdir_data['taginfo_total'][sample1]))
    if "chr" in chrFormat:
        chrs = ucsc
    else:
        chrs = ensembl

    return bargraph.plot(self.tagdir_data['taginfo_total'], chrs, pconfig)
def open_moinpage_part(self, elem):
    type = elem.get(moin_page.content_type, u"").split(u';')
    if len(type) == 2:
        if type[0] == u"x-moin/macro":
            if len(elem) and iter(elem).next().tag.name == "arguments":
                alt = u"<<{0}({1})>>".format(
                    type[1].split(u'=')[1],
                    u','.join([u''.join(c.itertext()) for c in iter(elem).next()
                               if c.tag.name == "argument"]))
            else:
                alt = u"<<{0}()>>".format(type[1].split(u'=')[1])
            obj = u".. |{0}| macro:: {1}".format(alt, alt)
            self.objects.append(obj)
            return u" |{0}| ".format(alt)
        elif type[0] == u"x-moin/format":
            elem_it = iter(elem)
            ret = u"\n\n.. parser:{0}".format(type[1].split(u'=')[1])
            if len(elem) and elem_it.next().tag.name == "arguments":
                args = []
                for arg in iter(elem).next():
                    if arg.tag.name == "argument":
                        args.append(u"{0}=\"{1}\"".format(
                            arg.get(moin_page.name, u""),
                            u' '.join(arg.itertext())))
                ret = u'{0} {1}'.format(ret, u' '.join(args))
                elem = elem_it.next()
            ret = u"{0}\n {1}".format(ret, u' '.join(elem.itertext()))
            return ret
    return elem.get(moin_page.alt, u'') + u"\n"
def __init__(self, iterator, unit=None, computer_prefix=None, display=MULTI_LINE):
    """Create a new progress display.

    'iterator' is the iterator containing the work to be done.
    'unit' is the unit to be displayed to the user.
    'computer_prefix' should be set to True if this unit requires prefix
    increments of 1024 instead of the traditional 1000. If it is not set,
    then the class tries to guess based on 'unit'.
    'display' defaults to MULTI_LINE to print a new line for every update,
    or can be SINGLE_LINE to keep updating a single status line.
    """
    if hasattr(iterator, "__len__"):
        # This may be an expensive operation, for instance on a
        # hypothetical os.walk() which implements __len__.
        length = len(iterator)
        self.iterator = iter(iterator)
    else:
        list = []
        # TODO: isn't there some kind of builtin expand operation?
        for i in iterator:
            list.append(i)
        length = len(list)
        self.iterator = iter(list)
    self.progress = Progress(length, unit, computer_prefix)
    self.display = display
    # The first call to next is before the work actually starts, so we
    # shouldn't increment() at that point.
    self.first = True
def __iter__(self):
    # Avoid usage of zip() below since it will consume one item too many.
    it1, it2 = iter(self.reader1), iter(self.reader2)
    while True:
        try:
            r1 = next(it1)
        except StopIteration:
            # End of file 1. Make sure that file 2 is also at end.
            try:
                next(it2)
                raise FormatError("Reads are improperly paired. There are more reads in file 2 than in file 1.")
            except StopIteration:
                pass
            break
        try:
            r2 = next(it2)
        except StopIteration:
            raise FormatError("Reads are improperly paired. There are more reads in file 1 than in file 2.")
        name1 = r1.name.split(None, 1)[0]
        name2 = r2.name.split(None, 1)[0]
        if name1[-2:-1] == '/':
            name1 = name1[:-2]
        if name2[-2:-1] == '/':
            name2 = name2[:-2]
        if name1 != name2:
            raise FormatError("Reads are improperly paired. Read name '{0}' "
                "in file 1 not equal to '{1}' in file 2.".format(name1, name2))
        yield (r1, r2)
def test_cdn_get_regex(self):
    prev_data = json.dumps({'account': 'acc', 'container': 'cont',
        'ttl': 1234, 'logs_enabled': True, 'cdn_enabled': True})

    def check_urls(req):
        vrs, acc, cont, obj = utils.split_path(req.path, 1, 4)
        self.assertEquals(acc, 'acc')
        self.assertEquals(cont, 'cont')
        self.assertEquals(obj, 'obj1.jpg')

    self.test_origin.app = FakeApp(iter([
        ('204 No Content', {}, prev_data),  # call to _get_cdn_data
        ('304 No Content', {}, '', check_urls)]))  # call to get obj
    req = Request.blank('http://1234.r3.origin_cdn.com:8080/obj1.jpg',
        environ={'REQUEST_METHOD': 'GET'})
    resp = req.get_response(self.test_origin)
    self.assertEquals(resp.status_int, 304)

    self.test_origin.app = FakeApp(iter([
        ('204 No Content', {}, prev_data),  # call to _get_cdn_data
        ('304 No Content', {}, '', check_urls)]))  # call to get obj
    req = Request.blank('http://r3.origin_cdn.com:8080/nohash/obj1.jpg',
        environ={'REQUEST_METHOD': 'GET'})
    resp = req.get_response(self.test_origin)
    self.assertEquals(resp.status_int, 404)
def test_cdn_get_no_content(self):
    prev_data = json.dumps({'account': 'acc', 'container': 'cont',
        'ttl': 1234, 'logs_enabled': True, 'cdn_enabled': True})
    self.test_origin.app = FakeApp(iter([
        ('204 No Content', {}, prev_data),  # call to _get_cdn_data
        ('304 No Content', {}, '')]))  # call to get obj
    req = Request.blank('http://1234.r34.origin_cdn.com:8080/obj1.jpg',
        environ={'REQUEST_METHOD': 'HEAD',
                 'swift.cdn_hash': 'abcd',
                 'swift.cdn_object_name': 'obj1.jpg'})
    resp = req.get_response(self.test_origin)
    self.assertEquals(resp.status_int, 304)

    self.test_origin.app = FakeApp(iter([
        ('204 No Content', {}, prev_data),  # call to _get_cdn_data
        ('404 No Content', {}, '')]))  # call to get obj
    req = Request.blank('http://1234.r34.origin_cdn.com:8080/obj1.jpg',
        environ={'REQUEST_METHOD': 'HEAD',
                 'swift.cdn_hash': 'abcd',
                 'swift.cdn_object_name': 'obj1.jpg'})
    resp = req.get_response(self.test_origin)
    self.assertEquals(resp.status_int, 404)

    self.test_origin.app = FakeApp(iter([
        ('204 No Content', {}, prev_data),  # call to _get_cdn_data
        ('416 No Content', {}, '')]))  # call to get obj
    req = Request.blank('http://1234.r34.origin_cdn.com:8080/obj1.jpg',
        environ={'REQUEST_METHOD': 'HEAD',
                 'swift.cdn_hash': 'abcd',
                 'swift.cdn_object_name': 'obj1.jpg'})
    resp = req.get_response(self.test_origin)
    self.assertEquals(resp.status_int, 416)
def test_origin_db_post_fail(self):
    self.test_origin.app = FakeApp(iter([
        ('204 No Content', {}, ''),  # call to _get_cdn_data
        ('404 Not Found', {}, ''),  # put to .hash
    ]))
    req = Request.blank('http://origin_db.com:8080/v1/acc/cont',
        environ={'REQUEST_METHOD': 'PUT'})
    resp = req.get_response(self.test_origin)
    self.assertEquals(resp.status_int, 500)

    self.test_origin.app = FakeApp(iter([
        ('204 No Content', {}, ''),  # call to _get_cdn_data
        ('204 No Content', {}, ''),  # put to .hash
        ('404 Not Found', {}, ''),  # HEAD check to list container
        ('404 Not Found', {}, ''),  # PUT to list container
    ]))
    req = Request.blank('http://origin_db.com:8080/v1/acc/cont',
        environ={'REQUEST_METHOD': 'PUT'})
    resp = req.get_response(self.test_origin)
    self.assertEquals(resp.status_int, 500)

    self.test_origin.app = FakeApp(iter([
        ('204 No Content', {}, ''),  # call to _get_cdn_data
        ('204 No Content', {}, ''),  # put to .hash
        ('204 No Content', {}, ''),  # HEAD check to list container
        ('404 Not Found', {}, ''),  # PUT to list container
    ]))
    req = Request.blank('http://origin_db.com:8080/v1/acc/cont',
        environ={'REQUEST_METHOD': 'PUT'})
    resp = req.get_response(self.test_origin)
    self.assertEquals(resp.status_int, 500)
def run(self):
    container_loaded.wait()
    container_loaded.clear()

    link = Link.from_string(sys.argv[3])
    if link.type() == Link.LINK_TRACK:
        track = link.as_track()
        itrack = iter([track])
    elif link.type() == Link.LINK_PLAYLIST:
        playlist = link.as_playlist()
        print('loading playlist...')
        while not playlist.is_loaded():
            time.sleep(0.1)
        print('done')
        itrack = iter(playlist)

    session = self.ripper.session
    for track in itrack:
        self.ripper.load_track(track)
        rip_init(session, track)
        self.ripper.play()
        end_of_track.wait()
        end_of_track.clear()
        rip_terminate(session, track)
        rip_id3(session, track)
    self.ripper.disconnect()
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extract device information from the iPod plist.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    devices = match.get('Devices', {})
    for device_identifier, device_information in iter(devices.items()):
        datetime_value = device_information.get('Connected', None)
        if not datetime_value:
            continue

        event_data = IPodPlistEventData()
        event_data.device_id = device_identifier

        # TODO: refactor.
        for key, value in iter(device_information.items()):
            if key == 'Connected':
                continue
            attribute_name = key.lower().replace(' ', '_')
            setattr(event_data, attribute_name, value)

        event = time_events.PythonDatetimeEvent(
            datetime_value, definitions.TIME_DESCRIPTION_LAST_CONNECTED)
        parser_mediator.ProduceEventWithEventData(event, event_data)
def _isnotsuite(test):
    "A crude way to tell apart testcases and suites with duck-typing"
    try:
        iter(test)
    except TypeError:
        return True
    return False
def AnalyseResult(l_download):
    success_download_count, fail_download_count, timeout_download_count = 0, 0, 0
    try:
        if len(l_download) != 0:
            for i in iter(l_download):
                if i[0] == 'success':
                    success_download_count += 1
                elif i[0] == 'fail':
                    fail_download_count += 1
                else:
                    timeout_download_count += 1
            cost_of_valid_download = [x[1] for x in iter(l_download) if x[0] == 'success']

            # summary
            logger.info('-' * 30 + "SUMMARY" + '-' * 30)
            logger.info('Total Download: %s, Success: %s, Fail: %s, Timeout: %s'
                        % (len(l_download), success_download_count,
                           fail_download_count, timeout_download_count))
            if len(cost_of_valid_download):
                logger.info('\tThe fastest download in %s seconds' % min(cost_of_valid_download))
                logger.info('\tThe slowest download in %s seconds' % max(cost_of_valid_download))
                logger.info('\tThe average download in %s seconds'
                            % str(sum(cost_of_valid_download) / len(cost_of_valid_download)))
            else:
                logger.error('\tNone valid download!!!')
            if fail_download_count == 0 and timeout_download_count == 0:
                ResultLog.info('PASS')
            else:
                ResultLog.error('FAIL')
    except Exception as err:
        print(err)
def dfs(G, source=None):
    """Produce edges in a depth-first-search starting at source.
    Edges are tagged as either 'tree' or 'back'."""
    # Very slight modification of the DFS procedure from networkx.
    # One could unify this with compute_information, but it seemed cleaner this way.
    if source is None:
        # produce edges for all components
        nodes = G
    else:
        # produce edges for components with source
        nodes = [source]
    visited = set()
    for start in nodes:
        if start in visited:
            continue
        visited.add(start)
        stack = [(start, iter(G[start]))]
        while stack:
            parent, children = stack[-1]
            try:
                child = next(children)
                if child not in visited:
                    yield parent, child, 'tree'
                    visited.add(child)
                    stack.append((child, iter(G[child])))
                else:
                    yield parent, child, 'back'
            except StopIteration:
                stack.pop()
def setUp(self):
    random.seed(123)
    self.ids_str = iter([('1', '2'), ('2', '3'), ('4', '5'), ('6', '7'),
                         ('8', '9')])
    self.records = iter([({'name': 'Margret', 'age': '32'}, {'name': 'Marga', 'age': '33'}),
                         ({'name': 'Marga', 'age': '33'}, {'name': 'Maria', 'age': '19'}),
                         ({'name': 'Maria', 'age': '19'}, {'name': 'Monica', 'age': '39'}),
                         ({'name': 'Monica', 'age': '39'}, {'name': 'Mira', 'age': '47'}),
                         ({'name': 'Mira', 'age': '47'}, {'name': 'Mona', 'age': '9'}),
                         ])
    self.normalizedAffineGapDistance = dedupe.affinegap.normalizedAffineGapDistance

    self.data_model = {}
    self.data_model['fields'] = dedupe.core.OrderedDict()
    v = {}
    v.update({'Has Missing': False, 'type': 'String',
              'comparator': self.normalizedAffineGapDistance,
              'weight': -1.0302742719650269})
    self.data_model['fields']['name'] = v
    self.data_model['bias'] = 4.76

    score_dtype = [('pairs', 'S1', 2), ('score', 'f4', 1)]
    self.desired_scored_pairs = numpy.array([(['1', '2'], 0.96),
                                             (['2', '3'], 0.96),
                                             (['4', '5'], 0.78),
                                             (['6', '7'], 0.72),
                                             (['8', '9'], 0.84)],
                                            dtype=score_dtype)
def __reduce__(self):
    return (protocol_2_reduce_tuple_func,  # callable
            ('yam', 1),                    # args
            None,                          # state
            iter([]),                      # listitems
            iter([]),                      # dictitems
            )
def set_current_draw_pattern(self, pattern, control):
    try:
        iter(pattern)
    except TypeError:
        self.draw_pattern = [pattern]
    else:
        self.draw_pattern = pattern
def __reduce__(self):
    return (type(self),   # callable
            ('yam', 1),   # args
            {'foo': 1},   # state
            iter([]),     # listitems
            iter([]),     # dictitems
            )
async def async_step_import(self, user_input):
    """Import a config entry."""
    if self.hass.config_entries.async_entries(DOMAIN):
        return self.async_abort(reason='already_setup')

    self._scan_interval = user_input[KEY_SCAN_INTERVAL]
    if user_input[CONF_HOST] != DOMAIN:
        self._hosts.append(user_input[CONF_HOST])

    if not await self.hass.async_add_executor_job(
            os.path.isfile, self.hass.config.path(TELLDUS_CONFIG_FILE)):
        return await self.async_step_user()

    conf = await self.hass.async_add_executor_job(
        load_json, self.hass.config.path(TELLDUS_CONFIG_FILE))
    host = next(iter(conf))

    if user_input[CONF_HOST] != host:
        return await self.async_step_user()

    host = CLOUD_NAME if host == 'tellduslive' else host
    return self.async_create_entry(
        title=host,
        data={
            CONF_HOST: host,
            KEY_SCAN_INTERVAL: self._scan_interval.seconds,
            KEY_SESSION: next(iter(conf.values())),
        })
def __reduce__(self):
    return (PickleProtocol2ReduceListitemsAppend,  # callable
            (),                                    # args
            {},                                    # state
            iter(['foo', 'bar']),                  # listitems
            iter([]),                              # dictitems
            )
def load_transactions_mock(input_file, **kwargs):
    """ Mock for apyori.load_transactions. """
    eq_(kwargs['delimiter'], delimiter)
    eq_(next(input_file), inputs[0])
    yield iter(input_transactions[0])
    eq_(next(input_file), inputs[1])
    yield iter(input_transactions[1])
def test_gen(self):
    g = regen(iter(list(range(10))))
    assert g[7] == 7
    assert g[6] == 6
    assert g[5] == 5
    assert g[4] == 4
    assert g[3] == 3
    assert g[2] == 2
    assert g[1] == 1
    assert g[0] == 0
    assert g.data == list(range(10))
    assert g[8] == 8
    assert g[0] == 0
    g = regen(iter(list(range(10))))
    assert g[0] == 0
    assert g[1] == 1
    assert g.data == list(range(10))
    g = regen(iter([1]))
    assert g[0] == 1
    with pytest.raises(IndexError):
        g[1]
    assert g.data == [1]
    g = regen(iter(list(range(10))))
    assert g[-1] == 9
    assert g[-2] == 8
    assert g[-3] == 7
    assert g[-4] == 6
    assert g[-5] == 5
    assert g[5] == 5
    assert g.data == list(range(10))
    assert list(iter(g)) == list(range(10))
def create_file(self):
    # Expanding summits
    tfbs_summit_regions = GenomicRegionSet("TFBS Summit Regions")
    tfbs_summit_regions.read_bed(self.tfbs_summit_fname)

    for region in iter(tfbs_summit_regions):
        summit = int(region.data.split()[-1]) + region.initial
        region.initial = max(summit - (self.peak_ext / 2), 0)
        region.final = summit + (self.peak_ext / 2)

    # Calculating intersections
    mpbs_regions = GenomicRegionSet("MPBS Regions")
    mpbs_regions.read_bed(self.mpbs_fname)

    tfbs_summit_regions.sort()
    mpbs_regions.sort()

    with_overlap_regions = mpbs_regions.intersect(tfbs_summit_regions, mode=OverlapType.ORIGINAL)
    without_overlap_regions = mpbs_regions.subtract(tfbs_summit_regions, whole_region=True)

    tfbs_regions = GenomicRegionSet("TFBS Regions")
    for region in iter(with_overlap_regions):
        region.name = region.name.split(":")[0] + ":Y"
        tfbs_regions.add(region)
    for region in iter(without_overlap_regions):
        region.name = region.name.split(":")[0] + ":N"
        tfbs_regions.add(region)
    tfbs_regions.sort()

    tfbs_fname = os.path.join(self.output_location, "{}.bed".format(self.mpbs_name))
    tfbs_regions.write_bed(tfbs_fname)
def open_sam(sam_filename, samtype):
    if samtype == "sam":
        SAM_or_BAM_Reader = HTSeq.SAM_Reader
    elif samtype == "bam":
        SAM_or_BAM_Reader = HTSeq.BAM_Reader
    else:
        raise ValueError("Unknown input format %s specified." % samtype)

    try:
        if sam_filename != "-":
            read_seq_file = SAM_or_BAM_Reader(sam_filename)
            read_seq = read_seq_file
            first_read = iter(read_seq).next()
        else:
            read_seq_file = SAM_or_BAM_Reader(sys.stdin)
            read_seq_iter = iter(read_seq_file)
            first_read = read_seq_iter.next()
            read_seq = itertools.chain([first_read], read_seq_iter)
        pe_mode = first_read.paired_end
    except:
        msg = "Error occured when reading beginning of SAM/BAM file.\n"
        sys.stderr.write(msg)
        raise

    try:
        yield (pe_mode, read_seq)
    except:
        sys.stderr.write("Error occured when processing SAM input (%s):\n"
                         % read_seq_file.get_line_number_string())
        raise
def add(self, added=None):
    try:
        iter(added)
    except TypeError, te:
        added = [added]

    offsetX = self.__margin[1]
    offsetY = 0
    for item in added:
        item.setParent(self.layout)
        item.setVisible(True)
        x = item.geometry().x()
        y = item.geometry().y()
        w = item.geometry().width()
        h = item.geometry().height()
        item.setGeometry(offsetX, self.linesH, w, h)
        # increment line offset horizontal
        offsetX += w + 10
        if offsetY < h:
            offsetY = h
    # increment line offset height and width
    self.linesH += offsetY + 5
    if self.linesW < offsetX:
        self.linesW = offsetX
    # resize the layout in the case of a scroll
    if self.__scroll:
        self.layout.resize(self.linesW + self.__margin[0], self.linesH + self.__margin[1])