def __setattr__(self, name, value):
    if hasattr(self, name):
        object.__setattr__(self, name, value)
    elif name != "_impl" and hasattr(self._impl, name):
        self._impl.__setattr__(name, value)
    else:
        self.__dict__[name] = value
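A minimal sketch of how the delegation pattern above behaves. The `Impl` and `Wrapper` names are invented for the demo: attributes the wrapper already has are set directly, attributes known to the wrapped `_impl` are forwarded to it, and anything else lands in the wrapper's own `__dict__`.

class Impl:
    def __init__(self):
        self.width = 10

class Wrapper:
    def __init__(self):
        # bypass our own __setattr__ while bootstrapping
        object.__setattr__(self, "_impl", Impl())

    def __setattr__(self, name, value):
        if hasattr(self, name):
            object.__setattr__(self, name, value)
        elif name != "_impl" and hasattr(self._impl, name):
            self._impl.__setattr__(name, value)
        else:
            self.__dict__[name] = value

w = Wrapper()
w.width = 42                     # unknown to Wrapper, known to Impl -> forwarded
assert w._impl.width == 42
w.height = 7                     # unknown to both -> stored on the wrapper
assert w.__dict__["height"] == 7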
def to_dict(self):
    """
    Returns the model properties as a dict
    """
    result = {}

    for attr, _ in iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value

    return result
def _toPNG(mol):
    if hasattr(mol, '__sssAtoms'):
        highlightAtoms = mol.__sssAtoms
    else:
        highlightAtoms = []
    try:
        mol.GetAtomWithIdx(0).GetExplicitValence()
    except RuntimeError:
        mol.UpdatePropertyCache(False)
    if not hasattr(rdMolDraw2D, 'MolDraw2DCairo'):
        mc = copy.deepcopy(mol)
        try:
            img = Draw.MolToImage(mc, size=molSize, kekulize=kekulizeStructures,
                                  highlightAtoms=highlightAtoms)
        except ValueError:  # <- can happen on a kekulization failure
            mc = copy.deepcopy(mol)
            img = Draw.MolToImage(mc, size=molSize, kekulize=False,
                                  highlightAtoms=highlightAtoms)
        bio = BytesIO()
        img.save(bio, format='PNG')
        return bio.getvalue()
    else:
        nmol = rdMolDraw2D.PrepareMolForDrawing(mol, kekulize=kekulizeStructures)
        d2d = rdMolDraw2D.MolDraw2DCairo(molSize[0], molSize[1])
        d2d.DrawMolecule(nmol, highlightAtoms=highlightAtoms)
        d2d.FinishDrawing()
        return d2d.GetDrawingText()
def check_server_disallowed(self):
    """
    Check if server domain name or IP is disallowed in settings.py.
    """
    hostname = self.netloc_parts[2].lower()
    if (hasattr(settings, 'DISALLOWED_DOMAIN_LIST')
            and settings.DISALLOWED_DOMAIN_LIST):
        for domain in settings.DISALLOWED_DOMAIN_LIST:
            if hostname == domain or hostname.endswith('.' + domain):
                raise ValidationError(unicode(
                    _("Domain name %(domain)s is disallowed.") % locals()))
    try:
        ip = socket.gethostbyname(hostname)
    except socket.error:
        raise ValidationError(unicode(
            _("Could not resolve IP address for %(hostname)s.") % locals()))
    if (not hasattr(settings, 'DISALLOWED_SERVER_IP_LIST')
            or not settings.DISALLOWED_SERVER_IP_LIST):
        return
    server = long_ip(ip)
    # print 'server', server, dotted_ip(server), ip
    for disallowed in settings.DISALLOWED_SERVER_IP_LIST:
        disallowed = disallowed.strip()
        if disallowed == '' or disallowed.startswith('#'):
            continue
        mask = bit_mask(32)
        if '/' in disallowed:
            disallowed, bits = disallowed.split('/', 1)
            mask = slash_mask(int(bits))
        identifier = long_ip(disallowed) & mask
        masked = server & mask
        if masked == identifier:
            raise ValidationError(unicode(
                _("Server IP address %(ip)s is disallowed.") % locals()))
def cursor(self, function, return_pages=False, **params):
    """Returns a generator for results that match a specified query.

    :param function: Instance of a Twython function
        (Twython.get_home_timeline, Twython.search)
    :param \*\*params: Extra parameters to send with your request
        (usually parameters accepted by the Twitter API endpoint)
    :rtype: generator

    Usage::

        >>> from twython import Twython
        >>> twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)

        >>> results = twitter.cursor(twitter.search, q='python')
        >>> for result in results:
        >>>     print result

    """
    if not hasattr(function, 'iter_mode'):
        raise TwythonError('Unable to create generator for Twython \
method "%s"' % function.__name__)

    while True:
        content = function(**params)

        if not content:
            raise StopIteration

        if hasattr(function, 'iter_key'):
            results = content.get(function.iter_key)
        else:
            results = content

        if return_pages:
            yield results
        else:
            for result in results:
                yield result

        if function.iter_mode == 'cursor' and \
                content['next_cursor_str'] == '0':
            raise StopIteration

        try:
            if function.iter_mode == 'id':
                if 'max_id' not in params:
                    # Add 1 to the id because since_id and
                    # max_id are inclusive
                    if hasattr(function, 'iter_metadata'):
                        since_id = content[function.iter_metadata].get('since_id_str')
                    else:
                        since_id = content[0]['id_str']
                    params['since_id'] = (int(since_id) - 1)
            elif function.iter_mode == 'cursor':
                params['cursor'] = content['next_cursor_str']
        except (TypeError, ValueError):  # pragma: no cover
            raise TwythonError('Unable to generate next page of search \
results, `page` is not a number.')
def __init__(self, args, output_manager, _error_func):
    super(LocalDeviceEnvironment, self).__init__(output_manager)
    self._blacklist = (device_blacklist.Blacklist(args.blacklist_file)
                       if args.blacklist_file
                       else None)
    self._device_serials = args.test_devices
    self._devices_lock = threading.Lock()
    self._devices = None
    self._concurrent_adb = args.enable_concurrent_adb
    self._enable_device_cache = args.enable_device_cache
    self._logcat_monitors = []
    self._logcat_output_dir = args.logcat_output_dir
    self._logcat_output_file = args.logcat_output_file
    self._max_tries = 1 + args.num_retries
    self._skip_clear_data = args.skip_clear_data
    self._tool_name = args.tool
    self._trace_output = None
    if hasattr(args, 'trace_output'):
        self._trace_output = args.trace_output
    self._trace_all = None
    if hasattr(args, 'trace_all'):
        self._trace_all = args.trace_all

    devil_chromium.Initialize(
        output_directory=constants.GetOutDirectory(),
        adb_path=args.adb_path)

    # Some things such as Forwarder require ADB to be in the environment path.
    adb_dir = os.path.dirname(adb_wrapper.AdbWrapper.GetAdbPath())
    if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
        os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']
def convert(ctx, x):
    if isinstance(x, (ctx.mpf, ctx.mpc)):
        return x
    if isinstance(x, ctx._constant):
        return +x
    if isinstance(x, complex) or hasattr(x, "_mpc_"):
        re = ctx.convert(x.real)
        im = ctx.convert(x.imag)
        return ctx.mpc(re, im)
    if isinstance(x, basestring):
        v = mpi_from_str(x, ctx.prec)
        return ctx.make_mpf(v)
    if hasattr(x, "_mpi_"):
        a, b = x._mpi_
    else:
        try:
            a, b = x
        except (TypeError, ValueError):
            a = b = x
        if hasattr(a, "_mpi_"):
            a = a._mpi_[0]
        else:
            a = convert_mpf_(a, ctx.prec, round_floor)
        if hasattr(b, "_mpi_"):
            b = b._mpi_[1]
        else:
            b = convert_mpf_(b, ctx.prec, round_ceiling)
    if a == fnan or b == fnan:
        a = fninf
        b = finf
    assert mpf_le(a, b), "endpoints must be properly ordered"
    return ctx.make_mpf((a, b))
def get_img(doc, page_num, thumbnail, format_):
    """
    Convert doc to img if not already done. Given doc is updated with new img
    if created.

    :param doc: :class:`labresult.model.Document`
    :param page_num: int, desired page number
    :param thumbnail: bool, thumbnail or not
    :param format_: str, "svg" or "png"
    :rtype: bytes, img data
    """
    imgobj = get_existing_img_or_new_one(doc, page_num, thumbnail, format_)
    img_data = None
    if thumbnail and hasattr(imgobj.thumbnail, '_id'):
        img_data = imgobj.thumbnail.read()
    elif not thumbnail and hasattr(imgobj.data, '_id'):
        img_data = imgobj.data.read()
    if not img_data:
        if not hasattr(doc.pdf, '_id'):
            doc.pdf = get_pdf(doc)
        doc.pdf.seek(0)
        pdf_data = doc.pdf.read()
        img_data = tasks.pdf2img.delay(pdf_data, page_num, thumbnail,
                                       format_).get()
        tasks.save_img(img_data, doc.id, page_num, thumbnail, format_)
    return img_data
def __new__(cls, name, bases, attrs):
    instance = super(OptionParserMeta, cls).__new__(cls, name, bases, attrs)
    if not hasattr(instance, '_mixin_setup_funcs'):
        instance._mixin_setup_funcs = []
    if not hasattr(instance, '_mixin_process_funcs'):
        instance._mixin_process_funcs = []
    if not hasattr(instance, '_mixin_after_parsed_funcs'):
        instance._mixin_after_parsed_funcs = []

    for base in _sorted(bases + (instance,)):
        func = getattr(base, '_mixin_setup', None)
        if func is not None and func not in instance._mixin_setup_funcs:
            instance._mixin_setup_funcs.append(func)

        func = getattr(base, '_mixin_after_parsed', None)
        if func is not None and func not in instance._mixin_after_parsed_funcs:
            instance._mixin_after_parsed_funcs.append(func)

        # Mark process_<opt> functions with the base priority for sorting
        for func in dir(base):
            if not func.startswith('process_'):
                continue

            func = getattr(base, func)
            if getattr(func, '_mixin_prio_', None) is not None:
                # Function already has the attribute set, don't override it
                continue

            func.__func__._mixin_prio_ = getattr(base, '_mixin_prio_', 1000)

    return instance
def _set_model(self, model):
    import tensorflow as tf
    import keras.backend.tensorflow_backend as KTF

    self.model = model
    self.sess = KTF.get_session()
    if self.histogram_freq and not self.merged:
        mod_type = self.model.get_config()['name']
        if mod_type == 'Sequential':
            layers = {l.get_config()['name']: l for l in self.model.layers}
        elif mod_type == 'Graph':
            layers = self.model.nodes
        else:
            raise Exception('Unrecognized model:',
                            self.model.get_config()['name'])
        for l in layers:
            cur_layer = layers[l]
            if hasattr(cur_layer, 'W'):
                tf.histogram_summary('{}_W'.format(l), cur_layer.W)
            if hasattr(cur_layer, 'b'):
                tf.histogram_summary('{}_b'.format(l), cur_layer.b)
            if hasattr(cur_layer, 'get_output'):
                tf.histogram_summary('{}_out'.format(l),
                                     cur_layer.get_output())
        self.merged = tf.merge_all_summaries()
        self.writer = tf.train.SummaryWriter(self.log_dir, self.sess.graph_def)
def getContextMenu(self, priority=QAction.NormalPriority):
    '''
    Insert the subwidgets' contextMenus between double separators
    '''
    contextMenu = None
    if hasattr(self, 'actions'):
        if hasattr(self.actions, 'contextMenu'):
            contextMenu = self.actions.getContextMenu()
            lastActionWasASeparator = False
            menusWereInserted = False
            for action in contextMenu.actions():
                if menusWereInserted == True:
                    debug(self, "Menus were already inserted...", 5)
                else:
                    if lastActionWasASeparator == False:
                        if action.isSeparator():
                            debug(self, "Found a separator...", 5)
                            lastActionWasASeparator = True
                        else:
                            debug(self, "Not a separator...", 5)
                            lastActionWasASeparator = False
                    else:
                        if not(action.isSeparator()):
                            debug(self, "Not a double separator...", 5)
                            lastActionWasASeparator = False
                        else:
                            debug(self, "Found a double separator...", 5)
                            for widget in self.getLWidgets():
                                widgetMenu = widget.getContextMenu(priority)
                                if not(widgetMenu.isEmpty()):
                                    debug(self, "Inserting %d entries in '%s'..."
                                          % (len(widgetMenu.actions()),
                                             widgetMenu.title()))
                                    contextMenu.insertMenu(action, widgetMenu)
                                    menusWereInserted = True
                            lastActionWasASeparator = True
    return contextMenu
def apply(self, mcp):
    """Apply the configuration to the specified master control program"""
    working_dir = self._get_working_dir(mcp)
    if not os.path.isdir(working_dir):
        raise ConfigError("Specified working directory \'%s\' is not a directory" % working_dir)
    if not os.access(working_dir, os.W_OK):
        raise ConfigError("Specified working directory \'%s\' is not writable" % working_dir)

    mcp.state_handler.working_dir = working_dir

    if hasattr(self, 'command_context'):
        if mcp.context:
            mcp.context.base = self.command_context
        else:
            mcp.context = command_context.CommandContext(self.command_context)

    self._apply_nodes(mcp)

    if hasattr(self, 'ssh_options'):
        self.ssh_options = default_or_from_tag(self.ssh_options, SSHOptions)
        self.ssh_options._apply(mcp)

    self._apply_jobs(mcp)
    self._apply_services(mcp)

    if hasattr(self, 'notification_options'):
        self.notification_options = default_or_from_tag(
            self.notification_options, NotificationOptions)
        self.notification_options._apply(mcp)
def _build_conch_options(self):
    """Verify and construct the ssh (conch) option object

    This is just a dictionary-like object that the twisted ssh
    implementation uses.
    """
    ssh_options = options.ConchOptions()
    if not hasattr(self, 'agent'):
        ssh_options['noagent'] = True
    else:
        if 'SSH_AUTH_SOCK' in os.environ:
            ssh_options['agent'] = True
        else:
            raise Error("No SSH Agent available ($SSH_AUTH_SOCK)")

    if hasattr(self, "identities"):
        for file_name in self.identities:
            file_path = os.path.expanduser(file_name)
            if not os.path.exists(file_path):
                raise Error("Private key file %s doesn't exist" % file_name)
            if not os.path.exists(file_path + ".pub"):
                raise Error("Public key %s doesn't exist" % (file_name + ".pub"))
            ssh_options.opt_identity(file_name)

    return ssh_options
def _extend(self, json, site):
    self.id = json.comment_id
    self.creation_date = datetime.datetime.fromtimestamp(json.creation_date)

    if hasattr(json, 'owner'):
        self.owner_id = (json.owner['owner_id'] if 'owner_id' in json.owner
                         else json.owner['user_id'])
        self.owner = User.partial(lambda self: self.site.user(self.id), site, {
            'id': self.owner_id,
            'user_type': Enumeration.from_string(json.owner['user_type'], UserType),
            'display_name': json.owner['display_name'],
            'reputation': json.owner['reputation'],
            'email_hash': json.owner['email_hash']})
    else:
        self.owner = None

    if hasattr(json, 'reply_to'):
        self.reply_to_user_id = json.reply_to['user_id']
        self.reply_to = User.partial(lambda self: self.site.user(self.id), site, {
            'id': self.reply_to_user_id,
            'user_type': Enumeration.from_string(json.reply_to['user_type'], UserType),
            'display_name': json.reply_to['display_name'],
            'reputation': json.reply_to['reputation'],
            'email_hash': json.reply_to['email_hash']})

    self.post_type = PostType.from_string(json.post_type)
def _extend(self, json, site):
    self.id = json.question_id
    self.creation_date = datetime.datetime.fromtimestamp(json.creation_date)
    self.vote_count = self.up_vote_count - self.down_vote_count

    self.timeline = StackExchangeLazySequence(TimelineEvent, None, site,
                                              json.question_timeline_url,
                                              self._up('timeline'))
    self.revisions = StackExchangeLazySequence(PostRevision, None, site,
                                               'revisions/%s' % self.id,
                                               self._up('revisions'),
                                               'revisions')

    self.comments_url = json.question_comments_url
    self.comments = StackExchangeLazySequence(Comment, None, site,
                                              self.comments_url,
                                              self._up('comments'))

    self.answers_url = json.question_answers_url
    if hasattr(json, 'answers'):
        self.answers = [Answer(x, site) for x in json.answers]
    else:
        self.answers = []

    if hasattr(json, 'owner'):
        self.owner_id = json.owner['user_id']

        owner_dict = dict(json.owner)
        owner_dict['id'] = self.owner_id
        del owner_dict['user_id']
        owner_dict['user_type'] = UserType.from_string(owner_dict['user_type'])

        self.owner = User.partial(lambda self: self.site.user(self.id), site,
                                  owner_dict)

    self.url = 'http://' + self.site.root_domain + '/questions/' + str(self.id)
def clone_request(request, method):
    """
    Internal helper method to clone a request, replacing with a different
    HTTP method.  Used for checking permissions against other methods.
    """
    ret = Request(request=request._request,
                  parsers=request.parsers,
                  authenticators=request.authenticators,
                  negotiator=request.negotiator,
                  parser_context=request.parser_context)
    ret._data = request._data
    ret._files = request._files
    ret._full_data = request._full_data
    ret._content_type = request._content_type
    ret._stream = request._stream
    ret._method = method
    if hasattr(request, '_user'):
        ret._user = request._user
    if hasattr(request, '_auth'):
        ret._auth = request._auth
    if hasattr(request, '_authenticator'):
        ret._authenticator = request._authenticator
    if hasattr(request, 'accepted_renderer'):
        ret.accepted_renderer = request.accepted_renderer
    if hasattr(request, 'accepted_media_type'):
        ret.accepted_media_type = request.accepted_media_type
    return ret
def highlightBlock(self, qstring):
    """ Highlight a block of text.
    """
    qstring = compat.unicode(qstring)
    prev_data = self.previous_block_data()
    if prev_data is not None:
        self._lexer._epd_state_stack = prev_data.syntax_stack
    elif hasattr(self._lexer, '_epd_state_stack'):
        del self._lexer._epd_state_stack

    index = 0
    # Lex the text using Pygments
    for token, text in self._lexer.get_tokens(qstring):
        l = len(text)
        format = self._get_format(token)
        if format is not None:
            self.setFormat(index, l, format)
        index += l

    if hasattr(self._lexer, '_epd_state_stack'):
        data = BlockUserData(syntax_stack=self._lexer._epd_state_stack)
        self.currentBlock().setUserData(data)
        # there is a bug in pyside and it will crash unless we
        # hold on to the reference a little longer
        data = self.currentBlock().userData()
        # Clean up for the next go-round.
        del self._lexer._epd_state_stack
def emit(self, record):
    try:
        msg = self.format(record)
        stream = self.stream
        isBuffer = hasattr(record, 'filebuffer')
        toSort = hasattr(record, 'filesort')
        if toSort:
            self.toSort = True
        if isBuffer:
            # toggle buffer switch
            self.isBuffer = not self.isBuffer
        if self.isBuffer or isBuffer:
            if len(msg):
                self.logBuffer.append(msg.replace(". Test:", ". Case:"))
        else:
            if len(self.logBuffer):
                if self.toSort:
                    self.logBuffer.sort()
                for item in self.logBuffer:
                    item = item.replace(". Case:", ". Test:")
                    stream.write(item)
                    stream.write(self.terminator)
                self.logBuffer = []
                self.toSort = False
            if len(msg):
                stream.write(msg)
                stream.write(self.terminator)
        self.flush()
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        self.handleError(record)
def orm_item_locator(orm_obj):
    """
    This function is called every time an object that will not be exported
    is required. Where orm_obj is the referred object.
    We postpone the lookup to locate_object() which will be run on the
    generated script
    """

    the_class = orm_obj._meta.object_name
    original_class = the_class
    pk_name = orm_obj._meta.pk.name
    original_pk_name = pk_name
    pk_value = getattr(orm_obj, pk_name)

    while hasattr(pk_value, "_meta") and hasattr(pk_value._meta, "pk") \
            and hasattr(pk_value._meta.pk, "name"):
        the_class = pk_value._meta.object_name
        pk_name = pk_value._meta.pk.name
        pk_value = getattr(pk_value, pk_name)

    clean_dict = make_clean_dict(orm_obj.__dict__)

    for key in clean_dict:
        v = clean_dict[key]
        if v is not None and not isinstance(
                v, (six.string_types, six.integer_types, float,
                    datetime.datetime)):
            clean_dict[key] = six.u("%s" % v)

    output = """ importer.locate_object(%s, "%s", %s, "%s", %s, %s ) """ % (
        original_class, original_pk_name,
        the_class, pk_name, pk_value, clean_dict
    )
    return output
def scaffold_list_columns(self):
    """
    Return a list of columns from the model.
    """
    columns = []

    for p in self._get_model_iterator():
        if hasattr(p, 'direction'):
            if self.column_display_all_relations or p.direction.name == 'MANYTOONE':
                columns.append(p.key)
        elif hasattr(p, 'columns'):
            if len(p.columns) > 1:
                filtered = tools.filter_foreign_columns(self.model.__table__, p.columns)

                if len(filtered) > 1:
                    warnings.warn('Can not convert multiple-column properties (%s.%s)'
                                  % (self.model, p.key))
                    continue

                column = filtered[0]
            else:
                column = p.columns[0]

            if column.foreign_keys:
                continue

            if not self.column_display_pk and column.primary_key:
                continue

            columns.append(p.key)

    return columns
def time_column(table, ifo=None):
    """Extract the 'time' column from the given table.

    This function uses time_func to determine the correct column to
    use as a proxy for 'time' and returns that column.
    The following mappings are used:

    - `sngl_inspiral` -> 'end' time
    - `sngl_burst` -> 'peak' time
    - `sngl_ringdown` -> 'start' time

    @param table: any `LIGO_LW` table
    @param ifo: an interferometer prefix if you want single-detector times

    @returns a numpy array object with a 'time' element for each row in
    the table
    """
    if hasattr(table, "get_time"):
        return numpy.asarray(table.get_time())
    func_name = time_func(ligolw_table.StripTableName(table.tableName)).__name__
    if hasattr(table, func_name):
        return numpy.asarray(getattr(table, func_name)())
    else:
        return numpy.asarray(map(func_name, table))
def netflix_solve(i_stream, o_stream):
    """Creates prediction output for any valid input for the Netflix Prize
    problem.

    Args:
        i_stream: The input stream.
        o_stream: The output stream.
    """
    # pre-conditions
    assert hasattr(i_stream, 'read')
    assert hasattr(o_stream, 'write')

    customer_ids = []
    movie_data, cust_data, answers = load_data()

    # parse input
    for line in i_stream:
        line = line.strip()
        if line[-1] == ':':
            if len(customer_ids) > 0:
                # see the future (or the past, actually)
                predictions = netflix_predict(movie_id, customer_ids,
                                              movie_data, cust_data)
                # print current movie's output
                netflix_print(movie_id, customer_ids, predictions, o_stream)
            movie_id = int(line[:-1])
            customer_ids = []
        else:
            customer_ids.append(int(line))

    # predict ratings for final movie
    predictions = netflix_predict(movie_id, customer_ids, movie_data, cust_data)

    # print final movie's output
    netflix_print(movie_id, customer_ids, predictions, o_stream)

    print_rmse(o_stream, calculated_ratings, answers)
def process(self, formdata=None, obj=None, **kwargs):
    """
    Take form, object data, and keyword arg input and have the fields
    process them.

    :param formdata:
        Used to pass data coming from the enduser, usually `request.POST` or
        equivalent.
    :param obj:
        If `formdata` has no data for a field, the form will try to get it
        from the passed object.
    :param `**kwargs`:
        If neither `formdata` or `obj` contains a value for a field, the form
        will assign the value of a matching keyword argument to the field,
        if provided.
    """
    if formdata is not None and not hasattr(formdata, 'getlist'):
        if hasattr(formdata, 'getall'):
            formdata = WebobInputWrapper(formdata)
        else:
            raise TypeError("formdata should be a multidict-type wrapper that supports the 'getlist' method")

    for name, field in self._fields.iteritems():
        if obj is not None and hasattr(obj, name):
            field.process(formdata, getattr(obj, name))
        elif name in kwargs:
            field.process(formdata, kwargs[name])
        else:
            field.process(formdata)
def _handle(self, args):
    if len(args) == 0:
        self._raise_omni_error('Insufficient number of arguments - Missing command to run')

    call = args[0].lower()
    # disallow calling private methods
    if call.startswith('_'):
        return
    if hasattr(self, call):
        return getattr(self, call)(args[1:])
    elif hasattr(self.chhandler, call):
        return getattr(self.chhandler, call)(args[1:])
    elif hasattr(self.amhandler, call):
        # Extract the slice name arg and put it in an option
        self.amhandler.opts.sliceName = self.amhandler._extractSliceArg(args)

        # Try to auto-correct API version
        msg = self.amhandler._correctAPIVersion(args)
        if msg is None:
            msg = ""

        (message, val) = getattr(self.amhandler, call)(args[1:])
        if message is None:
            message = ""
        return (msg + message, val)
    else:
        self._raise_omni_error('Unknown function: %s' % call)
def overload(func):
    """Function decorator for defining overloaded functions."""
    frame = sys._getframe(1)
    locals = frame.f_locals
    # See if there is a previous overload variant available.  Also verify
    # that the existing function really is overloaded: otherwise, replace
    # the definition.  The latter is actually important if we want to reload
    # a library module such as genericpath with a custom one that uses
    # overloading in the implementation.
    if func.__name__ in locals and hasattr(locals[func.__name__], 'dispatch'):
        orig_func = locals[func.__name__]

        def wrapper(*args, **kwargs):
            ret, ok = orig_func.dispatch(*args, **kwargs)
            if ok:
                return ret
            return func(*args, **kwargs)

        wrapper.isoverload = True
        wrapper.dispatch = make_dispatcher(func, orig_func.dispatch)
        wrapper.next = orig_func
        wrapper.__name__ = func.__name__
        if hasattr(func, '__isabstractmethod__'):
            # Note that we can't reliably check that abstractmethod is
            # used consistently across overload variants, so we let a
            # static checker do it.
            wrapper.__isabstractmethod__ = func.__isabstractmethod__
        return wrapper
    else:
        # Return the initial overload variant.
        func.isoverload = True
        func.dispatch = make_dispatcher(func)
        func.next = None
        return func
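The decorator above depends on a `make_dispatcher` helper that is not shown in this snippet. The stand-in below is a hypothetical sketch only (it dispatches on the annotation of the first parameter, which is an assumption, not necessarily the original's behavior), but it makes the usage pattern runnable:

def make_dispatcher(func, previous=None):
    # Stand-in only: try this variant when the first argument matches its
    # annotated type; otherwise fall back to any previously registered variant.
    def dispatch(*args, **kwargs):
        expected = next(iter(getattr(func, '__annotations__', {}).values()), None)
        if args and isinstance(expected, type) and isinstance(args[0], expected):
            return func(*args, **kwargs), True
        if previous is not None:
            return previous(*args, **kwargs)
        return None, False
    return dispatch

# With `overload` from above in scope at module level:
@overload
def describe(x: int):
    return "an int"

@overload
def describe(x: str):
    return "a str"

assert describe(3) == "an int"    # handled by the first variant's dispatcher
assert describe("hi") == "a str"  # falls through to the later variant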
def destroy(self):
    self.disable()
    if hasattr(self, 'wrongWaySeq'):
        self.wrongWaySeq.finish()
        self.wrongWaySeq = None
    taskMgr.removeTasksMatching('removeIt')
    taskMgr.removeTasksMatching('removeCam*')
    taskMgr.removeTasksMatching('clearRaceEndPanel')
    for obj in self.directObjList:
        obj.destroy()
    if hasattr(self, 'mapScene'):
        self.mapScene.removeNode()
        self.mapScene = None
    self.aspect2dRoot.removeNode()
    self.aspect2dRoot = None
    self.raceModeRoot.removeNode()
    self.raceModeRoot = None
    self.render2dRoot.removeNode()
    self.render2dRoot = None
    self.closeButton = None
    self.gag = None
    self.lapLabel = None
    self.timeLabels = None
    self.placeLabelStr = None
    self.placeLabelNum = None
    self.photoFinishLabel = None
    self.mapScene = None
    self.race = None
    return
def _enterRawMode():
    global _inRawMode, _savedRawMode
    if _inRawMode:
        return
    fd = sys.stdin.fileno()
    try:
        old = tty.tcgetattr(fd)
        new = old[:]
    except:
        log.msg('not a typewriter!')
    else:
        # iflag
        new[0] = new[0] | tty.IGNPAR
        new[0] = new[0] & ~(tty.ISTRIP | tty.INLCR | tty.IGNCR |
                            tty.ICRNL | tty.IXON | tty.IXANY | tty.IXOFF)
        if hasattr(tty, 'IUCLC'):
            new[0] = new[0] & ~tty.IUCLC

        # lflag
        new[3] = new[3] & ~(tty.ISIG | tty.ICANON | tty.ECHO | tty.ECHO |
                            tty.ECHOE | tty.ECHOK | tty.ECHONL)
        if hasattr(tty, 'IEXTEN'):
            new[3] = new[3] & ~tty.IEXTEN

        # oflag
        new[1] = new[1] & ~tty.OPOST

        new[6][tty.VMIN] = 1
        new[6][tty.VTIME] = 0

        _savedRawMode = old
        tty.tcsetattr(fd, tty.TCSANOW, new)
        #tty.setraw(fd)
        _inRawMode = 1
def test_custom_lookup_ns_fallback(self):
    class TestElement1(etree.ElementBase):
        FIND_ME = "custom"

    class TestElement2(etree.ElementBase):
        FIND_ME = "nsclasses"

    class MyLookup(etree.CustomElementClassLookup):
        def lookup(self, t, d, ns, name):
            if name == 'c1':
                return TestElement1

    lookup = etree.ElementNamespaceClassLookup(MyLookup())
    etree.set_element_class_lookup(lookup)

    ns = lookup.get_namespace("otherNS")
    ns[None] = TestElement2

    root = etree.XML(xml_str)
    self.assertFalse(hasattr(root, 'FIND_ME'))
    self.assertEqual(root[0].FIND_ME, TestElement1.FIND_ME)
    self.assertFalse(hasattr(root[0][1], 'FIND_ME'))
    self.assertEqual(root[0][-1].FIND_ME, TestElement2.FIND_ME)
def create(cls, **kwargs):
    instance = cls(**kwargs)
    if hasattr(instance, 'created_at'):
        setattr(instance, 'created_at', datetime.datetime.utcnow())
    if hasattr(instance, 'modified_at'):
        setattr(instance, 'modified_at', datetime.datetime.utcnow())
    return instance
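For illustration (all names here are invented): because of the `hasattr()` guards, only models that actually define the timestamp fields get stamped, while other models pass through untouched. A standalone sketch of the same logic:

import datetime

def create(cls, **kwargs):
    instance = cls(**kwargs)
    if hasattr(instance, 'created_at'):
        setattr(instance, 'created_at', datetime.datetime.utcnow())
    if hasattr(instance, 'modified_at'):
        setattr(instance, 'modified_at', datetime.datetime.utcnow())
    return instance

class Article:
    created_at = None            # declared field -> gets stamped
    def __init__(self, title):
        self.title = title

class Note:
    def __init__(self, text):    # no timestamp fields -> left untouched
        self.text = text

assert create(Article, title="hi").created_at is not None
assert not hasattr(create(Note, text="x"), 'created_at')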
def pre(self, emulator=None):
    """
    _pre_

    Pre execution checks
    """
    if emulator is not None:
        return emulator.emulatePre(self.step)
    logging.info("Pre-executing CMSSW step")
    if hasattr(self.step.application.configuration, "configCacheUrl"):
        # means we have a configuration & tweak in the sandbox
        psetFile = self.step.application.command.configuration
        psetTweak = self.step.application.command.psetTweak
        self.stepSpace.getFromSandbox(psetFile)
        if psetTweak:
            self.stepSpace.getFromSandbox(psetTweak)

    if hasattr(self.step, "pileup"):
        self.stepSpace.getFromSandbox("pileupconf.json")

    # add in the scram env PSet manip script whatever happens
    self.step.runtime.scramPreScripts.append("SetupCMSSWPset")
    return None
# Save hyperparams
with open(os.path.join(params_path, 'config.yml'), 'w') as f:
    yaml.dump(saved_hyperparams, f)

# save command line arguments
with open(os.path.join(params_path, 'args.yml'), 'w') as f:
    ordered_args = OrderedDict([(key, vars(args)[key])
                                for key in sorted(vars(args).keys())])
    yaml.dump(ordered_args, f)

print(f"Log path: {save_path}")

try:
    model.learn(n_timesteps, eval_log_path=save_path, eval_env=env,
                eval_freq=args.eval_freq, **kwargs)
except KeyboardInterrupt:
    pass

# Save trained model
print(f"Saving to {save_path}")
model.save(f"{save_path}/{env_id}")

if hasattr(model, 'save_replay_buffer') and args.save_replay_buffer:
    print("Saving replay buffer")
    model.save_replay_buffer(os.path.join(save_path, 'replay_buffer.pkl'))

# if normalize:
#     # Important: save the running average, for testing the agent we need that normalization
#     model.get_vec_normalize_env().save(os.path.join(params_path, 'vecnormalize.pkl'))

# Deprecated saving:
# env.save_running_average(params_path)
def vmwarecli(command=None, action=None, namespace=None):
    logger.debug("Namespace {}".format(namespace))
    urllib3.disable_warnings()

    vcduser = None
    vcdpasword = None
    vcdhost = None
    vcdorg = None

    if hasattr(__builtins__, 'raw_input'):
        input = raw_input

    # NOTE: the interactive credential prompts below are partially masked
    # ("******") in the source; the masked spans are left as-is.
    if namespace.vcdvdc is None:
        while True:
            vcduser = input("Enter vcd username: "******"Please enter vcd password: "******"Please enter vcd host name or ip: ")
            if vcdhost is not None and len(vcdhost) > 0:
                break
    else:
        vcdhost = namespace.vcdhost

    if namespace.vcdorg is None:
        while True:
            vcdorg = input("Please enter vcd organization name: ")
            if vcdorg is not None and len(vcdorg) > 0:
                break
    else:
        vcdorg = namespace.vcdorg

    try:
        vim = vimconnector(uuid=None,
                           name=vcdorg,
                           tenant_id=None,
                           tenant_name=namespace.vcdvdc,
                           url=vcdhost,
                           url_admin=vcdhost,
                           user=vcduser,
                           passwd=vcdpasword,
                           log_level="DEBUG",
                           config={'admin_username': namespace.vcdamdin,
                                   'admin_password': namespace.vcdadminpassword})
        vim.vca = vim.connect()
    except vimconn.vimconnConnectionException:
        print("Failed connect to vcloud director. Please check credential and hostname.")
        return

    # list
    if command == 'list' or namespace.command == 'list':
        logger.debug("Client requested list action")
        # route request to list actions
        list_actions(vim=vim, action=action, namespace=namespace)

    # view action
    if command == 'view' or namespace.command == 'view':
        logger.debug("Client requested view action")
        view_actions(vim=vim, action=action, namespace=namespace)

    # delete action
    if command == 'delete' or namespace.command == 'delete':
        logger.debug("Client requested delete action")
        delete_actions(vim=vim, action=action, namespace=namespace)

    # create action
    if command == 'create' or namespace.command == 'create':
        logger.debug("Client requested create action")
        create_actions(vim=vim, action=action, namespace=namespace)

    # image action
    if command == 'image' or namespace.command == 'image':
        logger.debug("Client requested create action")
        image_action(vim=vim, action=action, namespace=namespace)
def mback(energy, mu, group=None, order=3, z=None, edge='K', e0=None,
          emin=None, emax=None, whiteline=None, leexiang=False, tables='cl',
          fit_erfc=False, return_f1=False, _larch=None):
    """
    Match mu(E) data for tabulated f"(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments:
      energy, mu: arrays of energy and mu(E)
      order:      order of polynomial [3]
      group:      output group (and input group for e0)
      z:          Z number of absorber
      edge:       absorption edge (K, L3)
      e0:         edge energy
      emin:       beginning energy for fit
      emax:       ending energy for fit
      whiteline:  exclusion zone around white lines
      leexiang:   flag to use the Lee & Xiang extension
      tables:     'cl' or 'chantler'
      fit_erfc:   True to float parameters of error function
      return_f1:  True to put the f1 array in the group

    Returns:
      group.f2:   tabulated f2(E)
      group.f1:   tabulated f1(E) (if return_f1 is True)
      group.fpp:  matched data
      group.mback_params: Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order = int(order)
    if order < 1:
        order = 1   # set order of polynomial
    if order > MAXORDER:
        order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    group = set_xafsGroup(group, _larch=_larch)

    if e0 is None:  # need to run find_e0:
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = group.e0
    if e0 is None:
        find_e0(energy, mu, group=group)

    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy) - 1)
    if emin is not None:
        i1 = index_of(energy, emin)
    if emax is not None:
        i2 = index_of(energy, emax)
    theta = np.ones(len(energy))  # default: 1 throughout
    theta[0:i1] = 0
    theta[i2:-1] = 0
    if whiteline:
        pre = 1.0 * (energy < e0)
        post = 1.0 * (energy > e0 + float(whiteline))
        theta = theta * (pre + post)
        if edge.lower().startswith('l'):
            l2 = xray_edge(z, 'L2', _larch=_larch)[0]
            l2_pre = 1.0 * (energy < l2)
            l2_post = 1.0 * (energy > l2 + float(whiteline))
            theta = theta * (l2_pre + l2_post)

    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1 * (energy < e0)
    weight2 = 1 * (energy > e0)
    weight = np.sqrt(sum(weight1)) * weight1 + np.sqrt(sum(weight2)) * weight2

    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2 = f2
    if return_f1:
        group.f1 = f1

    n = edge
    if edge.lower().startswith('l'):
        n = 'L'
    params = Group(s=Parameter(1, vary=True, _larch=_larch),      # scale of data
                   xi=Parameter(50, vary=fit_erfc, min=0, _larch=_larch),  # width of erfc
                   em=Parameter(xray_line(z, n, _larch=_larch)[0],
                                vary=False, _larch=_larch),       # erfc centroid
                   e0=Parameter(e0, vary=False, _larch=_larch),   # abs. edge energy
                   ## various arrays needed by the objective function
                   en=energy,
                   mu=mu,
                   f2=group.f2,
                   weight=weight,
                   theta=theta,
                   leexiang=leexiang,
                   _larch=_larch)
    if fit_erfc:
        params.a = Parameter(1, vary=True, _larch=_larch)   # amplitude of erfc
    else:
        params.a = Parameter(0, vary=False, _larch=_larch)  # amplitude of erfc

    for i in range(order):  # polynomial coefficients
        setattr(params, 'c%d' % i, Parameter(0, vary=True, _larch=_larch))

    fit = Minimizer(match_f2, params, _larch=_larch, toler=1.e-5)
    fit.leastsq()

    eoff = energy - params.e0.value
    normalization_function = params.a.value * erfc(
        (energy - params.em.value) / params.xi.value) + params.c0.value
    for i in range(MAXORDER):
        j = i + 1
        attr = 'c%d' % j
        if hasattr(params, attr):
            normalization_function = normalization_function + getattr(
                getattr(params, attr), 'value') * eoff**j

    group.fpp = params.s * mu - normalization_function
    group.mback_params = params
def set_options_from_map(self, config_options, source=None, *,
                         ignore_missing=False, allow_empty=True,
                         filter_func=None):
    """Sets several config options to their (individual) values.

    This helper method acts like a "mass-option_set_to()"
    and adds a few convenience features
    such as filtering out unknown options.

    The "config options" parameter should be a dict
    where the keys are config option names,
    and values are config option values.
    It can also be a sequence of 2-tuples, similar to dict.update().

    Any false value, e.g. None, is also accepted,
    and makes this method immediately return "success" or "failure",
    depending on the "allow_empty" keyword parameter.

    @param config_options: either a dict of config option name X value,
                           or an iterable of 2-tuples (option, value).
                           See option_set_to() for a description
                           of possible config option names and values.
    @type config_options: dict :: C{str} => undef
                          or iterable of 2-tuple (C{str}, undef)
                          or anything false

    @keyword source: additional information about the decision's origin.
                     Defaults to None.
    @type source: undef or C{None}

    @keyword ignore_missing: whether to ignore nonexistent config options.
                             Normally, this is an error, but if the config
                             options come from an unreliable source, this
                             keyword may be set to True to discard unknown
                             options. Defaults to False.
    @type ignore_missing: C{bool}

    @keyword allow_empty: whether an empty (or otherwise false)
                          config_options parameter should be interpreted
                          as success or failure.
                          Defaults to True, which allows empty input.
                          The meaning of "empty" here is
                          "no config option has been set".
                          An input config options sequence that has been
                          completely filtered out due to unknown options
                          is considered as "equally" empty as an empty dict.
    @type allow_empty: usually C{bool}

    @keyword filter_func: either None, which disables name/value-based
                          filtering, True which filters out false values,
                          or a function that receives a config option name
                          as first and its value as second argument and
                          returns True if the option should be set
                          and false otherwise.
    @type filter_func: C{None} | C{bool} | callable :: n,v -> bool

    @return: success (True/False)
    @rtype: C{bool}
    """
    if not config_options:
        return allow_empty
    elif hasattr(config_options, "keys"):
        options_iter = ((k, config_options[k]) for k in config_options)
    else:
        options_iter = config_options

    if ignore_missing:
        # check has_option() before calling option_set_to(),
        # there's no way to know why option_set_to() failed
        def verbosely_check_option_exists(item, *, has_option=self.has_option):
            # item: 2-tuple (name, value)
            if has_option(item[0]):
                return True
            else:
                self.logger.warning("Unknown config option: %s", item[0])
                return False
        # ---

        options_iter = filter(verbosely_check_option_exists, options_iter)
    # --

    if not filter_func:
        def check_option_value_allowed(option, value):
            return True
    elif filter_func is True:
        def check_option_value_allowed(option, value):
            if not value:
                # COULDFIX: empty str -- is a false value
                return False
            else:
                return True
    else:
        check_option_value_allowed = filter_func
    # --

    option_set_to = self.option_set_to
    have_set_any_config_option = False
    for config_option, value in options_iter:
        if not check_option_value_allowed(config_option, value):
            self.logger.debug(
                "Ignoring %r decision for config option %s (filtered out)",
                value, config_option)
        elif option_set_to(config_option, value, source=source):
            have_set_any_config_option = True
        else:
            return False
    # --

    return True if have_set_any_config_option else allow_empty
def writeFeatures(self, outfile):
    if hasattr(self, "features"):
        outfile.write(self.features)
def create_universe(code, infilepath_or_buffer=None, from_universes=None,
                    exclude_delisted=False, append=False, replace=False):
    """
    Create a universe of securities.

    Parameters
    ----------
    code : str, required
        the code to assign to the universe (lowercase alphanumerics and hyphens only)

    infilepath_or_buffer : str or file-like object, optional
        create the universe from the conids in this file (specify '-' to read file from stdin)

    from_universes : list of str, optional
        create the universe from these existing universes

    exclude_delisted : bool
        exclude delisted securities that would otherwise be included (default is not to exclude them)

    append : bool
        append to universe if universe already exists (default False)

    replace : bool
        replace universe if universe already exists (default False)

    Returns
    -------
    dict
        status message
    """
    if append and replace:
        raise ValueError("append and replace are mutually exclusive")

    params = {}
    if from_universes:
        params["from_universes"] = from_universes
    if exclude_delisted:
        params["exclude_delisted"] = exclude_delisted
    if replace:
        params["replace"] = replace

    url = "/master/universes/{0}".format(code)

    if append:
        method = "PATCH"
    else:
        method = "PUT"

    if infilepath_or_buffer == "-":
        response = houston.request(method, url, params=params,
                                   data=to_bytes(sys.stdin))

    elif infilepath_or_buffer and hasattr(infilepath_or_buffer, "read"):
        if infilepath_or_buffer.seekable():
            infilepath_or_buffer.seek(0)
        response = houston.request(method, url, params=params,
                                   data=to_bytes(infilepath_or_buffer))

    elif infilepath_or_buffer:
        with open(infilepath_or_buffer, "rb") as f:
            response = houston.request(method, url, params=params, data=f)
    else:
        response = houston.request(method, url, params=params)

    houston.raise_for_status_with_json(response)
    return response.json()
def diff_securities(universes=None, conids=None, infilepath_or_buffer=None,
                    fields=None, delist_missing=False, delist_exchanges=None,
                    wait=False):
    """
    Flag security details that have changed in IB's system since the time
    they were last loaded into the securities master database.

    Diff can be run synchronously or asynchronously (asynchronous is the
    default and is recommended if diffing more than a handful of securities).

    Parameters
    ----------
    universes : list of str, optional
        limit to these universes

    conids : list of int, optional
        limit to these conids

    infilepath_or_buffer : str or file-like object, optional
        limit to the conids in this file (specify '-' to read file from stdin)

    fields : list of str, optional
        only diff these fields

    delist_missing : bool
        auto-delist securities that are no longer available from IB

    delist_exchanges : list of str, optional
        auto-delist securities that are associated with these exchanges

    wait : bool
        run the diff synchronously and return the diff (otherwise run
        asynchronously and log the results, if any, to flightlog)

    Returns
    -------
    dict
        dict of conids and fields that have changed (if wait), or status message
    """
    params = {}
    if universes:
        params["universes"] = universes
    if conids:
        params["conids"] = conids
    if fields:
        params["fields"] = fields
    if delist_missing:
        params["delist_missing"] = delist_missing
    if delist_exchanges:
        params["delist_exchanges"] = delist_exchanges
    if wait:
        params["wait"] = wait

    # if run synchronously use a high timeout
    timeout = 60*60*10 if wait else None
    if infilepath_or_buffer == "-":
        response = houston.get("/master/diff", params=params,
                               data=to_bytes(sys.stdin), timeout=timeout)

    elif infilepath_or_buffer and hasattr(infilepath_or_buffer, "read"):
        if infilepath_or_buffer.seekable():
            infilepath_or_buffer.seek(0)
        response = houston.get("/master/diff", params=params,
                               data=to_bytes(infilepath_or_buffer),
                               timeout=timeout)

    elif infilepath_or_buffer:
        with open(infilepath_or_buffer, "rb") as f:
            response = houston.get("/master/diff", params=params, data=f,
                                   timeout=timeout)
    else:
        response = houston.get("/master/diff", params=params, timeout=timeout)

    houston.raise_for_status_with_json(response)
    return response.json()
def __init__(self):
    self._attrs = [
        name for name in dir(_os)
        if not name.startswith('_') and hasattr(self, name)
    ]
"""Called for path inputs""" return self._validate_path(path) def _remap_output(self,operation,path): """Called for path outputs""" return self._validate_path(path) def _remap_pair(self,operation,src,dst,*args,**kw): """Called for path pairs like rename, link, and symlink operations""" return ( self._remap_input(operation+'-from',src,*args,**kw), self._remap_input(operation+'-to',dst,*args,**kw) ) if hasattr(os, 'devnull'): _EXCEPTIONS = [os.devnull,] else: _EXCEPTIONS = [] try: from win32com.client.gencache import GetGeneratePath _EXCEPTIONS.append(GetGeneratePath()) del GetGeneratePath except ImportError: # it appears pywin32 is not installed, so no need to exclude. pass class DirectorySandbox(AbstractSandbox): """Restrict operations to a single subdirectory - pseudo-chroot"""
        return (checksum_data, pkg_hash, digest_hash)

    def package(self, package, fileChecksumType, fileChecksum):
        self.warn(1, "Uploading package %s" % package)
        if not os.access(package, os.R_OK):
            self.die(-1, "Could not read file %s" % package)

        try:
            h = uploadLib.get_header(package, source=self.options.source)
        except uploadLib.UploadError, e:
            # GS: MALFORMED PACKAGE
            print "Unable to load package", package, ":", e
            return None

        if hasattr(h, 'packaging'):
            packaging = h.packaging
        else:
            packaging = 'rpm'

        if packaging == 'rpm' and self.options.nosig is None and not h.is_signed():
            # pkilambi:bug#173886:force exit to check for sig if --nosig
            raise uploadLib.UploadError("ERROR: %s: unsigned rpm (use --nosig to force)"
                                        % package)

        try:
            ret = self._push_package_v2(package, fileChecksumType, fileChecksum)
        except uploadLib.UploadError, e:
            ret, diff_level, pdict = e.args[:3]
            severities = {
                1: 'path changed',
                2: 'package resigned',
#
# * Neither the name of the European Southern Observatory nor the names
#   of its contributors may be used to endorse or promote products derived
#   from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ESO ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL ESO BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE

from django.conf import settings

ARCHIVE_ROOT = settings.ARCHIVE_ROOT if hasattr(settings, 'ARCHIVE_ROOT') else 'archives/'
if len(ARCHIVE_ROOT) > 0 and ARCHIVE_ROOT[-1] != '/':
    ARCHIVE_ROOT = ARCHIVE_ROOT + '/'


class archive_settings:
    """
    Determines the root for resources for both webupdates and announcements.
    """
    ANNOUNCEMENT_ROOT = ARCHIVE_ROOT + 'announcements'
    WEBUPDATE_ROOT = ARCHIVE_ROOT + 'webupdates'
class AbstractSandbox:
    """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""

    _active = False

    def __init__(self):
        self._attrs = [
            name for name in dir(_os)
            if not name.startswith('_') and hasattr(self, name)
        ]

    def _copy(self, source):
        for name in self._attrs:
            setattr(os, name, getattr(source, name))

    def run(self, func):
        """Run 'func' under os sandboxing"""
        try:
            self._copy(self)
            if _file:
                builtins.file = self._file
            builtins.open = self._open
            self._active = True
            return func()
        finally:
            self._active = False
            if _file:
                builtins.file = _file
            builtins.open = _open
            self._copy(_os)

    def _mk_dual_path_wrapper(name):
        original = getattr(_os, name)

        def wrap(self, src, dst, *args, **kw):
            if self._active:
                src, dst = self._remap_pair(name, src, dst, *args, **kw)
            return original(src, dst, *args, **kw)

        return wrap

    for name in ["rename", "link", "symlink"]:
        if hasattr(_os, name):
            locals()[name] = _mk_dual_path_wrapper(name)

    def _mk_single_path_wrapper(name, original=None):
        original = original or getattr(_os, name)

        def wrap(self, path, *args, **kw):
            if self._active:
                path = self._remap_input(name, path, *args, **kw)
            return original(path, *args, **kw)

        return wrap

    if _file:
        _file = _mk_single_path_wrapper('file', _file)
    _open = _mk_single_path_wrapper('open', _open)

    for name in [
        "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
        "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
        "startfile", "mkfifo", "mknod", "pathconf", "access"
    ]:
        if hasattr(_os, name):
            locals()[name] = _mk_single_path_wrapper(name)

    def _mk_single_with_return(name):
        original = getattr(_os, name)

        def wrap(self, path, *args, **kw):
            if self._active:
                path = self._remap_input(name, path, *args, **kw)
                return self._remap_output(name, original(path, *args, **kw))
            return original(path, *args, **kw)

        return wrap

    for name in ['readlink', 'tempnam']:
        if hasattr(_os, name):
            locals()[name] = _mk_single_with_return(name)

    def _mk_query(name):
        original = getattr(_os, name)

        def wrap(self, *args, **kw):
            retval = original(*args, **kw)
            if self._active:
                return self._remap_output(name, retval)
            return retval

        return wrap

    for name in ['getcwd', 'tmpnam']:
        if hasattr(_os, name):
            locals()[name] = _mk_query(name)

    def _validate_path(self, path):
        """Called to remap or validate any path, whether input or output"""
        return path

    def _remap_input(self, operation, path, *args, **kw):
        """Called for path inputs"""
        return self._validate_path(path)

    def _remap_output(self, operation, path):
        """Called for path outputs"""
        return self._validate_path(path)

    def _remap_pair(self, operation, src, dst, *args, **kw):
        """Called for path pairs like rename, link, and symlink operations"""
        return (
            self._remap_input(operation + '-from', src, *args, **kw),
            self._remap_input(operation + '-to', dst, *args, **kw)
        )
def __contains__(self, attr):
    return hasattr(self, attr)
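Wiring a `__contains__` like this into a class makes the `in` operator an attribute-existence test; the `Config` class here is invented for the demo:

class Config:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __contains__(self, attr):
        return hasattr(self, attr)

cfg = Config(debug=True)
assert 'debug' in cfg        # delegates to hasattr(cfg, 'debug')
assert 'verbose' not in cfg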
def __new__(cls, name, bases, attrs):
    super_new = super(BaseGeneralizationMeta, cls).__new__
    parents = [b for b in bases if isinstance(b, BaseGeneralizationMeta)]

    # Get the declared Meta inner class before the super-metaclass removes
    # it:
    meta = attrs.get('Meta')

    # We must remove the specialization declarations from the Meta inner
    # class since ModelBase will raise a TypeError if it encounters these:
    if meta:
        specialization = meta.__dict__.pop('specialization', None)
    else:
        specialization = None

    new_model = super_new(cls, name, bases, attrs)

    # Ensure that the _meta attribute has some additional attributes:
    if not hasattr(new_model._meta, 'abstract_specialization_managers'):
        new_model._meta.abstract_specialization_managers = []
    if not hasattr(new_model._meta, 'concrete_specialization_managers'):
        new_model._meta.concrete_specialization_managers = []

    if not parents:
        return new_model

    if new_model._meta.abstract:
        # This is an abstract base-class and no specializations should be
        # declared on the inner class:
        if specialization is not None:
            # We need to ensure this is actually None and not just evaluates
            # to False as we enforce that it's not declared:
            raise TypeError(
                "Abstract models should not have a specialization declared "
                "on their inner Meta class"
            )
    elif BaseGeneralizationModel in bases:
        # This must be a direct descendant from the BaseGeneralizationModel.
        # Prepare the look-up mapping of specializations which the sub-
        # classes will update:
        new_model._meta.specializations = {}
        new_model._meta.specialization = PATH_SEPERATOR
        if specialization is not None:
            # We need to ensure this is actually None and not just evaluates
            # to False as we enforce that it's not declared:
            raise TypeError(
                "General models should not have a specialization declared "
                "on their inner Meta class"
            )
    else:
        if specialization is None:
            raise TypeError(
                "Specialized models must declare specialization on their "
                "inner Meta class"
            )
        if not SPECIALIZATION_RE.match(specialization):
            raise ValueError("Specializations must be alphanumeric string")

        parent_class = new_model.__base__
        new_model._meta.specializations = {}
        new_model._generalized_parent = parent_class
        path_specialization = '%s%s%s' % (
            parent_class._meta.specialization,
            specialization,
            PATH_SEPERATOR
        )
        # Calculate the specialization as a path taking into account the
        # specialization of any ancestors:
        new_model._meta.specialization = path_specialization

        # Update the specializations mapping on the General model so that it
        # knows to use this class for that specialization:
        ancestor = getattr(new_model, '_generalized_parent', None)
        while ancestor:
            ancestor._meta.specializations[
                new_model._meta.specialization
            ] = new_model
            ancestor = getattr(ancestor, '_generalized_parent', None)

        parent_class._meta.specializations[path_specialization] = new_model

    is_proxy = new_model._meta.proxy

    if getattr(new_model, '_default_specialization_manager', None):
        if not is_proxy:
            new_model._default_specialization_manager = None
            new_model._base_specialization_manager = None
        else:
            new_model._default_specialization_manager = \
                new_model._default_specialization_manager._copy_to_model(
                    new_model
                )
            new_model._base_specialization_manager = \
                new_model._base_specialization_manager._copy_to_model(
                    new_model
                )

    for obj_name, obj in attrs.items():
        # We need to do this to ensure that a declared SpecializationManager
        # will be correctly set-up:
        if isinstance(obj, SpecializationManager):
            new_model.add_to_class(obj_name, obj)

    for base in parents:
        # Inherit managers from the abstract base classes.
        new_model.copy_managers(
            base._meta.abstract_specialization_managers
        )

        # Proxy models inherit the non-abstract managers from their base,
        # unless they have redefined any of them.
        if is_proxy:
            new_model.copy_managers(
                base._meta.concrete_specialization_managers
            )

    specialized_model_prepared.send(sender=new_model)

    new_model.model_specialization = new_model._meta.specialization
    return new_model
def updateTypes(names, typeName):
    if not hasattr(self, "featureTypes"):
        self.featureTypes = dict()
    for name in names:
        self.featureTypes[name] = typeName
def isDiskReplicationInProgress(self):
    return hasattr(self, "diskReplicate")
def _str_as_bool(val):
    try:
        val = bool(strtobool(val))
    except (AttributeError, ValueError):
        pass
    return val if isinstance(val, bool) else None


@enum.unique
class SettingsConsole(enum.Enum):
    OFF = 0
    WRAP = 1
    REDIRECT = 2


if hasattr(multiprocessing, "get_all_start_methods"):
    AVAILABLE_START_METHODS = multiprocessing.get_all_start_methods()
else:
    # TODO: this can go away when we deprecate Python 2
    AVAILABLE_START_METHODS = ["fork", "spawn"]
# defaulting to spawn for now, fork needs more testing
DEFAULT_START_METHOD = "spawn"


class Settings(object):
    """Settings Constructor

    Arguments:
        entity: personal user or team to use for Run.
        project: project name for the Run.

    Raises:
def update_device_info(cls, vm, device_conf):
    # FIXME! We need to gather as much info as possible from the libvirt.
    # In the future we can return this real data to management instead of
    # vm's conf
    for x in vm.domain.get_device_elements('disk'):
        alias, devPath, name = _get_drive_identification(x)
        readonly = vmxml.find_first(x, 'readonly', None) is not None
        bootOrder = vmxml.find_attr(x, 'boot', 'order')

        devType = vmxml.attr(x, 'device')
        if devType == 'disk':
            # raw/qcow2
            drv = vmxml.find_attr(x, 'driver', 'type')
        else:
            drv = 'raw'

        # Get disk address
        address = vmxml.device_address(x)

        # Keep data as dict for easier debugging
        deviceDict = {'path': devPath, 'name': name,
                      'readonly': readonly, 'bootOrder': bootOrder,
                      'address': address, 'type': devType}

        # display indexed pairs of ordered values from 2 dicts
        # such as {key_1: (valueA_1, valueB_1), ...}
        def mergeDicts(deviceDef, dev):
            return dict((k, (deviceDef[k], getattr(dev, k, None)))
                        for k in deviceDef.iterkeys())

        vm.log.debug('Looking for drive with attributes %s', deviceDict)
        for d in device_conf:
            # When we analyze a disk device that was already discovered in
            # the past (generally as soon as the VM is created) we should
            # verify that the cached path is the one used in libvirt.
            # We already hit few times the problem that after a live
            # migration the paths were not in sync anymore (BZ#1059482).
            if (hasattr(d, 'alias') and d.alias == alias
                    and d.path != devPath):
                vm.log.warning('updating drive %s path from %s to %s',
                               d.alias, d.path, devPath)
                d.path = devPath
            if d.path == devPath:
                d.name = name
                d.type = devType
                d.drv = drv
                d.alias = alias
                d.address = address
                d.readonly = readonly
                if bootOrder:
                    d.bootOrder = bootOrder
                vm.log.debug('Matched %s', mergeDicts(deviceDict, d))

        # Update vm's conf with address for known disk devices
        knownDev = False
        for dev in vm.conf['devices']:
            # See comment in previous loop. This part is used to update
            # the vm configuration as well.
            if ('alias' in dev and dev['alias'] == alias
                    and dev['path'] != devPath):
                vm.log.warning('updating drive %s config path from %s '
                               'to %s', dev['alias'], dev['path'], devPath)
                dev['path'] = devPath
            if (dev['type'] == hwclass.DISK and dev['path'] == devPath):
                dev['name'] = name
                dev['address'] = address
                dev['alias'] = alias
                dev['readonly'] = str(readonly)
                if bootOrder:
                    dev['bootOrder'] = bootOrder
                vm.log.debug('Matched %s', mergeDicts(deviceDict, dev))
                knownDev = True

        # Add unknown disk device to vm's conf
        if not knownDev:
            archIface = DEFAULT_INTERFACE_FOR_ARCH[vm.arch]
            iface = archIface if address['type'] == 'drive' else 'pci'
            diskDev = {'type': hwclass.DISK, 'device': devType,
                       'iface': iface, 'path': devPath, 'name': name,
                       'address': address, 'alias': alias,
                       'readonly': str(readonly)}
            if bootOrder:
                diskDev['bootOrder'] = bootOrder
            vm.log.warn('Found unknown drive: %s', diskDev)
            vm.conf['devices'].append(diskDev)
def test_gae_ip2(ip, appid="xxnet-1", use_openssl=True):
    if use_openssl:
        try:
            ssl_sock = connect_ssl(ip, timeout=max_timeout)
            get_ssl_cert_domain(ssl_sock)
        except socket.timeout:
            if __name__ == "__main__":
                xlog.warn("connect timeout")
            return False
        except Exception as e:
            if __name__ == "__main__":
                xlog.exception("test_gae_ip %s e:%r", ip, e)
            return False

        if not hasattr(ssl_sock._connection, "protos"):
            if __name__ == "__main__":
                xlog.warn("ip:%s not support http/2", ip)

            try:
                if not check_goagent(ssl_sock, appid):
                    return False
                else:
                    return ssl_sock
            except:
                return False
    else:
        ssl_sock = None

    conn = hyper.HTTP20Connection(ssl_sock, host='%s.appspot.com' % appid, ip=ip, port=443)
    try:
        conn.request('GET', '/_gh/')
    except Exception as e:
        #xlog.exception("gae %r", e)
        xlog.debug("ip:%s http/1.1:%r", ip, e)
        return False

    try:
        response = conn.get_response()
    except Exception as e:
        if __name__ == "__main__":
            xlog.exception("http2 get response fail:%r", e)
        return False

    xlog.debug("ip:%s http/2", ip)

    if response.status == 404:
        if __name__ == "__main__":
            xlog.warn("app check %s status:%d", appid, response.status)
        return False

    if response.status == 503:
        # out of quota
        server_type = response.headers.get('Server', "")
        if "gws" not in server_type and "Google Frontend" not in server_type and "GFE" not in server_type:
            if __name__ == "__main__":
                xlog.warn("503 but server type:%s", server_type)
            return False
        else:
            if __name__ == "__main__":
                xlog.info("503 server type:%s", server_type)
            return ssl_sock

    if response.status != 200:
        if __name__ == "__main__":
            xlog.warn("app check %s ip:%s status:%d", appid, ip, response.status)
        return False

    content = response.read()
    if "GoAgent" not in content:
        if __name__ == "__main__":
            xlog.warn("app check %s content:%s", appid, content)
        return False

    if __name__ == "__main__":
        xlog.info("check_goagent ok")
    return ssl_sock
def setImage(self, img, autoRange=True, autoLevels=True, levels=None,
             axes=None, xvals=None, pos=None, scale=None):
    """Set the image to be displayed in the widget.

    Options are:
      img:        ndarray; the image to be displayed.
      autoRange:  bool; whether to scale/pan the view to fit the image.
      autoLevels: bool; whether to update the white/black levels to fit
                  the image.
      levels:     (min, max); the white and black level values to use.
      axes:       {'t':0, 'x':1, 'y':2, 'c':3}; Dictionary indicating the
                  interpretation for each axis. This is only needed to
                  override the default guess.
    """
    if not isinstance(img, np.ndarray):
        raise Exception("Image must be specified as ndarray.")
    self.image = img

    if xvals is not None:
        self.tVals = xvals
    elif hasattr(img, 'xvals'):
        try:
            self.tVals = img.xvals(0)
        except:
            self.tVals = np.arange(img.shape[0])
    else:
        self.tVals = np.arange(img.shape[0])
    #self.ui.timeSlider.setValue(0)
    #self.ui.normStartSlider.setValue(0)
    #self.ui.timeSlider.setMaximum(img.shape[0]-1)

    if axes is None:
        if img.ndim == 2:
            self.axes = {'t': None, 'x': 0, 'y': 1, 'c': None}
        elif img.ndim == 3:
            if img.shape[2] <= 4:
                self.axes = {'t': None, 'x': 0, 'y': 1, 'c': 2}
            else:
                self.axes = {'t': 0, 'x': 1, 'y': 2, 'c': None}
        elif img.ndim == 4:
            self.axes = {'t': 0, 'x': 1, 'y': 2, 'c': 3}
        else:
            raise Exception("Can not interpret image with dimensions %s" % (str(img.shape)))
    elif isinstance(axes, dict):
        self.axes = axes.copy()
    elif isinstance(axes, list) or isinstance(axes, tuple):
        self.axes = {}
        for i in range(len(axes)):
            self.axes[axes[i]] = i
    else:
        raise Exception("Can not interpret axis specification %s. Must be like {'t': 2, 'x': 0, 'y': 1} or ('t', 'x', 'y', 'c')" % (str(axes)))

    for x in ['t', 'x', 'y', 'c']:
        self.axes[x] = self.axes.get(x, None)

    self.imageDisp = None

    if autoLevels:
        self.autoLevels()
    if levels is not None:
        self.levelMax = levels[1]
        self.levelMin = levels[0]

    self.currentIndex = 0
    self.updateImage()
    if self.ui.roiBtn.isChecked():
        self.roiChanged()

    if self.axes['t'] is not None:
        #self.ui.roiPlot.show()
        self.ui.roiPlot.setXRange(self.tVals.min(), self.tVals.max())
        self.timeLine.setValue(0)
        #self.ui.roiPlot.setMouseEnabled(False, False)
        if len(self.tVals) > 1:
            start = self.tVals.min()
            stop = self.tVals.max() + abs(self.tVals[-1] - self.tVals[0]) * 0.02
        elif len(self.tVals) == 1:
            start = self.tVals[0] - 0.5
            stop = self.tVals[0] + 0.5
        else:
            start = 0
            stop = 1
        for s in [self.timeLine, self.normRgn]:
            s.setBounds([start, stop])
    #else:
        #self.ui.roiPlot.hide()

    self.imageItem.resetTransform()
    if scale is not None:
        self.imageItem.scale(*scale)
    if pos is not None:
        self.imageItem.setPos(*pos)

    if autoRange:
        self.autoRange()
    self.roiClicked()
def test_broadcast_ragged_batch_shape(self, base_jd_class, jda_class):
    base_jd_models = {}

    # Writing a JD with ragged batch shape will broadcast the first
    # distribution over the second (though note, this model breaks
    # `log_prob` with nontrivial sample shape).
    def coroutine():
        x = yield Root(tfd.Normal(0., scale=1.))
        yield tfd.Normal(x[..., tf.newaxis], [1., 2., 3., 4., 5.])
    base_jd_models[tfd.JointDistributionCoroutine] = coroutine
    base_jd_models[tfd.JointDistributionSequential] = [
        tfd.Normal(0., scale=1.),
        lambda x: tfd.Normal(x[..., tf.newaxis], [1., 2., 3., 4., 5.])
    ]
    base_jd_models[tfd.JointDistributionNamed] = {
        'x': tfd.Normal(0., scale=1.),
        'y': lambda x: tfd.Normal(x[..., tf.newaxis], [1., 2., 3., 4., 5.])
    }

    # But we can get equivalent behavior in a JDCA by expanding dims so that
    # the batch dimensions line up.
    jd_auto_models = {}

    def coroutine_auto():
        x = yield tfd.Normal(0., scale=[1.])
        yield tfd.Normal(x, [1., 2., 3., 4., 5.])
    jd_auto_models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_auto
    jd_auto_models[tfd.JointDistributionSequentialAutoBatched] = [
        tfd.Normal(0., scale=[1.]),
        lambda x: tfd.Normal(x, [1., 2., 3., 4., 5.])
    ]
    jd_auto_models[tfd.JointDistributionNamedAutoBatched] = (
        collections.OrderedDict((
            ('x', tfd.Normal(0., scale=[1.])),
            ('y', lambda x: tfd.Normal(x, [1., 2., 3., 4., 5.])))))

    jd_broadcasting = base_jd_class(base_jd_models[base_jd_class])

    # This model's broadcasting behavior is a footgun (it can break inference
    # routines and cause silently incorrect optimization); it should be
    # disallowed by `validate_args`.
    with self.assertRaisesRegexp(
            Exception, 'Component batch shapes are inconsistent'):
        jda_invalid = jda_class(jd_auto_models[jda_class],
                                batch_ndims=1, validate_args=True)
        _ = self.evaluate(jda_invalid.log_prob(
            jda_invalid.sample(seed=test_util.test_seed())))

    # But, if the user wants to run with no guardrails, one can eke out
    # performance wins when evaluating a shared value over multiple models.
    jda_broadcasting = jda_class(jd_auto_models[jda_class], batch_ndims=1)

    self.assertAllEqual(
        jda_broadcasting._model_flatten(jda_broadcasting.event_shape),
        [[], []])
    self.assertAllEqual(jda_broadcasting.batch_shape, [5])

    joint_sample = jda_broadcasting.sample(seed=test_util.test_seed())
    x_sample, y_sample = self.evaluate(
        list(joint_sample.values()) if hasattr(joint_sample, 'values')
        else joint_sample)
    # The model samples only a single value for x, shared across the batch.
    self.assertAllEqual(x_sample.shape, [1])
    self.assertAllEqual(y_sample.shape, [5])

    lp_jd_broadcast = self.evaluate(jd_broadcasting.log_prob(
        jd_broadcasting._model_unflatten([x_sample[..., 0], y_sample])))
    lp_jda_broadcast = self.evaluate(jda_broadcasting.log_prob(
        jda_broadcasting._model_unflatten([x_sample, y_sample])))
    self.assertAllEqual(lp_jda_broadcast.shape, [5])
    self.assertAllEqual(lp_jd_broadcast, lp_jda_broadcast)

    # Try drawing multiple samples and computing log-prob.
    joint_sample = self.evaluate(jda_broadcasting.sample(
        [2, 3], seed=test_util.test_seed()))
    lp_jda_broadcast = self.evaluate(jda_broadcasting.log_prob(joint_sample))
    self.assertAllEqual(lp_jda_broadcast.shape, [2, 3, 5])
def connect_ssl(ip, port=443, timeout=5, check_cert=True):
    ip_port = (ip, port)

    if config.PROXY_ENABLE:
        sock = socks.socksocket(socket.AF_INET)
    else:
        sock = socket.socket(socket.AF_INET)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # set struct linger{l_onoff=1, l_linger=0} to avoid 10048 socket error
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
    # resize socket recv buffer 8K->32K to improve browser-related application performance
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32 * 1024)
    sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True)
    sock.settimeout(timeout)

    ssl_sock = openssl_wrap.SSLConnection(openssl_context, sock, ip)
    ssl_sock.set_connect_state()

    time_begin = time.time()
    ssl_sock.connect(ip_port)
    time_connected = time.time()
    ssl_sock.do_handshake()

    try:
        h2 = ssl_sock.get_alpn_proto_negotiated()
        ssl_sock.h2 = (h2 == "h2")
    except:
        # older bindings expose NPN via the "protos" attribute instead of ALPN
        ssl_sock.h2 = (hasattr(ssl_sock._connection, "protos") and
                       ssl_sock._connection.protos == "h2")

    time_handshaked = time.time()

    # report network ok
    check_local_network.network_stat = "OK"
    check_local_network.last_check_time = time_handshaked
    check_local_network.continue_fail_count = 0

    cert = ssl_sock.get_peer_certificate()
    if not cert:
        raise socket.error(' certificate is none')

    if check_cert:
        issuer_commonname = next(
            (v for k, v in cert.get_issuer().get_components() if k == 'CN'), '')
        if __name__ == "__main__":
            xlog.debug("issued by:%s", issuer_commonname)
        if not issuer_commonname.startswith('Google'):
            raise socket.error(' certificate is issued by %r, not Google' % (issuer_commonname))

    connct_time = int((time_connected - time_begin) * 1000)
    handshake_time = int((time_handshaked - time_connected) * 1000)

    # sometimes we want to use the raw tcp socket directly (select/epoll),
    # so attach it to the ssl socket.
    ssl_sock._sock = sock
    ssl_sock.connct_time = connct_time  # (sic) attribute name kept as callers expect it
    ssl_sock.handshake_time = handshake_time

    return ssl_sock
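# Hypothetical direct use of connect_ssl (placeholder IP; config, xlog and
# openssl_wrap come from the surrounding project, so this will not run
# standalone):
#
#   sock = connect_ssl("203.0.113.10", timeout=5)
#   print("h2:%s handshake:%d ms" % (sock.h2, sock.handshake_time))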
def __add__(self, b):
    if not hasattr(b, "is_kernel"):
        return Sum(Constant(c=float(b)), self)
    return Sum(self, b)
def show(img=None, ax=None, title=None, sz=None, bbs=None, confs=None,
         texts=None, bb_colors=None, cmap='gray', grid=False, save_path=None,
         text_sz=10, df=None, pts=None, **kwargs):
    'show an image'
    try:
        if isinstance(img, (str, Path)):
            img = read(img, 1)
        if isinstance(img, torch.Tensor):
            img = img.cpu().detach().numpy().copy()
        if isinstance(img, PIL.Image.Image):
            img = np.array(img)
    except:
        ...
    if not isinstance(img, np.ndarray):
        display(img)
        return

    if len(img.shape) == 3 and len(img) == 3:
        # this is likely a channels-first torch tensor
        img = img.transpose(1, 2, 0)
    img = np.copy(img)
    if img.max() == 255:
        img = img.astype(np.uint8)

    h, w = img.shape[:2]
    if sz is None:
        if w < 50:
            sz = 1
        elif w < 150:
            sz = 2
        elif w < 300:
            sz = 5
        elif w < 600:
            sz = 10
        else:
            sz = 20
    if isinstance(sz, int):
        sz = (sz, sz)

    if ax is None:
        fig, ax = plt.subplots(figsize=kwargs.get('figsize', sz))
        _show = True
    else:
        _show = False

    if df is not None:
        try:
            texts = df.text
        except:
            pass
        bbs = df2bbs(df)  # assumes df has 'x,y,X,Y' columns
    if isinstance(texts, pd.core.series.Series):
        texts = texts.tolist()
    if confs:
        colors = [[255, 0, 0], [223, 111, 0], [191, 191, 0],
                  [79, 159, 0], [0, 128, 0]]
        bb_colors = [colors[int(cnf * 5) - 1] for cnf in confs]
    if isinstance(bbs, np.ndarray):
        bbs = bbs.astype(np.uint16).tolist()

    if bbs is not None:
        if 'th' in kwargs:
            th = kwargs.pop('th')
        else:
            if w < 800:
                th = 2
            elif w < 1600:
                th = 3
            else:
                th = 4
        if hasattr(bbs, 'shape'):
            if isinstance(bbs, torch.Tensor):
                bbs = bbs.cpu().detach().numpy()
            bbs = bbs.astype(np.uint32).tolist()
        _x_ = np.array(bbs).max()
        rel = _x_ < 1.5
        if rel:
            bbs = [BB(bb).absolute((h, w)) for bb in bbs]
        # fixed: `is 'random'` compared identity, not equality
        bb_colors = ([[randint(255) for _ in range(3)] for _ in range(len(bbs))]
                     if bb_colors == 'random' else bb_colors)
        bb_colors = [bb_colors] * len(bbs) if isinstance(bb_colors, str) else bb_colors
        bb_colors = [None] * len(bbs) if bb_colors is None else bb_colors
        img = C(img) if len(img.shape) == 2 else img
        for ix, bb in enumerate(bbs):
            rect(img, tuple(bb), c=bb_colors[ix], th=th)

    if texts is not None:
        if hasattr(texts, 'shape'):
            if isinstance(texts, torch.Tensor):
                texts = texts.cpu().detach().numpy()
            texts = texts.tolist()
        # fixed: `is 'ixs'` compared identity, not equality
        if texts == 'ixs':
            texts = [i for i in range(len(bbs))]
        if callable(texts):
            texts = [texts(bb) for bb in bbs]
        assert len(texts) == len(bbs), 'Expecting as many texts as bounding boxes'
        texts = list(map(str, texts))
        texts = ['*' if len(t.strip()) == 0 else t for t in texts]
        for ix, text in enumerate(texts):
            puttext(ax, text.replace('$', '\$'), tuple(bbs[ix][:2]), size=text_sz)

    if title:
        ax.set_title(title, fontdict=kwargs.pop('fontdict', None))
    if pts:
        pts = np.array(pts)
        if pts.max() < 1.1:
            pts = (pts * np.array([[w, h]])).astype(np.uint16).tolist()
        ax.scatter(*zip(*pts))

    ax.imshow(img, cmap=cmap, **kwargs)

    if grid:
        ax.grid()
    else:
        ax.set_axis_off()

    if save_path:
        fig.savefig(save_path)
        return
    if _show:
        plt.show()
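# A hypothetical call to show() (file name, boxes and labels invented here):
# draws two labelled boxes over the image and also writes the figure to disk.
#
#   show('sample.jpg',
#        bbs=[[10, 10, 120, 120], [40, 60, 200, 180]],  # x, y, X, Y in pixels
#        texts=['cat', 'dog'],
#        title='detections',
#        save_path='detections.png')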
def _path(self):
    if hasattr(self, "_parent"):
        return self._parent._path() + [self._yang_name]
    else:
        return [u'rbridge-id', u'ssh', u'client']
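# The method above is typical of generated YANG bindings: each node defers to
# its parent until the root supplies the base of the path. A hypothetical,
# self-contained illustration of the same recursion (class and names invented
# here, not from the original bindings):
class _Node(object):
    def __init__(self, yang_name, parent=None):
        self._yang_name = yang_name
        if parent is not None:
            self._parent = parent

    def _path(self):
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        return [self._yang_name]

leaf = _Node(u'client', parent=_Node(u'ssh', parent=_Node(u'rbridge-id')))
assert leaf._path() == [u'rbridge-id', u'ssh', u'client']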
def __mul__(self, b):
    if not hasattr(b, "is_kernel"):
        return Product(Constant(c=float(b)), self)
    return Product(self, b)
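# A self-contained sketch of the kernel-combinator pattern that the __add__
# and __mul__ overloads above rely on. The stub classes below are
# hypothetical stand-ins for the library's real Constant/Sum/Product kernels,
# kept only detailed enough to show how scalars are lifted into kernels:
class Kernel(object):
    is_kernel = True

    def __add__(self, b):
        if not hasattr(b, "is_kernel"):
            return Sum(Constant(c=float(b)), self)
        return Sum(self, b)

    def __mul__(self, b):
        if not hasattr(b, "is_kernel"):
            return Product(Constant(c=float(b)), self)
        return Product(self, b)

class Constant(Kernel):
    def __init__(self, c):
        self.c = c

class Sum(Kernel):
    def __init__(self, k1, k2):
        self.k1, self.k2 = k1, k2

class Product(Kernel):
    def __init__(self, k1, k2):
        self.k1, self.k2 = k1, k2

# Arithmetic on kernels builds an expression tree:
#   k * 2.0 + 3.0  ->  Sum(Constant(3.0), Product(Constant(2.0), k))
expr = Kernel() * 2.0 + 3.0
assert isinstance(expr, Sum) and isinstance(expr.k2, Product)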
def _train_step(self, bandit, policy):
    P, B = policy, bandit
    C = B.get_context() if hasattr(B, "get_context") else None
    rwd, arm = P.act(B, C)
    oracle_rwd = B.oracle_payoff(C)
    return rwd, arm, oracle_rwd
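# A hypothetical minimal bandit/policy pair satisfying the duck-typed
# contract _train_step expects: the policy exposes act(bandit, context) ->
# (reward, arm), the bandit exposes oracle_payoff(context), and get_context
# is optional (absent here, so C is None). Names are illustrative only.
import random

class ContextFreeBandit(object):
    def __init__(self, payoffs):
        self.payoffs = payoffs  # mean payoff per arm

    def pull(self, arm):
        return random.gauss(self.payoffs[arm], 1.0)

    def oracle_payoff(self, context):
        return max(self.payoffs)  # best achievable mean payoff

class RandomPolicy(object):
    def act(self, bandit, context):
        arm = random.randrange(len(bandit.payoffs))
        return bandit.pull(arm), arm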
def __del__(self):
    """Explicit close at deletion."""
    if hasattr(self, "closed") and not self.closed:
        self.close()
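# The hasattr guard above matters because __del__ can run on a partially
# constructed object: if __init__ raises before self.closed is assigned, an
# unguarded `if not self.closed` would itself raise AttributeError during
# garbage collection. A hypothetical class using the same pattern:
class Resource(object):
    def __init__(self, path):
        self.handle = open(path)  # may raise before the next line runs
        self.closed = False

    def close(self):
        if not self.closed:
            self.handle.close()
            self.closed = True

    def __del__(self):
        if hasattr(self, "closed") and not self.closed:
            self.close()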
def __iter__(self):
    # Cache the related queryset on first iteration so repeated loops do not
    # hit the database again. The attribute name is a plain string, so
    # setattr/getattr bypass class-level name mangling and stay consistent
    # with the hasattr check.
    if not hasattr(self, '__variants'):
        setattr(self, '__variants', self.variants.all())
    return iter(getattr(self, '__variants'))
def is_list_like(obj):
    return hasattr(obj, '__getitem__') and hasattr(obj, 'append')
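# A quick illustration of the duck-typing check above (these calls are
# examples, not from the original source): only objects supporting both
# indexing and append qualify.
assert is_list_like([1, 2, 3])        # list: __getitem__ and append
assert not is_list_like((1, 2, 3))    # tuple: indexable but immutable
assert not is_list_like("abc")        # str: indexable, no append
assert not is_list_like({"a": 1})     # dict: __getitem__, no append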