def __init__(self, **kwords):
    """Initialize the table container from keyword arguments.

    Recognized keywords: ``dimensions`` (list of sizes, default ``[]``),
    ``storage_precision`` (default ``None``) and ``init_data`` (default
    ``None``). All keywords are also recorded verbatim in an
    ``Attributes`` object.
    """
    self.attributes = Attributes(**kwords)
    dims = attr_init(kwords, "dimensions", [])
    precision = attr_init(kwords, "storage_precision", None)
    seed = attr_init(kwords, "init_data", None)
    # Backing storage: a nested list shaped by ``dims``.
    self.table = create_multi_dim_array(dims, init_data=seed)
    self.dimensions = dims
    self.storage_precision = precision
def authnRequest(self):
    """Build a SAML 2.0 AuthnRequest and return it base64-encoded.

    The request targets the IdP SSO endpoint from ``self.setting`` and
    advertises this service provider's privacy notice URL as an
    ``ic:PrivacyNotice`` extension.

    Returns:
        The base64-encoded XML serialization of the request.
    """
    # Local import keeps this fix self-contained within the method.
    from datetime import datetime

    doc = Document()
    authnRequest = self._createElementNS(
        doc, 'urn:oasis:names:tc:SAML:2.0:protocol', 'saml2p:AuthnRequest')
    authnRequest.setAttribute("Destination", self.setting.samlSsoEndpointUrl)
    # SAML 2.0 core defines this attribute as "ProviderName" (no hyphen);
    # "Provider-Name" is not a valid AuthnRequest attribute.
    authnRequest.setAttribute("ProviderName", "SuisseID Service Provider AG")
    authnRequest.setAttribute("ForceAuthn", "true")
    authnRequest.setAttribute("ID", str(uuid.uuid4()))
    authnRequest.setAttribute("Version", "2.0")
    # IssueInstant must reflect the actual creation time (UTC xs:dateTime);
    # the previous hard-coded 2012 timestamp would be rejected by any IdP
    # that validates request freshness.
    authnRequest.setAttribute(
        "IssueInstant", datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'))
    authnRequest.setAttribute("AssertionConsumerServiceURL",
                              self.setting.returnUrl)

    issuer = self._createElementNS(
        doc, 'urn:oasis:names:tc:SAML:2.0:assertion', 'saml2:Issuer')
    issuer.appendChild(doc.createTextNode(self.setting.spName))
    authnRequest.appendChild(issuer)

    extensions = self._createElementNS(
        doc, 'urn:oasis:names:tc:SAML:2.0:protocol', 'saml2p:Extensions')
    attributes = Attributes()
    attributes.enable()
    attributes.append(extensions)

    privacyNotice = self._createElementNS(
        doc, 'http://schemas.xmlsoap.org/ws/2005/05/identity',
        'ic:PrivacyNotice')
    privacyNotice.setAttribute("Version", "1")
    privacyNotice.appendChild(
        doc.createTextNode('http://localhost:8888/auth/privacy'))
    extensions.appendChild(privacyNotice)
    authnRequest.appendChild(extensions)

    doc.appendChild(authnRequest)
    return base64.b64encode(str(doc.toxml()))
def build_correlaions(self):
    """Plot a masked correlation heatmap of all features and print the
    strongest absolute pairwise correlations.

    Loads the CSV at ``self.data_path``, drops the ``date`` column,
    restricts to ``Attributes.ALL_FEATURES``, and shows a seaborn heatmap
    with the upper triangle masked (it mirrors the lower one).
    """
    # NOTE(review): method name has a typo ("correlaions") but is kept
    # unchanged because callers may depend on it.
    dataset = pd.read_csv(self.data_path)
    dataset = dataset.drop(labels='date', axis=1)
    attr = Attributes()
    data = dataset[attr.ALL_FEATURES]
    relation = data.corr()
    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `bool` is the supported boolean dtype spelling.
    masking = np.zeros_like(relation, dtype=bool)
    masking[np.triu_indices_from(masking)] = True
    sns.heatmap(relation, annot=True, fmt=".2f", mask=masking)
    plt.xticks(range(len(relation.columns)), relation.columns)
    plt.yticks(range(len(relation.columns)), relation.columns)
    plt.show()
    print(self.__abs_correlations(data, 40))
def __init__(self, config):
    """Store the configuration and build the user network plus its
    attribute handler."""
    logging.info(' Network_selecter: Constructor')
    self.config = config
    self.network = Network_User(config)
    self.attributes = Attributes(config)
    # Populated later; no attribute set selected yet.
    self.attrs_0 = None
def getNewAttributes(self, attributes=None, args=None):
    u"""Return an ``Attributes`` instance for this node.

    If *attributes* is a plain ``dict`` it is converted into an
    ``Attributes`` instance; if it is ``None`` an empty instance is
    created. Otherwise it is kept as-is — no copy is made, so later
    manipulation of the tree is reflected in the original attribute set.

    *args* may be a single dict or a list/tuple of dicts; each non-None
    entry is merged (via ``update``) on top of the result. Subclasses may
    override this method to substitute their own attribute-set class.
    """
    if isinstance(attributes, dict):
        attributes = Attributes(**attributes)
    elif attributes is None:
        attributes = Attributes()
    if args is None:
        return attributes
    # Normalize a single dict to a one-element sequence.
    if not isinstance(args, (list, tuple)):
        args = (args,)
    for extra in args:
        if extra is not None:
            attributes.update(extra)
    return attributes
def output(self):
    """Build the terrain-correction array for the current time step and
    return it rendered as a plot.

    Instantiates ``Attributes`` and ``Correction``, stores them (plus the
    derived slope/aspect grids) on the instance, updates the time-slider
    bounds from the sun-position table, and renders the correction array.
    """
    resolution = self.elevation_array.shape[0]
    self.attributes = Attributes(self.elevation_array,
                                 resolution=resolution,
                                 projection=c.PROJECTION,
                                 side_len=c.SIDE_LEN)
    self.slope, self.aspect = self.attributes.calc_attributes()
    self.correct = Correction(attribute_grids=(self.slope, self.aspect),
                              local_timezone=c.TIMEZONE,
                              date_str=self.DEM[:8],
                              lat_lon=c.LAT_LON)
    # The time slider may only range over the available sun positions.
    last_index = self.correct.sunposition_df.shape[0] - 1
    self.param.time.bounds = (0, last_index)
    print(self.param.time.bounds)
    self.correct_array = self.correct.calc_correction_onetime(self.time)
    return self._imshow(array=self.correct_array, cmap='magma',
                        opt='correction')
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.ensemble import RandomForestClassifier

if __name__ == '__main__':
    # Load and label the data
    df = pd.read_csv('../data/city.csv', low_memory=False)
    # Drop the leading character of the assessor id
    # (presumably a non-numeric prefix -- TODO confirm against raw data).
    df['assessor_id'] = df['assessor_id'].str[1:]
    df = add_labels(df)

    # Clean, drop, and engineer features. Impute missing values.
    clean = Preprocessing()
    df = clean.transform(df)

    # Scale numerical features (RobustScaler: resistant to outliers).
    cols_to_scale = Attributes().get_num_attribs()
    scaler = RobustScaler()
    df[cols_to_scale] = scaler.fit_transform(df[cols_to_scale])

    # Split the data
    y = df.pop('labels')
    X = df
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
    # Rebalance the (presumably skewed) training classes by downsampling.
    X_train_res, y_train_res, idx_res = balance(X_train, y_train,
                                                method='downsample')

    # Save lat/lon for map visual, but remove before training
    # NOTE(review): columns 12:14 are assumed to be lat/lon -- confirm
    # against the post-transform column order.
    loc_X_train = X_train_res[:, 12:14]
    X_test = X_test.values
    loc_X_test = X_test[:, 12:14]
def p_attributes_definition_final(p):
    # Base case of the attribute list: a single attribute value becomes
    # an Attributes node with an empty tail.
    # NOTE: the string below is the PLY grammar rule for this production,
    # not ordinary documentation -- it must not be edited.
    'attributes_definition : attribute_value'
    p[0] = Attributes(p[1], [])
def p_attributes_definition_recursive(p):
    # Recursive case: prepend the new attribute value to the attributes
    # already collected by the nested attributes_definition.
    # NOTE: the string below is the PLY grammar rule for this production,
    # not ordinary documentation -- it must not be edited.
    'attributes_definition : attribute_value attributes_definition'
    p[0] = Attributes(p[1], p[2].attributes)
# AI Abby Virtual Assistant [Version 1.19-22alpha] # (c) 2020, AI Abby Virtual Assistant. All Rights Reserved. # Author: Johndev4 # Created: September 19-22, 2020 print("Initializing the program...") from os import system system('title AI Abby Virtual Assistant') from attributes import Attributes from voice_system import VoiceSystem from get_started import get_username import operations if __name__ == "__main__": attr = Attributes() voice_Sys = VoiceSystem(attr.AI_ASSISTANT, attr.ME) system('cls') print(f"{attr.HEAD}\r\n\r\n") voice_Sys.speak(f"Hello, I'm {attr.AI_ASSISTANT}, your virtual assistant.") attr.ME = get_username(attr) voice_Sys.greet(attr.ME) voice_Sys.speak(f"What can I help you today?") while True: query = voice_Sys.listen() operations.func(attr, query)
def _retry_get_matching_product(asins):
    """Call ``mws.get_matching_product`` for up to 10 ASINs, retrying once
    after a 20s pause on the transient errors this API is known to raise.

    Any other exception is reported and re-raised.
    """
    try:
        return mws.get_matching_product(MarketplaceId=marketplaceid,
                                        ASINList=asins)
    except AttributeError:
        print('AttributeError')
    except BotoServerError:
        print('BotoServerError')
    except:
        print('Exception')
        raise
    # Transient failure path: back off, then retry exactly once.
    time.sleep(20)
    return mws.get_matching_product(MarketplaceId=marketplaceid,
                                    ASINList=asins)


def get_attributes(asin_list):
    '''
    Retrieve certain attributes for all active ASINs from the goodstuff
    database.

    Args:
        asin_list (list of lists): Each inner list contains up to 10
            ASINs, because the MWS function allows an API request for at
            most 10 ASINs at a time.

    Returns:
        master (list): A list of attribute rows, one per ASIN:
            identifier:    ASIN number
            brand:         the manufacturer's brand
            pkg_length:    length of the package containing the item
            pkg_width:     width of the item
            pkg_height:    height of the item
            salesrank:     selling rate; the lower the number, the
                           faster the item sells
            manufacturer:  the company that makes the item
            max_age:       suggested maximum age the item is for (months)
            min_age:       minimum age the item is for (months)
            product_group: the retail category the item belongs to
    '''
    master = []
    for asins in asin_list:
        # One request (with a single automatic retry) per ASIN batch.
        response = _retry_get_matching_product(asins)
        for result in response._result:
            product = result.Product
            identifier = result['ASIN']
            try:
                attributes = product.AttributeSets.ItemAttributes[0]
            except AttributeError:
                # Item has no attribute set; skip it.
                continue
            dimensions = attributes.PackageDimensions
            a = Attributes(product, attributes, dimensions)
            master.append([
                identifier,
                a.get_brand(),
                a.get_length(),
                a.get_width(),
                a.get_height(),
                a.get_salesrank(),
                a.get_manufacturer(),
                a.get_max_age(),
                a.get_min_age(),
                a.get_product_group()
            ])
    return master
def transform(self, df):
    """Clean, feature-engineer, impute, and dummytize the housing frame.

    Operates on *df* largely in place and returns the processed
    DataFrame, ready for scaling/training.
    """
    ### CLEAN AND DROP
    attribs = Attributes()

    # drop customer segmentation info (3) #tinker
    segment_cols = attribs.get_segment_cols()
    df.drop(columns=segment_cols, inplace=True)

    # drop cols with data leakage (2)
    leak_cols = attribs.get_leak_cols()
    df.drop(columns=leak_cols, inplace=True)

    # drop rows with leakage
    df.drop(df[df.year_built == 2017].index, inplace=True)
    df.drop(df[df.effective_year_built == 2017].index, inplace=True)

    # drop cols with too many nulls (28)
    null_cols = attribs.get_null_cols()
    df.drop(columns=null_cols, inplace=True)

    # drop redundant features (74)
    redundant_cols = attribs.get_redundant_cols()
    df.drop(columns=redundant_cols, inplace=True)

    # drop irrelevant features (18)
    irrelevant_cols = attribs.get_irrelevant_cols()
    df.drop(columns=irrelevant_cols, inplace=True)

    # drop 1050 rows without sale_date or sale_price (same set)
    df.dropna(subset=['last_sale_price', 'last_sale_date'], inplace=True)

    # remap buidling_condition (misspelling of the column name intentional)
    df.replace({'buidling_condition': {
        'LOW': 1, 'FAIR': 2, 'AVERAGE': 3, 'AVERAGE +': 4,
        'AVERAGE ++': 5, 'GOOD': 6, 'GOOD +': 7, 'GOOD ++': 8,
        'VERY GOOD': 9, 'VERY GOOD +': 10, 'VERY GOOD ++': 11,
        'EXCELLENT': 12, 'EXCELLENT +': 13, 'EXCELLENT++': 14,
        'EXCEPTIONAL 1': 15}}, inplace=True)

    # convert true/false to 1/0
    # BUG FIX: Series.astype returns a NEW Series; the previous call
    # discarded the result (astype(int, copy=False) is not in-place when
    # the dtype changes), so the column was never actually converted.
    df['nrel_attached_garage'] = df['nrel_attached_garage'].astype(int)

    # combine full and half baths
    df['num_baths'] = df['full_bath_count'] + (0.5 * df['half_bath_count'])
    df.drop(columns=['full_bath_count', 'half_bath_count'], inplace=True)

    ### FEATURE ENGINEER
    # Spatial clustering
    # TODO won't work in production b/c engineering off of labels.
    df['num_upgrades_parcel'] = \
        df['labels'].groupby(df['parcel_id']).transform('sum')
    df.drop(columns=['parcel_id', 'subdivision', 'zip'], inplace=True)

    # Days since last sale
    df['update_date'] = pd.to_datetime(df['update_date'])
    df['last_sale_date'] = pd.to_datetime(df['last_sale_date'])
    df['time_since_sale'] = \
        (df['update_date'] - df['last_sale_date']).dt.days
    df.drop(columns=['update_date', 'last_sale_date'], inplace=True)

    # Handle sparse permits data  #TODO improve method
    # Quick: total permits ever
    permit_cols = attribs.get_permit_cols()
    df['num_permits_since_purchase'] = (df[permit_cols].notnull()).sum(1)
    df.drop(columns=permit_cols, inplace=True)

    ### IMPUTATION
    # Fill median (numerical)
    df['acres'].fillna(df['acres'].median(), inplace=True)
    df['census_income_median'].fillna(df['census_income_median'].median(),
                                      inplace=True)

    # Fill mode (numerical)
    df['pv_potential_kwhr_yr'].fillna(df['pv_potential_kwhr_yr'].mode()[0],
                                      inplace=True)

    # Fill 'Unknown'
    df.replace({'zillow_neighborhood': np.nan},
               {'zillow_neighborhood': 'Unknown'}, inplace=True)

    # Fill mode (categorical)
    cols = ['ac_type', 'exterior_wall_type', 'frame_type', 'heating_type',
            'interior_wall_type', 'land_use', 'roof_cover_type']
    for col in cols:
        mode = df[col].mode()[0]
        df[col].fillna(mode, inplace=True)

    # DUMMYTIZE
    # Reuse the Attributes instance created above instead of building a
    # second one.
    dummy_cols = attribs.get_dummy_cols()
    df = pd.get_dummies(df, columns=dummy_cols, drop_first=True)

    return df
def __init__(self, data_path, ablation):
    """Remember the dataset location and the ablation setting, and build
    the shared Attributes helper."""
    self.attr = Attributes()
    self.data_path = data_path
    self.ablation = ablation
from attributes import Attributes

if __name__ == '__main__':
    # Buy three points in each of the three attributes.
    a = Attributes(3, 3, 3)
    # Runtime string is Russian: "Price for purchasing attributes".
    print(f'Цена за покупку аттрибутов {a.full_cost}')
    # NOTE(review): unless `print_attributes` is a property, this prints
    # the bound-method object itself -- confirm in attributes.py.
    print(a.print_attributes)
def __init__(self, skills=None, attributes=None, health=None):
    """Create a character sheet.

    Args:
        skills: a Skills instance; a fresh ``Skills()`` when omitted.
        attributes: an Attributes instance; a fresh ``Attributes()``
            when omitted.
        health: a Health instance; a fresh ``Health()`` when omitted.

    BUG FIX: the previous signature used ``Skills()``/``Attributes()``/
    ``Health()`` as default values. Defaults are evaluated once at
    definition time, so every instance constructed without arguments
    shared (and mutated) the same three objects.
    """
    self.skills = skills if skills is not None else Skills()
    self.attributes = attributes if attributes is not None else Attributes()
    self.health = health if health is not None else Health()
# leaf_font_size=6) # pylab.savefig(ps.name + ".png") #print Z clusters_values = fcluster(Z, 0.75) clusters_dict = {} for i in range(len(clusters_values)): c_id = clusters_values[i] old_v = clusters_dict.get(c_id, []) old_v.append(i) clusters_dict[c_id] = old_v clusters = [] for v in clusters_dict.values(): clusters.append(v) attr = Attributes(ps, v, classifier) print attr.extract_attributes() res = t.get_results_purity_inverse_purity(ps.name, clusters) res2 = t.get_results_bcubed(ps.name, clusters) results[0] += res[0] results[1] += res[1] results2[0] += res2[0] results2[1] += res2[1] print res print res2 print len(clusters) #print clusters #print len(set(clusters))
def __init__(self, name="prisoner", inventory=None, equipped=None,
             attributes=None, skills=None, level=1, health=100,
             fatigue=100, magicka=10, race="imperial", encumbrance=0,
             sneak=False, effects=None, spells=None, sex="male",
             actions=None, gold=0, quests=None, notoriety=0, bounty=0,
             fame=0):
    """Create a character record, filling unset collections with fresh
    defaults.

    Any falsy value for inventory/equipped/attributes/skills/effects/
    spells/actions/quests is replaced by a new default object — the same
    truthiness-based fallback the original if/else chains performed.
    """
    # Plain scalar state.
    self.name = name
    self.level = level
    self.health = health
    self.fatigue = fatigue
    self.magicka = magicka
    self.race = race
    self.encumbrance = encumbrance
    self.sneak = sneak
    self.notoriety = notoriety
    self.bounty = bounty
    self.gold = gold
    self.fame = fame
    self.sex = sex
    # Collection-like state: falsy input falls back to a fresh default
    # (deliberately truthiness-based, matching the original behaviour).
    self.inventory = inventory if inventory else Inventory()
    self.equipped = equipped if equipped else []
    self.attributes = attributes if attributes else Attributes()
    self.skills = skills if skills else Skills()
    self.effects = effects if effects else []
    self.spells = spells if spells else []
    self.actions = actions if actions else []
    self.quests = quests if quests else []