def __load_clf(self, node):
    """Recursively check and load classifier objects into 'clf_object'.

    Arguments:
    ----------
    node:
        type: dict
        info: current tree node to load the 'clf_file' classifier

    Raises SystemExit (via sys.exit) when a node's values do not have
    the expected structure.
    """
    import sys  # local import keeps this fix self-contained

    check_keys(keys=self.keys, data_struct=node, error='Invalid JSON keys')
    node['clf_object'] = NodeClassif(node['clf_file'])
    try:
        clf_labels = node['clf_object'].get_labels()
        clf_child_names = node['clf_children'].keys()
        clf_child_nodes = node['clf_children'].values()
        # every child name must correspond to a label the classifier emits
        check_keys(keys=clf_child_names, data_struct=clf_labels,
                   error='Invalid JSON keys')
        for child_node in clf_child_nodes:
            self.__load_clf(child_node)
    except AttributeError:
        # sys.exit instead of the site-provided exit(): the bare exit()
        # builtin is injected by the site module and may be missing when
        # Python runs with -S; sys.exit is the supported API.
        sys.exit('Invalid JSON values')
def __init__(self, **kwargs):
    """Seed creative defaults, overlay caller-supplied values, then
    require that a product id was provided."""
    defaults = {
        'ad_types': [1],  # ad types 12 and 31 do something cool
        # other page types: ['home', 'desktop', 'feed', 'mobile', 'desktopFeed']
        'page_types': ['desktop'],
        'images': [],
        'text_groups': [],
        'created_at': utils.rounded_now(),
    }
    self.update(defaults)
    self.update(kwargs)
    utils.check_keys(self, ['product_id'])
def __init__(self, creative_perm_id, targeting_perm_id, landing_perm_id, product_id):
    """Store the permutation/product ids and initialise the empty
    tracking containers, then validate the id keys are present."""
    id_pairs = (
        ('cp_id', creative_perm_id),
        ('tp_id', targeting_perm_id),
        ('lp_id', landing_perm_id),
        ('p_id', product_id),
    )
    for key, value in id_pairs:
        self[key] = value
    self['click_data'] = {}
    self['fb_ids'] = []
    self['imp_data'] = {}
    self['spend_data'] = {}
    utils.check_keys(self, ['cp_id', 'tp_id', 'lp_id', 'p_id'])
def test_make_add_value_with_flow(app):
    # A config passed via subscribe_pipeline should override the config
    # declared on the inner pipeline (path '321' beats path '123').
    t = GlobalTestData()

    @app.pipeline(config=dict(path='123'))
    def p_builder3(worker, sentence2):
        # inner pipeline: rename, attach the configured path, extract
        # names, then hand each finished item to the collector
        return sentence2\
            .rename(sentence=sentence2)\
            .add_value(path=worker.config['path'])\
            .subscribe_flow(NameExtractionOneWayFlow(use_lower=True))\
            .subscribe_func(t.save_one_item)

    @app.pipeline()
    def p_builder_general3(worker, sentence2: DataFrame):
        # outer pipeline delegates to p_builder3 with an overriding config
        return sentence2.subscribe_pipeline(p_builder3, config=dict(path='321'))

    p_builder3.compile()
    p_builder_general3.compile()
    p_builder_general3(sentence2="Oleg")
    run_pipelines(app)
    result = t.get_result()
    assert check_keys(result.keys(), ['sentence', 'path', 'names'])
    # use_lower=True lower-cases the extracted name
    assert result['names'][0] == "oleg"
    # '321' (override from subscribe_pipeline), not '123' (inner default)
    assert result['path'] == '321'
def post_orders():
    """POST handler: validate and persist a batch of orders.

    Returns 201 with the ids of stored orders, or 400 listing the ids
    of payload items that failed key validation.
    """
    session = db_session.create_session()
    data = request.json
    validation_error = []  # {"id": ...} for each invalid payload item
    ids = []               # {"id": ...} for each successfully stored order
    for i in data['data']:
        # both the value check and the key-presence check must pass
        if not check_keys(i, ('order_id', 'weight', 'region', 'delivery_hours')) or \
                not check_all_keys_in_dict(i, ('order_id', 'weight', 'region', 'delivery_hours')):
            validation_error.append({"id": i['order_id']})
        else:
            # an existing order with the same id is replaced wholesale
            order = session.query(Order).filter(
                Order.order_id == i['order_id']).first()
            if order:
                session.delete(order)
                session.commit()
            ids.append({"id": i['order_id']})
            delivery_hours = []
            for j in i['delivery_hours']:
                delivery_hour = DeliveryHour(order_id=i['order_id'])
                delivery_hour.set_delivery_hour(j)
                delivery_hours.append(delivery_hour)
            order = Order(order_id=i['order_id'],
                          weight=i['weight'],
                          region=i['region'],
                          delivery_hours=delivery_hours)
            session.add(order)
            session.commit()
    # NOTE(review): valid items are committed even when the batch also
    # contains invalid ones and a 400 is returned — confirm this
    # partial-accept behaviour is intended by the API contract.
    if validation_error:
        return make_resp({'validation_error': {
            "orders": validation_error
        }}, 400)
    else:
        return make_resp({"orders": ids}, 201)
def post_couriers():
    """POST handler: validate and persist a batch of couriers.

    Returns 201 with the ids of stored couriers, or 400 listing the ids
    of payload items that failed key validation.
    """
    session = db_session.create_session()
    get_data = request.json
    validation_error = []  # {"id": ...} for each invalid payload item
    ids = []               # {"id": ...} for each successfully stored courier
    for i in get_data['data']:
        # both the value check and the key-presence check must pass
        if not check_keys(i, ('courier_id', 'courier_type', 'regions', 'working_hours')) or \
                not check_all_keys_in_dict(i, ('courier_id', 'courier_type', 'regions', 'working_hours')):
            validation_error.append({"id": i['courier_id']})
        else:
            ids.append({"id": i['courier_id']})
            regions = []
            for j in i['regions']:
                region = Region(region=j, courier_id=i['courier_id'])
                regions.append(region)
            working_hours = []
            for j in i['working_hours']:
                working_hour = WorkingHour(courier_id=i['courier_id'])
                working_hour.set_working_hour(j)
                working_hours.append(working_hour)
            courier = Courier(courier_id=i['courier_id'],
                              courier_type=i['courier_type'],
                              regions=regions,
                              working_hours=working_hours)
            session.add(courier)
            session.commit()
    # NOTE(review): unlike post_orders, existing couriers with the same
    # id are NOT deleted first — confirm whether replace-on-repost was
    # intended here as well.
    if validation_error:
        return make_resp({'validation_error': {
            "couriers": validation_error
        }}, 400)
    else:
        return make_resp({"couriers": ids}, 201)
def __init__(self, scope: Construct, ns: str, *, auth_dict: dict):
    """Initialise the stack: configure the Azure provider from the
    optional credentials in auth_dict and create the 'common' resource
    group in East US."""
    keys = list(auth_dict.keys())

    def pick(name):
        # Return the credential when present in auth_dict, else None.
        return auth_dict[name] if check_keys(key=name, key_list=keys) else None

    subscription_id = pick('subscription_id')
    client_id = pick('client_id')
    client_secret = pick('client_secret')
    tenant_id = pick('tenant_id')
    access_key = pick('access_key')
    key_data = pick('key_data')

    super().__init__(scope, ns)

    # define resources here
    backend = super().backend
    features = AzurermProviderFeatures()
    provider = AzurermProvider(self, 'azure',
                               features=[features],
                               subscription_id=subscription_id,
                               client_id=client_id,
                               client_secret=client_secret,
                               tenant_id=tenant_id)
    resource_group = ResourceGroup(self, 'azurerg',
                                   name='common',
                                   location='East US')
def test_make_add_value_with_flow(app):
    # add_value plus a name-extraction flow: the result must carry the
    # renamed sentence, the added path, and the extracted name.
    @app.pipeline()
    def p_builder(worker, sentence2):
        return sentence2\
            .rename(sentence=sentence2)\
            .add_value(path='123')\
            .subscribe_flow(NameExtractionOneWayFlow(use_lower=True))
    p_builder.compile()
    result = p_builder(sentence2="Oleg")
    assert utils.check_keys(result.keys(), ['names', 'sentence', 'path'])
    # use_lower=True lower-cases the extracted name
    assert result['names'][0] == "oleg"
    assert result['path'] == '123'
def test_long_concatenate(app):
    # concatenate() must merge the flow's 'names' output back under the
    # 'sentence' key while preserving the other pipeline variable.
    @app.pipeline()
    def p_builder(worker, sentence, use_lower):
        data_with_name = sentence \
            .apply_flow(NameExtractionOneWayFlow(), as_worker=False)
        data = concatenate(sentence=data_with_name.get('names'),
                           use_lower=use_lower)
        return data
    p_builder.compile()
    result = p_builder(sentence="Oleg", use_lower=True)
    assert utils.check_keys(result.keys(), ['use_lower', 'sentence'])
    # flow is constructed without use_lower, so original casing survives
    assert result['sentence'][0] == "Oleg"
def test_multiple_vars(app):
    # Two flows chained after concatenate: subscribe_flow feeds
    # apply_flow, and the final frame carries 'names' and 'sentence'.
    @app.pipeline()
    def p_builder(worker, sentence, use_lower):
        data = concatenate(sentence=sentence, use_lower=use_lower)
        data_with_name = data\
            .subscribe_flow(NameExtractionOneWayFlow(), as_worker=False)\
            .apply_flow(NameExtractionFlowMultiple(), as_worker=False)
        return data_with_name
    p_builder.compile()
    result = p_builder(sentence="Oleg", use_lower=True)
    assert utils.check_keys(result.keys(), ['names', 'sentence'])
    # flows run without use_lower, so casing is preserved
    assert result['names'][0] == "Oleg"
def orders_complete():
    """POST handler: mark an in-progress order as completed.

    Expects JSON with 'courier_id', 'order_id' and an ISO-8601
    'complete_time'. Returns 200 with the order id on success,
    400 on a malformed payload or unknown courier/order pair.
    """
    session = db_session.create_session()
    get_data = request.json
    # BUGFIX: validate BEFORE touching the payload. The original parsed
    # get_data['complete_time'] first, so a request missing that key (or
    # with a bad timestamp) raised KeyError/ValueError — an HTTP 500 —
    # instead of the intended 400 response.
    if not check_all_keys_in_dict(get_data, ('courier_id', 'order_id', 'complete_time')) or \
            not check_keys(get_data, ('courier_id', 'order_id', 'complete_time')):
        return make_resp('', 400)
    date_time = datetime.datetime.strptime(get_data['complete_time'],
                                           '%Y-%m-%dT%H:%M:%S.%fZ')
    complete_order = session.query(OrderInProgress).filter(
        OrderInProgress.courier_id == get_data['courier_id'],
        OrderInProgress.order_id == get_data['order_id']).first()
    if complete_order:
        complete_order.complete_time = date_time
        complete_order.set_duration(session)
        complete_id = complete_order.order_id
        session.commit()
        return make_resp({"order_id": complete_id}, 200)
    return make_resp('', 400)
def test():
    """Flask view: on POST, validate the JSON payload and return the
    model's prediction; on any other method, return an info string."""
    if request.method == "POST":
        received = request.get_json(force=True)
        if not utils.check_keys(received):
            return handlers.invalid_keys()
        data = utils.extract_json(received)
        # extract_json signals failure with the literal False; the old
        # `data == False` comparison would also match 0 / 0.0 payloads.
        if data is False:
            return handlers.invalid_values()
        series = utils.json_to_pd(data)
        # json_to_pd signals failure by returning a bool; isinstance is
        # the idiomatic type test (type(...) == bool is an anti-pattern).
        if isinstance(series, bool):
            return handlers.invalid_values()
        return flask.make_response(model.predict(series)[0], 200)
    else:
        return "Flask app to check if human user or bot"
def __init__(self, **kwargs):
    """Copy the keyword arguments in, record how many were supplied,
    and validate the required keys."""
    supplied = len(kwargs)
    self.update(kwargs)
    self['key_count'] = supplied
    required = ['key_count', 'product_id', 'countries']
    utils.check_keys(self, required)
def __init__(self, **kwargs):
    """Seed page defaults (placeholder meta tag, creation timestamp),
    overlay caller-supplied values, then validate required keys."""
    initial = {
        'metas': [{'property': '', 'content': ''}],  # one empty meta placeholder
        'created_at': utils.rounded_now(),
    }
    self.update(initial)
    self.update(kwargs)
    required = ['base_url', 'iframe_slug', 'page_title', 'product_id']
    utils.check_keys(self, required)
def __init__(self, **kwargs):
    """Initialise empty group containers and a creation timestamp,
    overlay caller-supplied values, then validate required keys."""
    initial = {
        'creative_groups': [],
        'targeting_groups': [],
        'created_at': utils.rounded_now(),
    }
    self.update(initial)
    self.update(kwargs)
    utils.check_keys(self, ['name', 'client_name'])
def __init__(self, **kwargs):
    """Load all fields from kwargs, validate the required keys, then
    synchronise the object."""
    self.update(kwargs)
    required = ['image', 'text_group', 'body', 'title', 'product_id']
    utils.check_keys(self, required)
    self.sync()
def __init__(self, scope: Construct, ns: str, *, auth_dict: dict,
             k8s_stack_variable: OptionsK8Stack):
    """Build an AKS stack: Azure provider, Kubernetes cluster, an extra
    autoscaling node pool, k8s/helm providers, and the traefik and
    cert-manager namespaces.

    Arguments:
        scope: enclosing CDKTF construct.
        ns: stack namespace/id.
        auth_dict: Azure credentials; each entry is optional and falls
            back to None when absent.
        k8s_stack_variable: app-level settings (tags, resource-group
            name, VM size, DNS prefix, common code dir).
    """
    keys = list(auth_dict.keys())
    # pull each credential out of auth_dict only when present
    access_key = auth_dict['access_key'] if check_keys(
        key='access_key', key_list=keys) else None
    key_data = auth_dict['key_data'] if check_keys(key='key_data',
                                                   key_list=keys) else None
    subscription_id = auth_dict['subscription_id'] if check_keys(
        key='subscription_id', key_list=keys) else None
    client_id = auth_dict['client_id'] if check_keys(
        key='client_id', key_list=keys) else None
    client_secret = auth_dict['client_secret'] if check_keys(
        key='client_secret', key_list=keys) else None
    tenant_id = auth_dict['tenant_id'] if check_keys(
        key='tenant_id', key_list=keys) else None
    ######### App Variables###########
    # keys = list(k8s_stack_variable.keys())
    var_tags = k8s_stack_variable.tags
    var_rg_name = k8s_stack_variable.rg_name
    var_vm_size = k8s_stack_variable.vm_size
    var_dns_prefix = k8s_stack_variable.dns_prefix
    common_code_dir = k8s_stack_variable.common_code_dir
    super().__init__(scope, ns)
    ##### Terraform Variables ########
    tf_key_data = TerraformVariable(self, 'key_data', type='string',
                                    default=key_data)
    tf_access_key = TerraformVariable(self, 'access_key', type='string',
                                      default=access_key)
    tf_location = TerraformVariable(self, 'location', type='string',
                                    default='West Europe')
    # NOTE(review): 'stogage' looks like a typo, but renaming would change
    # the public Terraform variable name — confirm before fixing.
    tf_storage_resource_group_name = TerraformVariable(
        self, 'stogage_resource_group_name', type='string',
        default='Prateek-Test')
    tf_resource_group_name = TerraformVariable(self, 'resource_group_name',
                                               type='string',
                                               default=var_rg_name)
    tf_storage_account_name = TerraformVariable(
        self, 'storage_account_name', type='string',
        default=config('storage_account_name'))
    tf_container_name = TerraformVariable(self, 'container_name',
                                          type='string', default='tfstate')
    tf_storage_tfstate_key = TerraformVariable(
        self, 'storage_tfstate_key', type='string',
        default='prod.terraform.tfstate.prateek-vm2')
    tf_node_count = TerraformVariable(self, 'node_count', type='number',
                                      default=1)
    tf_min_count = TerraformVariable(self, 'min_count', type='number',
                                     default=1)
    tf_max_count = TerraformVariable(self, 'max_count', type='number',
                                     default=2)
    tf_max_pod = TerraformVariable(self, 'max_pod', type='number',
                                   default=20)
    features = AzurermProviderFeatures()
    AzurermProvider(self, 'azure', features=[features],
                    subscription_id=subscription_id, client_id=client_id,
                    client_secret=client_secret, tenant_id=tenant_id)
    #TerraformModule(self, 'common_module', source='../{0}'.format(common_code_dir))
    node_pool = KubernetesClusterDefaultNodePool(
        name='default', node_count=tf_node_count.number_value,
        vm_size=var_vm_size)
    resource_group = ResourceGroup(self, 'azure-rg', name=var_rg_name,
                                   location=tf_location.string_value)
    identity = KubernetesClusterIdentity(type='SystemAssigned')
    linux_profile = KubernetesClusterLinuxProfile(
        admin_username='******',
        ssh_key=[
            KubernetesClusterLinuxProfileSshKey(
                key_data=tf_key_data.string_value)
        ])
    cluster = KubernetesCluster(
        self, 'my-kube-cluster', name='my-kube-cluster',
        default_node_pool=[node_pool], dns_prefix=var_dns_prefix,
        location=resource_group.location,
        resource_group_name=resource_group.name,
        node_resource_group="{0}-nodes".format(resource_group.name),
        identity=[identity], linux_profile=[linux_profile],
        network_profile=[
            KubernetesClusterNetworkProfile(network_plugin='azure')
        ],
        addon_profile=[
            KubernetesClusterAddonProfile(
                kube_dashboard=[
                    KubernetesClusterAddonProfileKubeDashboard(
                        enabled=True)
                ],
                # oms_agent=[KubernetesClusterAddonProfileOmsAgent(enabled=True,log_analytics_workspace_id='test')]
            )
        ],
        role_based_access_control=[
            KubernetesClusterRoleBasedAccessControl(enabled=True)
        ],
        tags=var_tags)
    kube_config = cluster.kube_config_raw
    # NOTE(review): the nested os.path.join is redundant and the result is
    # a directory path, not a file name — verify against File() semantics.
    File(self, 'kube-config',
         filename=os.path.join(
             os.path.join(os.curdir, '..', 'generated_files')),
         content=kube_config)
    TerraformOutput(self, 'kube_config', value=kube_config, sensitive=True)
    cluster_node_pool = KubernetesClusterNodePool(
        self, "k8sNodePool",
        kubernetes_cluster_id=cluster.id,
        name='k8snodepool',
        node_count=tf_node_count.number_value,
        vm_size=var_vm_size,
        enable_auto_scaling=True,
        min_count=tf_min_count.number_value,
        max_count=tf_max_count.number_value,
        max_pods=tf_max_pod.number_value,
        lifecycle=TerraformResourceLifecycle(create_before_destroy=True,
                                             ignore_changes=['node_count'
                                                             ]))
    #RoleAssignment(self, "network_contributer", scope=resource_group.id,
    #               principal_id=identity.principal_id,
    #               role_definition_name='Network Contributor')
    #RoleAssignment(self, "kubectl_pull", scope=resource_group.id,
    #               principal_id=cluster.kubelet_identity(index='0').object_id,
    #               role_definition_name='AcrPull')
    #############Removed Temporarly ######################################
    k8s_provider = KubernetesProvider(
        self, 'k8s', load_config_file=False,
        host=cluster.kube_config(index='0').host,
        client_key=add_base64decode(
            cluster.kube_config(index='0').client_key),
        client_certificate=add_base64decode(
            cluster.kube_config(index='0').client_certificate),
        cluster_ca_certificate=add_base64decode(
            cluster.kube_config(index='0').cluster_ca_certificate))
    helm_provider = HelmProvider(
        self, 'helm',
        kubernetes=[
            HelmProviderKubernetes(
                load_config_file=False,
                host=cluster.kube_config(index='0').host,
                client_key=add_base64decode(
                    cluster.kube_config(index='0').client_key),
                client_certificate=add_base64decode(
                    cluster.kube_config(index='0').client_certificate),
                cluster_ca_certificate=add_base64decode(
                    cluster.kube_config(index='0').cluster_ca_certificate))
        ])
    # Add traefik and certmanager to expose services by https.
    traefik_ns_metadata = NamespaceMetadata(name='traefik',
                                            labels={
                                                'created_by': 'PythonCDK',
                                                'location': 'eastus',
                                                'resource_group': var_rg_name
                                            })
    traefik_ns = Namespace(self, 'traefik-ns',
                           metadata=[traefik_ns_metadata])
    helm_traefik2_value = '''
additionalArguments:
  - "--entrypoints.websecure.http.tls"
  - "--providers.kubernetesingress=true"
  - "--providers.kubernetesIngress.ingressClass=traefik"
  - "--ping"
  - "--metrics.prometheus"
ports:
  web:
    redirectTo: websecure
'''
    helm_traefik2_release = Release(
        self, 'traefik2', name='traefik',
        repository='https://containous.github.io/traefik-helm-chart',
        chart='traefik', namespace='traefik',
        values=[helm_traefik2_value])
    cert_manager_ns_metadata = NamespaceMetadata(name='cert-manager',
                                                 labels={
                                                     'created_by': 'PythonCDK',
                                                     "location": 'westeurope',
                                                     'resource_group': var_rg_name
                                                 })
    cert_manager_ns = Namespace(
        self, 'cert-manager-ns',
        metadata=[cert_manager_ns_metadata],
    )
def __init__(self, scope: Construct, ns: str, *, auth_dict: dict):
    """Build an Azure VM stack: remote-state backend, provider, virtual
    network, subnet, NIC, and an Ubuntu 18.04 virtual machine.

    Arguments:
        scope: enclosing CDKTF construct.
        ns: stack namespace/id.
        auth_dict: Azure credentials; each entry is optional and falls
            back to None when absent.
    """
    keys = list(auth_dict.keys())
    # pull each credential out of auth_dict only when present
    subscription_id = auth_dict['subscription_id'] if check_keys(
        key='subscription_id', key_list=keys) else None
    client_id = auth_dict['client_id'] if check_keys(
        key='client_id', key_list=keys) else None
    client_secret = auth_dict['client_secret'] if check_keys(
        key='client_secret', key_list=keys) else None
    tenant_id = auth_dict['tenant_id'] if check_keys(
        key='tenant_id', key_list=keys) else None
    access_key = auth_dict['access_key'] if check_keys(
        key='access_key', key_list=keys) else None
    key_data = auth_dict['key_data'] if check_keys(key='key_data',
                                                   key_list=keys) else None
    super().__init__(scope, ns)
    # define resources here
    # remote state stored in the 'tfstate' container of the storage account
    backend = AzurermBackend(self,
                             resource_group_name='Prateek-Test',
                             storage_account_name='terraformstateprateek',
                             container_name='tfstate',
                             key="prod.terraform.tfstate.prateek-vm2",
                             access_key=access_key)
    features = AzurermProviderFeatures()
    provider = AzurermProvider(self, 'azure',
                               features=[features],
                               subscription_id=subscription_id,
                               client_id=client_id,
                               client_secret=client_secret,
                               tenant_id=tenant_id)
    resource_group = ResourceGroup(self, 'azure-rg',
                                   name='test',
                                   location='East US')
    virtual_network = VirtualNetwork(
        self, 'azure-net',
        name='TerraformVNet',
        location='East US',
        address_space=['10.0.0.0/16'],
        resource_group_name=resource_group.name,
        depends_on=[resource_group])
    virtual_subnetwork = Subnet(
        self, 'azure-subnet',
        name='TerraformSubVNet',
        resource_group_name=resource_group.name,
        address_prefixes=['10.0.0.0/24'],
        virtual_network_name=virtual_network.name,
        depends_on=[resource_group, virtual_network])
    ip_configuration = NetworkInterfaceIpConfiguration(
        name='private_ip',
        private_ip_address_allocation='Dynamic',
        subnet_id=virtual_subnetwork.id)
    v_nic = NetworkInterface(self, 'azure-vnet',
                             name='vNic',
                             location='East US',
                             ip_configuration=[ip_configuration],
                             resource_group_name=resource_group.name,
                             depends_on=[resource_group])
    storage_disk = VirtualMachineStorageOsDisk(name='azure_os_disk',
                                               create_option='FromImage',
                                               disk_size_gb=50)
    storage_image_ref = VirtualMachineStorageImageReference(
        offer='UbuntuServer',
        publisher='Canonical',
        sku='18.04-LTS',
        version='latest')
    os_profile = VirtualMachineOsProfile(admin_username='******',
                                         computer_name='prateek-vm2')
    ssh_keys = VirtualMachineOsProfileLinuxConfigSshKeys(
        path='/home/{0}/.ssh/authorized_keys'.format(
            os_profile.admin_username),
        key_data=key_data)
    os_profile_linux_config = VirtualMachineOsProfileLinuxConfig(
        disable_password_authentication=True, ssh_keys=[ssh_keys])
    # NOTE(review): the linux config (SSH keys, password auth disabled)
    # is built above but commented out of the VM below — confirm whether
    # it should be attached.
    azure_vm = VirtualMachine(
        self, 'azure_vm',
        location='East US',
        name='Prateek-vm2',
        network_interface_ids=[v_nic.id],
        resource_group_name=resource_group.name,
        storage_os_disk=[storage_disk],
        storage_image_reference=[storage_image_ref],
        os_profile=[os_profile],
        # os_profile_linux_config=[os_profile_linux_config],
        vm_size='Standard_D2_v2')