def apply_params(url, params):
    route_params = keyfilter(params_filter, params)
    return (
        reduce(lambda acc, kv: acc.replace(kv[0], kv[1]), route_params.items(), url),
        keyfilter(not_params_filter, params),
    )

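# --- Hedged usage sketch for apply_params. `params_filter` and
# `not_params_filter` are not defined above; here they are assumed to split
# off ':'-prefixed route placeholders from ordinary query params.
from functools import reduce
from toolz import keyfilter

def params_filter(key):
    return key.startswith(":")  # assumption: placeholders look like ':id'

def not_params_filter(key):
    return not params_filter(key)

url, rest = apply_params("/items/:id", {":id": "42", "page": "2"})
# url  -> "/items/42"
# rest -> {"page": "2"}
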
def update(token, **kwargs):
    customer_id = get_customer_id(token)
    args = keyfilter(lambda x: x in ["customer_name"], kwargs)
    doc = frappe.get_doc("Customer", customer_id)
    doc.update(args)
    doc.save(ignore_permissions=True)
    return keyfilter(lambda x: x in CUSTOMER_FIELDS, doc.as_dict())

def _create_address(customer, args):
    fields = [
        "address_line1",
        "address_line2",
        "city",
        "state",
        "country",
        "pincode",
    ]
    _args = keyfilter(lambda x: x in fields, args)
    doc = frappe.get_doc(
        merge({"doctype": "Address", "address_type": "Billing"}, _args)
    )
    doc.append("links", {"link_doctype": "Customer", "link_name": customer})
    doc.insert(ignore_permissions=True)
    return keyfilter(lambda x: x in ["name"] + fields, doc.as_dict())

def get_slideshow():
    homepage = frappe.get_single("Homepage")
    if homepage.hero_section_based_on != "Slideshow" or not homepage.slideshow:
        return None

    def get_route(item):
        ref_doctype, ref_name = item.get("cm_ref_doctype"), item.get("cm_ref_docname")
        if ref_doctype and ref_name:
            route, show_in_website = frappe.get_cached_value(
                ref_doctype, ref_name, ["route", "show_in_website"]
            )
            if route and show_in_website:
                return transform_route({"route": route})
        return None

    return [
        merge(
            keyfilter(lambda y: y in ["image", "heading", "description"], x),
            {"route": get_route(x), "kind": x.get("cm_ref_doctype")},
        )
        for x in frappe.get_all(
            "Website Slideshow Item",
            filters={"parent": homepage.slideshow},
            fields=[
                "image",
                "heading",
                "description",
                "cm_ref_doctype",
                "cm_ref_docname",
            ],
        )
    ]

def get_groups(parsed, store, conf):
    """
    Return groups based on argument provided

    :param Namespace parsed: arguments parsed
    :param store: Otter scaling group collection
    :param dict conf: config
    :return: Deferred fired with list of {"tenantId": .., "groupId": ..} dict
    """
    log = mock_log()
    if parsed.group:
        groups = [g.split(":") for g in parsed.group]
        return succeed([{"tenantId": tid, "groupId": gid} for tid, gid in groups])
    elif parsed.all:
        d = store.get_all_groups()
        d.addCallback(lambda tgs: concat(tgs.values()))
    elif parsed.tenant_id:
        d = get_groups_of_tenants(log, store, parsed.tenant_id)
    elif parsed.disabled_tenants:
        non_conv_tenants = conf["non-convergence-tenants"]
        d = store.get_all_groups()
        d.addCallback(keyfilter(lambda k: k not in set(non_conv_tenants)))
        d.addCallback(lambda tgs: concat(tgs.values()))
    elif parsed.conf_conv_tenants:
        d = get_groups_of_tenants(log, store, conf["convergence-tenants"])
    else:
        raise SystemExit("Unexpected group selection")
    return d

def get_values(_type):
    fields = ["{}_{}".format(_type, x) for x in params]
    _get = compose(
        valmap(lambda x: x or 0),
        keymap(lambda x: x.replace("{}_".format(_type), "")),
        keyfilter(lambda x: x in fields),
    )
    return _get(data)

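# --- Hedged illustration of the curried compose pipeline above. get_values
# closes over free variables `params` and `data`, so concrete stand-ins are
# assumed here purely for illustration.
from toolz import compose
from toolz.curried import keyfilter, keymap, valmap

fields = ["actual_qty", "actual_rate"]
_get = compose(
    valmap(lambda x: x or 0),                    # default missing values to 0
    keymap(lambda x: x.replace("actual_", "")),  # strip the type prefix
    keyfilter(lambda x: x in fields),            # keep only the typed fields
)
_get({"actual_qty": 5, "actual_rate": None, "other": 1})
# -> {'qty': 5, 'rate': 0}
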
def get_clb_contents():
    """
    Get Rackspace Cloud Load Balancer contents as list of `CLBNode`. CLB
    health monitor information is also returned as a pmap of :obj:`CLB`
    objects mapped on LB ID.

    :return: Effect of (``list`` of :obj:`CLBNode`, `pmap` of :obj:`CLB`)
    :rtype: :obj:`Effect`
    """
    # If we get a CLBNotFoundError while fetching feeds, we should throw away
    # all nodes related to that load balancer, because we don't want to act on
    # data that we know is invalid/outdated (for example, if we can't fetch a
    # feed because CLB was deleted, we don't want to say that we have a node
    # in DRAINING with draining time of 0; we should just say that the node
    # is gone).
    def gone(r):
        return catch(CLBNotFoundError, lambda exc: r)

    lb_ids = [lb['id'] for lb in (yield _retry(get_clbs()))]
    node_reqs = [_retry(get_clb_nodes(lb_id).on(error=gone([])))
                 for lb_id in lb_ids]
    healthmon_reqs = [
        _retry(get_clb_health_monitor(lb_id).on(error=gone(None)))
        for lb_id in lb_ids]
    all_nodes_hms = yield parallel(node_reqs + healthmon_reqs)
    all_nodes, hms = all_nodes_hms[:len(lb_ids)], all_nodes_hms[len(lb_ids):]
    lb_nodes = {
        lb_id: [CLBNode.from_node_json(lb_id, node) for node in nodes]
        for lb_id, nodes in zip(lb_ids, all_nodes)}
    clbs = {
        str(lb_id): CLB(bool(health_mon))
        for lb_id, health_mon in zip(lb_ids, hms) if health_mon is not None}
    draining = [n for n in concat(lb_nodes.values())
                if n.description.condition == CLBNodeCondition.DRAINING]
    feeds = yield parallel(
        [_retry(get_clb_node_feed(n.description.lb_id, n.node_id).on(
            error=gone(None)))
         for n in draining]
    )
    nodes_to_feeds = dict(zip(draining, feeds))
    deleted_lbs = set([
        node.description.lb_id
        for (node, feed) in nodes_to_feeds.items() if feed is None])

    def update_drained_at(node):
        feed = nodes_to_feeds.get(node)
        if node.description.lb_id in deleted_lbs:
            return None
        if feed is not None:
            node.drained_at = extract_clb_drained_at(feed)
        return node

    nodes = map(update_drained_at, concat(lb_nodes.values()))
    yield do_return((
        list(filter(bool, nodes)),
        pmap(keyfilter(lambda k: k not in deleted_lbs, clbs))))

def __init__(self, **kwargs):
    self.attrs = pipe(
        self.imports,
        reversed,
        map(vars),
        merge,
        keyfilter(compose(str.islower, first)),
        valfilter(callable),
    )
    self.attrs.update(kwargs)  # apply overrides passed to the constructor

def get(token):
    try:
        customer_id = get_customer_id(token)
    except Exception:
        return None
    doc = frappe.get_doc("Customer", customer_id)
    orders = frappe.db.exists("Sales Order", {"customer": customer_id})
    return merge(
        keyfilter(lambda x: x in CUSTOMER_FIELDS, doc.as_dict()),
        {"can_register_messaging": bool(orders)},
    )

def fit(self, train_loader, epochs, val_loader=None, send_weixin=False,
        save_per_epochs=None, callbacks=()):
    validate = val_loader is not None
    # Weixin
    if send_weixin:
        self._enable_send_weixin()

    # Create engine
    engine = self._create_engine()

    # Register events
    engine.add_event_handler(Events.EPOCH_STARTED, self._log_epochs, epochs)
    if validate:
        engine.add_event_handler(Events.EPOCH_COMPLETED, self._evaluate,
                                 val_loader)
    engine.add_event_handler(Events.EPOCH_COMPLETED, self._log_results,
                             validate)

    # Set checkpoint
    if save_per_epochs:
        checkpoint_handler = ModelCheckpoint(self.save_path, self.name,
                                             save_per_epochs,
                                             save_as_state_dict=True,
                                             require_empty=False)
        checkpoint_handler._iteration = self.epochs()
        engine.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler,
                                 {"trainer": self})

    for callback in callbacks:
        engine.add_event_handler(Events.EPOCH_COMPLETED,
                                 _callback_wrapper(callback), self)

    # Run
    engine.run(train_loader, epochs)

    # Destroy
    self._disable_send_weixin()

    # Return history
    hist = {
        metric: hist[-epochs:]
        for metric, hist in self.metric_history.items()
    }
    if not validate:
        hist = keyfilter(lambda k: not k.startswith("val_"), hist)
    return hist

def _prepare_kwargs(self, **kwargs):
    """Filter the keywords against the function's arguments.

    Any value that is callable is called with no arguments applied.
    """
    return valmap(
        self._call_lazy_function,
        merge(
            keyfilter(partial(operator.contains, self.arguments), self.keywords),
            kwargs,
        ),
    )

def get_settings():
    ahong_settings = frappe.get_single("Ahong eCommerce Settings")
    website_settings = get_website_settings()
    return merge(
        keyfilter(lambda x: x in ["copyright", "footer_address"], website_settings),
        {
            "privacy": bool(ahong_settings.privacy),
            "terms": bool(ahong_settings.terms),
            "show_about_us": bool(ahong_settings.show_about_us),
            "hide_build_info": bool(ahong_settings.hide_build_info),
        },
    )

def create(token, **kwargs):
    decoded_token = get_decoded_token(token)
    session_user = frappe.session.user
    settings = frappe.get_single("Leiteng Website Settings")
    if not settings.user:
        frappe.throw(frappe._("Site setup not complete"))
    frappe.set_user(settings.user)
    uid = decoded_token["uid"]
    customer_id = frappe.db.exists("Customer", {"le_firebase_uid": uid})
    if customer_id:
        frappe.throw(frappe._("Customer already created"))
    args = keyfilter(
        lambda x: x
        in [
            "customer_name",
            "mobile_no",
            "email",
            "address_line1",
            "address_line2",
            "city",
            "state",
            "country",
            "pincode",
        ],
        kwargs,
    )
    doc = frappe.get_doc(
        merge(
            {
                "doctype": "Customer",
                "le_firebase_uid": uid,
                "customer_type": "Individual",
                "customer_group": frappe.db.get_single_value(
                    "Selling Settings", "customer_group"
                ),
                "territory": frappe.db.get_single_value(
                    "Selling Settings", "territory"
                ),
            },
            args,
        )
    ).insert()
    auth.set_custom_user_claims(uid, {"customer": True}, app=app)
    frappe.set_user(session_user)
    return pick(["name", "customer_name"], doc.as_dict())

def _(dictionary):
    try:
        parameters = pipeline(
            dictionary,
            [
                keyfilter(lambda k: k != '__type'),
                valmap(json_deserialize_types),
            ],
        )
        Class = get_member_by_path(dictionary['__type'])
        instance = make_instance(Class)
        for key, value in parameters.items():
            setattr(instance, key, json_deserialize_types(value))
        return instance
    except KeyError:
        return valmap(json_deserialize_types, dictionary)

def json_serialize_types(value):
    try:
        value.__dict__
    except AttributeError:
        return value
    return {
        '__type': type(value).__module__ + "." + type(value).__name__,
        **pipeline(
            value.__dict__,
            [
                keyfilter(lambda l: not l.startswith("_")),
                json_serialize_types,
            ],
        ),
    }

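# --- Hedged sketch of json_serialize_types on a toy class. `pipeline` is a
# project helper not shown here; it is assumed to apply the listed functions
# left to right.
class Point:
    def __init__(self, x=0, y=0):
        self.x, self.y = x, y
        self._cache = None  # underscore-prefixed attrs are dropped by the keyfilter

json_serialize_types(Point(1, 2))
# -> {'__type': '__main__.Point', 'x': 1, 'y': 2}
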
def set_items_from_reference(self):
    ref_doc = frappe.get_doc(self.print_dt, self.print_dn)
    self.items = []
    for ref_item in ref_doc.items:
        self.append(
            "items",
            merge(
                keyfilter(
                    lambda x: x in ["item_code", "item_name", "qty", "batch_no"],
                    ref_item.as_dict(),
                ),
                get_item_details(
                    ref_item.item_code,
                    ref_item.batch_no,
                    price_list=self.price_list,
                ),
            ),
        )

def attach(self, trainer):
    if not self.enabled:
        return
    if self.metrics is None:
        metrics = trainer.test_metrics
    elif isinstance(self.metrics, Sequence):
        assert not isinstance(
            self.metrics, str), "Metrics can't be str, maybe wrap it in a list."
        for m in self.metrics:
            assert m in trainer.test_metrics, "%s is not in test_metrics" % m
        metrics = keyfilter(lambda k: k in self.metrics, trainer.test_metrics)
    elif isinstance(self.metrics, Dict):
        metrics = self.metrics
    else:
        raise ValueError("Invalid metrics, got %s" % self.metrics)
    self.evaluator = create_supervised_evaluator(trainer.model, metrics,
                                                 trainer.device)
    self.trainer = trainer

def get_all_scaling_group_servers(changes_since=None,
                                  server_predicate=identity):
    """
    Return tenant's servers that belong to any scaling group as
    {group_id: [server1, server2]} ``dict``. No specific ordering is
    guaranteed.

    :param datetime changes_since: Get servers changed since this time.
        Must be UTC.
    :param server_predicate: function of server -> bool that determines
        whether the server should be included in the result.
    :return: dict mapping group IDs to lists of Nova servers.
    """

    def has_group_id(s):
        return 'metadata' in s and isinstance(s['metadata'], dict)

    def group_id(s):
        return group_id_from_metadata(s['metadata'])

    servers_apply = compose(keyfilter(lambda k: k is not None),
                            groupby(group_id),
                            filter(server_predicate),
                            filter(has_group_id))
    return get_all_server_details(changes_since).on(servers_apply)

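# --- Hedged sketch of the servers_apply pipeline with plain dicts. The
# metadata key read by group_id_from_metadata is not shown above, so a
# stand-in key is assumed here purely for illustration.
from toolz.curried import compose, filter, groupby, keyfilter

servers = [
    {"id": "a", "metadata": {"group": "g1"}},
    {"id": "b", "metadata": {"group": "g1"}},
    {"id": "c"},  # no metadata -> dropped by the has_group_id filter
]
pipeline = compose(
    keyfilter(lambda k: k is not None),             # drop servers with no group
    groupby(lambda s: s["metadata"].get("group")),  # stand-in for group_id
    filter(lambda s: "metadata" in s),              # stand-in for has_group_id
)
pipeline(servers)
# -> {'g1': [{'id': 'a', ...}, {'id': 'b', ...}]}
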
def __init__(
    self,
    data=None,
    index=None,
    columns=None,
    estimator=None,
    parent=None,
    feature_level=None,
    copy=False,
    extensions=[
        'harness.python.ext.base.JinjaExtension',
        'harness.python.ext.SciKit.SciKitExtension',
        'harness.python.ext.Bokeh.BokehModelsExtension',
        'harness.python.ext.Bokeh.BokehPlottingExtension',
        'harness.python.ext.Bokeh.BokehChartsExtension',
    ],
):
    kwargs = dict(
        estimator=estimator,
        parent=parent,
        feature_level=feature_level,
        extensions=extensions,
    )
    self.set_params(**kwargs)
    for ext in self.extensions:
        if ext not in self.env.extensions:
            self.env.add_extension(ext)
        ext = self.env.extensions[ext]
        if (ext.mixin is not None
                and ext.mixin not in self.__class__.__bases__):
            self.__class__.__bases__ += (ext.mixin, )
    kwargs = pipe(
        locals(),
        keyfilter(partial(operator.contains, self._blacklist)),
        valfilter(complement(lambda x: x is None)),
    )
    super().__init__(**kwargs)

def fit2(self, train_loader, epochs=1, save=None, callbacks=()):
    engine = create_supervised_trainer(self.model, self.criterion,
                                       self.optimizer, self.metrics,
                                       self.device)
    engine.add_event_handler(Events.ITERATION_STARTED, self._lr_scheduler_step)
    self._timer.attach(engine, start=Events.EPOCH_STARTED)
    engine.add_event_handler(Events.EPOCH_STARTED, self._log_epochs, epochs)
    engine.add_event_handler(Events.EPOCH_COMPLETED, self._increment_epoch)
    engine.add_event_handler(Events.EPOCH_COMPLETED, self._log_results)

    # Set checkpoint
    if save:
        checkpoint_handler = save.parse(self)
        engine.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler,
                                 {"trainer": self})

    for callback in callbacks:
        engine.add_event_handler(Events.EPOCH_COMPLETED, wrap(callback), self)

    # Run
    engine.run(train_loader, epochs)

    # Return history
    hist = {
        metric: hist[-epochs:]
        for metric, hist in self.metric_history.items()
    }
    hist = keyfilter(lambda k: not k.startswith("val_"), hist)
    return hist

def get_notes(token, so_name):
    decoded_token = get_decoded_token(token)
    customer_id = frappe.db.exists(
        "Customer", {"le_firebase_uid": decoded_token["uid"]}
    )
    if not customer_id:
        frappe.throw(frappe._("Customer does not exist on backend"))
    if customer_id != frappe.db.get_value("Sales Order", so_name, "customer"):
        frappe.throw(frappe._("Not allowed to view this document"))
    get_dn_fields = compose(
        keyfilter(
            lambda x: x
            in [
                "name",
                "partner",
                "partner_name",
                "scheduled_datetime",
                "posting_datetime",
                "total",
                "total_taxes_and_charges",
                "grand_total",
                "rounding_adjustment",
                "rounded_total",
                "status",
            ]
        ),
        first,
    )
    get_item_fields = compose(
        list,
        map(
            keyfilter(
                lambda x: x
                in [
                    "name",
                    "item_code",
                    "item_name",
                    "item_group",
                    "rate",
                    "amount",
                    "so_detail",
                ]
            )
        ),
        map(lambda x: merge(x, {"name": x.get("child_name")})),
    )
    get_deliveries = compose(
        lambda x: x.values(),
        valmap(lambda x: merge(get_dn_fields(x), {"items": get_item_fields(x)})),
        groupby("name"),
        lambda x: frappe.db.sql(
            """
                SELECT
                    dn.name,
                    dn.sales_partner AS partner,
                    sp.partner_name,
                    dn.le_scheduled_datetime AS scheduled_datetime,
                    TIMESTAMP(dn.posting_date, dn.posting_time) AS posting_datetime,
                    dn.total,
                    dn.total_taxes_and_charges,
                    dn.grand_total,
                    dn.rounding_adjustment,
                    dn.rounded_total,
                    dn.workflow_state AS status,
                    dni.name AS child_name,
                    dni.item_code,
                    dni.item_name,
                    dni.item_group,
                    dni.qty,
                    dni.rate,
                    dni.amount,
                    dni.so_detail
                FROM `tabDelivery Note Item` AS dni
                LEFT JOIN `tabDelivery Note` AS dn ON dn.name = dni.parent
                LEFT JOIN `tabSales Partner` AS sp ON sp.name = dn.sales_partner
                WHERE dn.status < 2
                    AND dn.workflow_state IN ('Pending', 'Completed')
                    AND dni.against_sales_order = %(against_sales_order)s
            """,
            values={"against_sales_order": x},
            as_dict=1,
        ),
    )
    return get_deliveries(so_name)

def create(token, **kwargs):
    decoded_token = get_decoded_token(token)
    session_user = frappe.session.user
    webapp_user = frappe.get_cached_value("Ahong eCommerce Settings", None,
                                          "webapp_user")
    if not webapp_user:
        frappe.throw(frappe._("Site setup not complete"))
    uid = decoded_token["uid"]
    customer_id = frappe.db.exists("Customer", {"cm_firebase_uid": uid})
    if customer_id:
        frappe.throw(frappe._("Customer already created"))
    args = keyfilter(
        lambda x: x
        in [
            "customer_name",
            "mobile_no",
            "email_id",
            "address_line1",
            "address_line2",
            "city",
            "state",
            "country",
            "pincode",
        ],
        kwargs,
    )

    def insert_or_update():
        existing = frappe.db.exists("Customer",
                                    {"cm_mobile_no": args.get("mobile_no")})
        if existing:
            doc = frappe.get_doc("Customer", existing)
            doc.update({
                "customer_name": args.get("customer_name"),
                "cm_firebase_uid": uid,
            })
            if args.get("address_line1") and args.get("city"):
                address = _create_address(existing, args)
                doc.update({"customer_primary_address": address.get("name")})
            if args.get("email_id") and doc.customer_primary_contact:
                contact = frappe.get_doc("Contact", doc.customer_primary_contact)
                contact.add_email(args.get("email_id"), autosave=True)
            doc.save(ignore_permissions=True)
            return doc
        frappe.set_user(webapp_user)
        doc = frappe.get_doc(
            merge(
                {
                    "doctype": "Customer",
                    "cm_firebase_uid": uid,
                    "cm_mobile_no": args.get("mobile_no"),
                    "customer_type": "Individual",
                    "customer_group": frappe.db.get_single_value(
                        "Selling Settings", "customer_group"),
                    "territory": frappe.db.get_single_value(
                        "Selling Settings", "territory"),
                },
                args,
            )).insert(ignore_permissions=True)
        frappe.set_user(session_user)
        return doc

    doc = insert_or_update()
    auth.set_custom_user_claims(uid, {"customer": True}, app=app)
    return keyfilter(lambda x: x in CUSTOMER_FIELDS, doc.as_dict())

            'remote_cloudtrail_bucket': bool,
        }
    }
}, required=True, extra=ALLOW_EXTRA)

OUTPUT_SCHEMA = Schema({
    'output': ACCOUNT_LINK_PROVISIONED,
}, required=True, extra=ALLOW_EXTRA)

request_type = get_in(['event', 'RequestType'])
properties = get_in(['event', 'ResourceProperties'])
stacks = get_in(['event', 'ResourceProperties', 'Stacks'])
reactor_callback_url = get_in(
    ['event', 'ResourceProperties', 'ReactorCallbackUrl'])

supported_metadata = {
    'Region',
    'ExternalId',
    'AccountId',
    'AccountName',
    'ReactorId',
    'ReactorCallbackUrl',
}
callback_metadata = keyfilter(lambda x: x in supported_metadata)

default_metadata = {
    'version': '1',
    'message_source': 'cfn',
}


#####################
#
# Coeffects, i.e. from the outside world
#
#####################

def coeffects(world):
    return pipe(world, coeffects_cfn)

def sanitize(d):
    if d:
        return keyfilter(lambda k: k not in ["etag", "elapsed"], d)
    return d

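# --- Hedged usage sketch: sanitize drops transport noise while passing falsy
# inputs straight through.
sanitize({"etag": "abc123", "elapsed": 0.21, "status": 200})  # -> {'status': 200}
sanitize(None)  # -> None
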
def coeffects_s3(world):
    response = s3.list_buckets()
    return keyfilter(lambda x: x in {'Buckets'}, response)

def extract_dur_disp_tab(doc: Disp_dur) -> Disp_dur:
    return z.keyfilter(lambda x: x in {'dispatchCount', 'duration'}, doc)

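# --- Hedged usage sketch; Disp_dur is assumed to be a dict-like type alias.
extract_dur_disp_tab({"dispatchCount": 3, "duration": 12.5, "noise": 1})
# -> {'dispatchCount': 3, 'duration': 12.5}
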
def pick(whitelist, d):
    return keyfilter(lambda k: k in whitelist, d)

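# --- Hedged usage sketch for pick, a thin whitelist wrapper over keyfilter.
pick(["name", "customer_name"],
     {"name": "CUST-0001", "customer_name": "Alice", "owner": "ops@example.com"})
# -> {'name': 'CUST-0001', 'customer_name': 'Alice'}
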
def fit(self, train_loader, epochs=1, val_loader=None, save=None,
        iterations=None, callbacks=()):
    engine = create_supervised_trainer(self.model, self.criterion,
                                       self.optimizer, self.metrics,
                                       self.device)
    self._attach_timer(engine)
    engine.add_event_handler(Events.ITERATION_STARTED, self._lr_scheduler_step)
    engine.add_event_handler(Events.EPOCH_STARTED, self._log_epochs, epochs)

    if val_loader is not None:
        if isinstance(val_loader, tuple):
            val_loader, eval_per_epochs = val_loader
        else:
            eval_per_epochs = 1
        evaluator = create_supervised_evaluator(self.model, self.test_metrics,
                                                self.device)
        engine.add_event_handler(Events.EPOCH_COMPLETED, _evaluate, evaluator,
                                 val_loader, eval_per_epochs)

    engine.add_event_handler(Events.EPOCH_COMPLETED, self._increment_epoch)
    engine.add_event_handler(Events.EPOCH_COMPLETED, self._log_results)
    if val_loader is not None:
        engine.add_event_handler(Events.EPOCH_COMPLETED, self._log_val_results,
                                 evaluator, eval_per_epochs)

    # Set checkpoint
    if save:
        checkpoint_handler = save.parse(self)
        engine.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler,
                                 {"trainer": self})

    for callback in callbacks:
        engine.add_event_handler(Events.EPOCH_COMPLETED, wrap(callback), self)

    if iterations:
        engine.add_event_handler(Events.ITERATION_COMPLETED,
                                 _terminate_on_iterations, iterations)
        # With an iteration cap the handler above terminates the run, so give
        # the engine a large epoch budget instead of the user-supplied value.
        epochs = 1000

    # Run
    engine.run(train_loader, epochs)

    # Return history
    hist = {
        metric: hist[-epochs:]
        for metric, hist in self.metric_history.items()
    }
    if val_loader is None:
        hist = keyfilter(lambda k: not k.startswith("val_"), hist)
    return hist

def coeffects_organizations(world):
    response = orgs.describe_organization()
    return keyfilter(lambda x: x in {'Organization'}, response)

def coeffects_cur(world):
    response = cur.describe_report_definitions()
    return keyfilter(lambda x: x in {'ReportDefinitions'}, response)

def coeffects_cloudtrail(world):
    response = ct.describe_trails()
    return keyfilter(lambda x: x in {'trailList'}, response)
