    def create_resource_group(self, name, location, tags=None):
        # Jython calling a Java resource-management SDK: copy the Python
        # tags dict into a java.util.HashMap before passing it to the setter
        operations = self._resource_group_operations()
        params = ResourceGroup()
        params.setLocation(location)
        if tags:
            tags_as_map = HashMap()
            tags_as_map.putAll(tags)
            params.setTags(tags_as_map)
        operations.createOrUpdate(name, params)
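
Under Jython, java.util.HashMap.putAll accepts a plain Python dict, since Jython's dict type implements the java.util.Map interface. A minimal standalone sketch of the conversion pattern used above, assuming Jython 2.7 (the tag values are hypothetical):

    # Minimal sketch: copy a Python dict into a java.util.HashMap.
    # Runs under Jython, where dict implements java.util.Map.
    from java.util import HashMap

    tags = {"env": "test", "owner": "qa"}
    tags_as_map = HashMap()
    tags_as_map.putAll(tags)        # copies every key/value pair
    print tags_as_map.get("env")    # -> test
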
Example #3
    def load_docs(self, num_workers, cmd=None, mutated=0):
        # Avoid a shared mutable default argument; None means "no overrides"
        cmd = cmd or dict()
        master = Server(self.cluster.master.ip, self.cluster.master.port,
                        self.cluster.master.rest_username,
                        self.cluster.master.rest_password,
                        str(self.cluster.master.memcached_port))
        # Per-operation key boundaries, handed to DocRange below as a
        # java.util.Map (Jython 2 "/" is integer division here)
        hm = HashMap()
        hm.putAll({
            DRConstants.create_s: self.init_items_per_collection,
            DRConstants.create_e: self.init_items_per_collection,
            DRConstants.delete_s: 0,
            DRConstants.delete_e: self.init_items_per_collection / 2,
            DRConstants.read_s: self.init_items_per_collection / 2,
            DRConstants.read_e: self.init_items_per_collection
        })
        self.dr = DocRange(hm)

        ws = WorkLoadSettings(cmd.get("keyPrefix", self.key),
                              cmd.get("keySize", self.key_size),
                              cmd.get("docSize", self.doc_size),
                              cmd.get("cr", self.create_perc),
                              cmd.get("rd", self.read_perc),
                              cmd.get("up", self.update_perc),
                              cmd.get("dl", self.delete_perc),
                              cmd.get("workers", self.process_concurrency),
                              cmd.get("ops", self.ops_rate),
                              cmd.get("loadType", None),
                              cmd.get("keyType", None),
                              cmd.get("valueType", None),
                              cmd.get("validate", False),
                              cmd.get("gtm", False), cmd.get("deleted", False),
                              cmd.get("mutated", mutated))
        ws.dr = self.dr
        dg = DocumentGenerator(ws, "", None)
        tasks = list()
        while num_workers > 0:
            for bucket in self.buckets:
                for scope in bucket.scopes.keys():
                    for collection in self.collections:
                        client = NewSDKClient(master, bucket.name, scope,
                                              collection)
                        client.initialiseSDK()
                        th_name = "Loader_%s_%s_%s_%s_%s" \
                                  % (bucket.name, scope, collection,
                                     str(num_workers), time.time())
                        task = WorkLoadGenerate(th_name, dg, client,
                                                self.durability_level)
                        tasks.append(task)
                        self.tm.submit(task)
                        num_workers -= 1
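
The HashMap handed to DocRange above encodes per-operation key ranges. A compact sketch of the boundaries it produces, assuming a hypothetical init_items_per_collection of 1000 (create_s equals create_e here, so presumably no new keys are created during this phase):

    # Sketch of the load_docs key ranges, with init_items_per_collection
    # assumed to be 1000 (hypothetical value; Jython 2: 1000 / 2 == 500)
    items = 1000
    ranges = {
        "create": (items, items),      # start == end: empty create range
        "delete": (0, items / 2),      # first half of the key space
        "read":   (items / 2, items),  # second half of the key space
    }
    for op, (start, end) in sorted(ranges.items()):
        print "%-6s keys %d..%d" % (op, start, end)
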
Example #4
    def updateAttributes(self, context, configurationAttributes):
        print "Idp extension. Method: updateAttributes"
        attributeContext = context.getAttributeContext()

        customAttributes = HashMap()
        customAttributes.putAll(attributeContext.getIdPAttributes())

        # Remove givenName attribute
        customAttributes.remove("givenName")

        # Update surname attribute
        if customAttributes.containsKey("sn"):
            customAttributes.get("sn").setValues(
                ArrayList(Arrays.asList(StringAttributeValue("Dummy"))))

        # Set updated attributes
        attributeContext.setIdPAttributes(customAttributes.values())

        return True
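
updateAttributes copies the live IdP attribute map before mutating it, then writes the modified set back in one call. The same copy-mutate-set pattern in isolation, with plain strings standing in for Shibboleth IdPAttribute objects (a sketch of the pattern, not the IdP API, using hypothetical data):

    # Copy-mutate-set, reduced to plain java.util types; strings stand
    # in for IdPAttribute values (hypothetical data)
    from java.util import HashMap

    original = HashMap()
    original.putAll({"givenName": "Alice", "sn": "Smith"})

    working = HashMap()
    working.putAll(original)      # defensive copy; original is untouched
    working.remove("givenName")   # drop one attribute
    if working.containsKey("sn"):
        working.put("sn", "Dummy")

    print working                 # {sn=Dummy}
    print original                # still has both entries
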
Example #5
    def _loader_dict_new(self, cmd=None, scopes=None, collections=None):
        # Avoid a shared mutable default argument; None means "no overrides"
        cmd = cmd or dict()
        self.loader_map = dict()
        for bucket in self.cluster.buckets:
            scopes_keys = scopes or bucket.scopes.keys()
            for scope in scopes_keys:
                collections_keys = collections or \
                    bucket.scopes[scope].collections.keys()
                for collection in collections_keys:
                    # Skip the _default collection in the _default scope
                    if collection == "_default" and scope == "_default":
                        continue
                    ws = WorkLoadSettings(cmd.get("keyPrefix", self.key),
                                          cmd.get("keySize", self.key_size),
                                          cmd.get("docSize", self.doc_size),
                                          cmd.get("cr", self.create_perc),
                                          cmd.get("rd", self.read_perc),
                                          cmd.get("up", self.update_perc),
                                          cmd.get("dl", self.delete_perc),
                                          cmd.get("ex", self.expiry_perc),
                                          cmd.get("workers", self.process_concurrency),
                                          cmd.get("ops", self.ops_rate),
                                          cmd.get("loadType", None),
                                          cmd.get("keyType", None),
                                          cmd.get("valueType", None),
                                          cmd.get("validate", False),
                                          cmd.get("gtm", False),
                                          cmd.get("deleted", False),
                                          cmd.get("mutated", self.mutate))
                    # Per-operation key boundaries for DocRange
                    hm = HashMap()
                    hm.putAll({DRConstants.create_s: self.create_start,
                               DRConstants.create_e: self.create_end,
                               DRConstants.update_s: self.update_start,
                               DRConstants.update_e: self.update_end,
                               DRConstants.expiry_s: self.expiry_start,
                               DRConstants.expiry_e: self.expiry_end,
                               DRConstants.delete_s: self.delete_start,
                               DRConstants.delete_e: self.delete_end,
                               DRConstants.read_s: self.read_start,
                               DRConstants.read_e: self.read_end})
                    dr = DocRange(hm)
                    ws.dr = dr
                    dg = DocumentGenerator(ws, self.key_type, self.val_type)
                    self.loader_map.update(
                        {bucket.name + scope + collection: dg})
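
One caveat worth noting about the map keys built above: plain string concatenation (bucket.name + scope + collection) can collide when different triples concatenate to the same string. A tuple key avoids this; a small hypothetical illustration:

    # Hypothetical illustration: concatenated keys can collide
    collision = ("b1" + "s" + "c") == ("b" + "1s" + "c")
    print collision             # True: two different triples, one key
    # Tuple keys keep each (bucket, scope, collection) distinct:
    loader_map = dict()
    loader_map[("b1", "s", "c")] = "dg1"
    loader_map[("b", "1s", "c")] = "dg2"
    print len(loader_map)       # 2
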
Example #6
    def data_validation(self, scopes=None, collections=None):
        doc_ops = self.doc_ops.split(":")
        self.log.info("Validating Active/Replica Docs")
        cmd = dict()
        self.ops_rate = self.input.param("ops_rate", 2000)
        master = Server(self.cluster.master.ip, self.cluster.master.port,
                        self.cluster.master.rest_username, self.cluster.master.rest_password,
                        str(self.cluster.master.memcached_port))
        self.loader_map = dict()
        for bucket in self.cluster.buckets:
            scopes_keys = scopes or bucket.scopes.keys()
            for scope in scopes_keys:
                collections_keys = collections or bucket.scopes[scope].collections.keys()
                self.log.info("scope is {}".format(scope))
                for collection in collections_keys:
                    self.log.info("collection is {}".format(collection))
                    if collection == "_default" and scope == "_default":
                        continue
                    for op_type in doc_ops:
                        cmd.update({"deleted": False})
                        hm = HashMap()
                        if op_type == "create":
                            hm.putAll({DRConstants.read_s: self.create_start,
                                       DRConstants.read_e: self.create_end})
                        elif op_type == "update":
                            hm.putAll({DRConstants.read_s: self.update_start,
                                       DRConstants.read_e: self.update_end})
                        elif op_type == "delete":
                            hm.putAll({DRConstants.read_s: self.delete_start,
                                       DRConstants.read_e: self.delete_end})
                            cmd.update({"deleted": True})
                        else:
                            continue
                        dr = DocRange(hm)
                        ws = WorkLoadSettings(cmd.get("keyPrefix", self.key),
                                              cmd.get("keySize", self.key_size),
                                              cmd.get("docSize", self.doc_size),
                                              cmd.get("cr", 0),
                                              cmd.get("rd", 100),
                                              cmd.get("up", 0),
                                              cmd.get("dl", 0),
                                              cmd.get("ex", 0),
                                              cmd.get("workers", self.process_concurrency),
                                              cmd.get("ops", self.ops_rate),
                                              cmd.get("loadType", None),
                                              cmd.get("keyType", None),
                                              cmd.get("valueType", None),
                                              cmd.get("validate", True),
                                              cmd.get("gtm", False),
                                              cmd.get("deleted", False),
                                              cmd.get("mutated", self.mutate))
                        ws.dr = dr
                        dg = DocumentGenerator(ws, self.key_type, self.val_type)
                        self.loader_map.update({bucket.name+scope+collection+op_type: dg})
        self.log.info("loader_map is {}".format(self.loader_map))

        tasks = list()
        # Fan out validation tasks until process_concurrency workers are used
        i = self.process_concurrency
        while i > 0:
            for bucket in self.cluster.buckets:
                for scope in bucket.scopes.keys():
                    for collection in bucket.scopes[scope].collections.keys():
                        if collection == "_default" and scope == "_default":
                            continue
                        for op_type in doc_ops:
                            if op_type not in ["create", "update", "delete"]:
                                continue
                            client = NewSDKClient(master, bucket.name, scope, collection)
                            client.initialiseSDK()
                            self.sleep(1)
                            taskName = "Validate_%s_%s_%s_%s_%s_%s" \
                                       % (bucket.name, scope, collection,
                                          op_type, str(i), time.time())
                            task = WorkLoadGenerate(taskName, self.loader_map[bucket.name+scope+collection+op_type],
                                                    client, "NONE",
                                                    self.maxttl, self.time_unit,
                                                    self.track_failures, 0)
                            tasks.append(task)
                            self.doc_loading_tm.submit(task)
                            i -= 1
        self.doc_loading_tm.getAllTaskResult()
        for task in tasks:
            try:
                task.sdk.disconnectCluster()
            except Exception as e:
                print(e)
        for task in tasks:
            self.assertTrue(task.result, "Validation Failed for: %s" % task.taskName)
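
The op_type branch in data_validation maps each mutation phase to a read range, then validates it with a read-only workload (rd=100, validate=True; deleted=True presumably checks that deleted keys are absent). The dispatch reduced to a table, as a sketch with hypothetical bounds standing in for the self.*_start/_end attributes:

    # Sketch of the op_type -> read-range dispatch in data_validation,
    # with hypothetical numeric bounds replacing self.*_start/_end
    bounds = {
        "create": (0, 1000),     # re-read everything that was created
        "update": (0, 500),      # re-read the updated slice
        "delete": (500, 1000),   # deleted keys validated as absent
    }
    for op_type in "create:update:delete:touch".split(":"):
        if op_type not in bounds:
            continue             # mirrors the "else: continue" guard above
        read_s, read_e = bounds[op_type]
        deleted = (op_type == "delete")
        print "%-6s read %d..%d deleted=%s" % (op_type, read_s, read_e,
                                               deleted)
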