def run():
    """Test that deleteNode rejects a non-existent cluster id.

    Returns:
        bool: True when ClusterManager reports "failed" for the bogus
        cluster id; False on unexpected success or on any exception.
    """
    ClusterManager.init()
    cluster_id = ClusterManager.createCluster(CLUSTER_NAME, write_DB=False)
    cluster_id = cluster_id.data.get("cluster_id")
    ClusterManager.addNode(cluster_id, NODE_NAME, write_DB=False)
    wrong_cluster_id = "wrong id"
    try:
        result = ClusterManager.deleteNode(wrong_cluster_id,
                                           NODE_NAME[0],
                                           write_DB=False)
        # The delete must be rejected: "failed" is the expected outcome.
        return result.code == "failed"
    except Exception:  # narrowed from bare except: don't trap SystemExit/KeyboardInterrupt
        return False
    finally:
        # Clean up the node added to the real cluster.
        # NOTE(review): "compute1" is hard-coded while addNode used NODE_NAME --
        # confirm NODE_NAME contains "compute1".
        ClusterManager.deleteNode(cluster_id, "compute1", write_DB=False)
def run():
    """Test that listNode reports exactly one node after a single addNode.

    Returns:
        bool: True when the node list has length 1; False otherwise or
        on any exception.
    """
    ClusterManager.init()
    cluster_id = ClusterManager.createCluster(CLUSTER_NAME, write_DB=False)
    cluster_id = cluster_id.data.get("cluster_id")
    ClusterManager.addNode(cluster_id, NODE_NAME, write_DB=False)

    try:
        result = ClusterManager.listNode(cluster_id)
        node_list = result.data.get("node_list")
        # Exactly the one node added above must be listed.
        return len(node_list) == 1
    except Exception:  # narrowed from bare except: don't trap SystemExit/KeyboardInterrupt
        return False
    finally:
        # Clean up the node added for the test.
        ClusterManager.deleteNode(cluster_id, "compute1", write_DB=False)
def run():
    """Test that addInstance succeeds on a cluster with one node.

    Returns:
        bool: True when ClusterManager reports "succeed"; False
        otherwise or on any exception.
    """
    ClusterManager.init()
    instance_id = Preprocess.create_with_provider_instance()
    cluster_id = ClusterManager.createCluster(CLUSTER_NAME, write_DB=False)
    cluster_id = cluster_id.data.get("cluster_id")
    ClusterManager.addNode(cluster_id, NODE_NAME, write_DB=False)

    try:
        result = ClusterManager.addInstance(cluster_id, instance_id, write_DB=False, send_flag=False)
        return result.code == "succeed"
    except Exception:  # narrowed from bare except: don't trap SystemExit/KeyboardInterrupt
        return False
    finally:
        # Tear down the node and the instance created by Preprocess.
        ClusterManager.deleteNode(cluster_id, "compute1", write_DB=False)
        Postprocess.deleteInstance()
Beispiel #4
0
 def deleteNode(self, cluster_id, node_name):
     """
     The function for delete a computing node from HA cluster.
     Put the cluster uuid and node name to this function, it will delete node from HA cluster.
     Args:
         cluster_id (str): cluster uuid.
         node_name (str): node name.
     Return:
         (map) delete node result.
         {"code" : "0","message": message} -> success.
         {"code" : "1","message": message} -> fail.
         None is returned when an unexpected exception occurs.
     """
     try:
         result = ClusterManager.deleteNode(cluster_id, node_name)
         return result
     except Exception as e:
         # Keep the failure reason in the log instead of discarding it
         # (matches the error-handling style of updateAllCluster).
         logging.error("HASS--delete node fail : " + str(e))
Beispiel #5
0
 def addNode(self, clusterId, nodeList):
     """
     The function for add a computing node to HA cluster.
     Put the cluster uuid and nodeList to this function, it will add node to HA cluster.
     Args:
         clusterId (str): cluster uuid.
         nodeList (str): node name.
     Return:
         (map) add node result.
         {"code" : "0","message": message} -> success.
         {"code" : "1","message": message} -> fail.
         None is returned when an unexpected exception occurs.
     """
     try:
         result = ClusterManager.addNode(clusterId, nodeList)
         return result
     except Exception as e:
         # Keep the failure reason in the log instead of discarding it
         # (matches the error-handling style of updateAllCluster).
         logging.error("HASS--add node fail : " + str(e))
Beispiel #6
0
 def deleteInstance(self, clusterId, instanceId):
     """
     The function for delete a instance from HA cluster.
     Put the cluster uuid and instance id to this function, it will delete instance from HA cluster.
     Args:
         clusterId (str): cluster uuid.
         instanceId (str): instance id.
     Return:
         (map) delete instance result.
         {"code" : "0","message": message} -> success.
         {"code" : "1","message": message} -> fail.
         None is returned when an unexpected exception occurs.
     """
     try:
         result = ClusterManager.deleteInstance(clusterId, instanceId)
         logging.info("HASS--delete instance success")
         return result
     except Exception as e:
         # Keep the failure reason in the log instead of discarding it
         # (matches the error-handling style of updateAllCluster).
         logging.error("HASS--delete instance fail : " + str(e))
Beispiel #7
0
    def recoverSensorCritical(self, cluster_id, fail_node_name):
        """Recover from a critical sensor event on a node.

        Evacuates the VMs off the failed node, then powers the node off.

        :param cluster_id: uuid of the cluster containing the failed node
        :param fail_node_name: name of the node reported as failed
        :return: result of recoverNodeByShutoff, or None if the cluster
            cannot be found
        """
        target_cluster = ClusterManager.getCluster(cluster_id)
        if not target_cluster:
            logging.error("RecoverManager : cluster not found")
            return
        failed_node = target_cluster.getNodeByName(fail_node_name)
        print("fail node is %s" % failed_node.name)
        print("start recovery vm")
        # Evacuation (not live migration) is used for critical failures.
        self.recoverVMByEvacuate(target_cluster, failed_node)
        print("end evacuate vm")
        return self.recoverNodeByShutoff(failed_node)
def run():
    """Test that addNode succeeds on a freshly created cluster.

    Returns:
        bool: True when ClusterManager reports "succeed"; False
        otherwise or on any exception.
    """
    ClusterManager.init()
    cluster_id = ClusterManager.createCluster(CLUSTER_NAME, write_DB=False)
    cluster_id = cluster_id.data.get("cluster_id")
    try:
        result = ClusterManager.addNode(cluster_id, NODE_NAME, write_DB=False)
        return result.code == "succeed"
    except Exception:  # narrowed from bare except: don't trap SystemExit/KeyboardInterrupt
        return False
    finally:
        # Remove the node added by the test (first entry of NODE_NAME).
        ClusterManager.deleteNode(cluster_id, NODE_NAME[0], write_DB=False)
Beispiel #9
0
    def recoverSensorCriticalByConfig(self, cluster_id, fail_node_name):
        """Recover from a critical sensor event using the configured policy.

        Live-migrates the VMs off the failed node, then reboots the host,
        printing timing checkpoints along the way.

        :param cluster_id: uuid of the cluster containing the failed node
        :param fail_node_name: name of the node reported as failed
        :return: result of recoverNodeByReboot, or None if the cluster
            cannot be found
        """
        print("start recover sensor:", time.time())
        target_cluster = ClusterManager.getCluster(cluster_id)
        if not target_cluster:
            logging.error("RecoverManager : cluster not found")
            return
        failed_node = target_cluster.getNodeByName(fail_node_name)
        print("start recover vm:", time.time())
        # Live migration (not evacuation) is used for this recovery path.
        self.recoverVMByLiveMigrate(target_cluster, failed_node)
        print("end recover vm and start reboot host", time.time())
        reboot_result = self.recoverNodeByReboot(failed_node)
        print("finish recover host:", time.time())
        return reboot_result
Beispiel #10
0
        node = ring[data['key']]
        print(f"Sending data:{data}")
        node.send_json(data)
        node.recv_json()
        time.sleep(1)

    #Send remove signal to Consul
    cm.deregisterNode(nodeName=nodeName)

    print("Done")


if __name__ == "__main__":
    # Discover every node registered with the cluster and build the list
    # of ZeroMQ endpoints to talk to.
    cm = ClusterManager()

    servers = []
    for node_info in cm.listNodes().values():
        node_port = node_info['Port']
        servers.append(f'tcp://127.0.0.1:{node_port}')

    print("Servers:", servers)

    # Build the consistent-hash ring over the discovered endpoints.
    ring = create_consistent_hashing_ring(servers)
Beispiel #11
0
 def __init__(self):
     """Initialise the D-Stream state.

     Sets the time tick to zero and builds the cluster manager, the grid
     list and the clustering period (gap) taken from Helper.
     """
     self.tc = 0  # current time tick
     self.cluster_manager = ClusterManager()
     self.grid_list = GridList()
     self.gap = Helper().gap()
Beispiel #12
0
class D_Stream:
    """D-Stream density-based stream clustering.

    Maps incoming data points onto grids, maintains per-grid density
    statistics in ``grid_list``, and periodically (every ``gap`` ticks)
    runs the initial / adjusting clustering procedures via
    ``cluster_manager``.
    """
    #================= variable declarations =======================
    # current position on the time scale; starts at 0 and advances in integer steps of 1
    tc=0
    # grid_list: dict storing each grid's character_vector
    grid_list=None
    # clusters: bookkeeping table of clusters, k/v structure; v is a set holding grid keys
    #clusters={}
    #ClusterManager
    cluster_manager=None

    gap=0

    #======== functions below =================

    #TODO: initialise variables and structures; do the preparation work
    def __init__(self):
        self.tc=0;
        self.cluster_manager=ClusterManager()
        self.grid_list=GridList()
        self.gap = Helper().gap()

    # NOTE: nowadays a grid can simply be added straight into a cluster
    # def __addToCluster(self,grid,cluster):
    #     #add it to the cluster
    #     #update the clusters bookkeeping table




    def __adjust_sparse(self,grid_object):
        """Handle a grid that turned sparse.

        This routine has been disabled: it logs a warning and returns
        immediately; everything after ``return`` is intentionally
        unreachable dead code kept for reference.
        """
        #TODO:
        # NOTE(review): logging.warn is a deprecated alias of logging.warning
        logging.warn("the __adjust_sparse was been removed")
        return
        # remove this grid from its cluster (unreachable)
        cluster_object=self.cluster_manager.getCluster(grid_object.clusterKey())
        cluster_object.delGrid(grid_object)
        if cluster_object.size()==0:
            self.cluster_manager.delCluster(cluster_object.key())
        if not cluster_object.isClusterSingle():
            self.cluster_manager.splitCluster(cluster_object.key())


#=================================================

    def __adjust_dense_neighbor_dense(self,grid_object,grid_h_object):
        """Merge logic when dense grid g's best neighbor h is also dense."""
    # if g has no cluster yet, attach it to h's cluster
        if -1==grid_object.clusterKey():
            logging.debug("grid "+grid_object.key()+" has not been cluster")
            grid_h_cluster_object=self.cluster_manager.getCluster(grid_h_object.clusterKey())
            grid_h_cluster_object.addGrid(grid_object)

        # if g already has a cluster and its cluster is bigger than h's,
        # absorb h's cluster; otherwise absorb in the opposite direction
        elif  -1!=grid_object.clusterKey():
            logging.debug("grid " + grid_object.key() + "has been cluster")
            grid_cluster_object=self.cluster_manager.getCluster(grid_object.clusterKey())
            grid_h_cluster_object=self.cluster_manager.getCluster(grid_h_object.clusterKey())
            if grid_cluster_object.size()>grid_h_cluster_object.size():
                logging.info("cluster of grid "+grid_object.key()+" > cluster of grid"+grid_h_object.key())
                self.cluster_manager.mergeCluster(grid_cluster_object.key(),grid_h_cluster_object.key())
            else:
                logging.info("cluster of grid " + grid_object.key() + " <= cluster of grid" + grid_h_object.key())
                self.cluster_manager.mergeCluster(grid_h_cluster_object.key(),grid_cluster_object.key())

    #=================================================

    def __adjust_dense_neighbor_transitional(self,grid_object,grid_h_object):
        """Merge logic when dense grid g's best neighbor h is transitional."""
        # fetch h's cluster
        grid_h_cluster_object = self.cluster_manager.getCluster(grid_h_object.clusterKey())
        if -1==grid_object.clusterKey():
            logging.info("grid "+grid_object.key()+"hasn't been clustered")
            grid_h_cluster_object.addGrid(grid_object)
            # add the grid to h's cluster, then check whether h is still an
            # outside grid; if not, take the grid back out
            if not grid_h_cluster_object.isOutsideGrid(grid_h_object):
                logging.info("grid "+grid_object.key()+" is not outside of cluster "+grid_h_cluster_object.key()+" if added to it")
                grid_h_cluster_object.delGrid(grid_object)
                # if grid_h_cluster_object.size()==0:
                #     self.cluster_manager.delCluster(grid_h_cluster_object.key())
        else:
            logging.info("grid " + grid_object.key() + "has been clustered")
            grid_cluster_object = self.cluster_manager.getCluster(grid_object.clusterKey())
            # the grid h changes owner: move it into g's (larger) cluster
            if grid_cluster_object.size()>=grid_h_cluster_object.size():
                logging.info("size of cluster of gird "+grid_object.key()+" is greater than grid "+grid_h_object.key())
                grid_h_cluster_object.delGrid(grid_h_object)
                grid_cluster_object.addGrid(grid_h_object)
                if grid_h_cluster_object.size()==0:
                    self.cluster_manager.delCluster(grid_h_cluster_object.key())

    #=================================================


    def __adjust_dense(self,grid_object):
        """Handle a grid that turned dense.

        Returns -1 when the grid has no neighbors, -2 when no neighbor is
        clustered, 0 otherwise.
        """
        # get all neighboring grids of this dense grid (call it g); among
        # them pick the grid whose cluster is largest, call it grid_h
        neighbors=self.grid_list.getNeighborGrids(grid_object.key())
        if 0==len(neighbors):
            logging.debug("there is no neighbors in grid "+grid_object.key())
            return -1

        max_size=0
        grid_h_object=None
        for item in neighbors:
            if item.clusterKey()!=-1:
                cluster=self.cluster_manager.getCluster(item.clusterKey())
                if cluster.size()>max_size:
                    max_size=cluster.size()
                    grid_h_object=item



        # if this branch fires, none of the neighbors has a cluster
        if 0==max_size:
            logging.debug("neighbors of grid "+grid_object.key()+" is not clustered")
            return -2



        # if h is a dense grid
        if DensityStatus.DENSE==grid_h_object.densityStatus():
            logging.debug("neighbor h of grid "+grid_object.key()+" is Dense")
            self.__adjust_dense_neighbor_dense(grid_object,grid_h_object)

        # if h is a transitional grid
        elif DensityStatus.TRANSITIONAL==grid_h_object.densityStatus():
            logging.debug("neighbor h of grid " + grid_object.key() + " is TRANSITIONAL")
            self.__adjust_dense_neighbor_transitional(grid_object,grid_h_object)

        return 0

    #=================================================




    def __adjust_transitional(self,grid_object):
        """Handle a grid that turned transitional: move it to the largest
        neighboring cluster for which it would be an outside grid."""
        neighbor_clusters=self.cluster_manager.getNeighborClusters(grid_object)
    # among the neighboring grids find the largest cluster such that g
    # would still be an outside grid after joining it
        the_ret_cluster_key=0
        the_ret_cluster_size=0
        for cluster in neighbor_clusters:
            # NOTE(review): the best-candidate trackers are reset on EVERY
            # iteration, so only the last qualifying cluster is kept, not
            # the largest -- this looks like a bug; confirm intent.
            the_ret_cluster_key=-1
            the_ret_cluster_size=0
            if cluster.size()>the_ret_cluster_size:
                if cluster.isOutsideIfAdd(grid_object):
                    the_ret_cluster_size=cluster.size()
                    the_ret_cluster_key=cluster.key()
        # after the loop a candidate is found - though possibly none exists
        if -1!=the_ret_cluster_key and 0!=the_ret_cluster_size:
            if grid_object.clusterKey() != -1:
                cluster = self.cluster_manager.getCluster(grid_object.clusterKey())
                cluster.delGrid(grid_object)
                if cluster.size() == 0:
                    self.cluster_manager.delCluster(cluster.key())
            target=self.cluster_manager.getCluster(the_ret_cluster_key)
            target.addGrid(grid_object)


#=================================================


        #=================================================


    #=================================================

    def __initial_clustring(self):
        """First clustering pass, run when tc reaches the gap for the
        first time: seed a singleton cluster per dense grid, then merge
        until a full pass changes nothing."""
        # turn every dense grid into its own singleton cluster
        dense_grids=self.grid_list.getDenseGrids()
        for grid in dense_grids:
            self.cluster_manager.addNewCluster(grid)

        stop_flag=0 # 0 = keep iterating, 1 = stop (set once a full pass modifies no cluster)
        # in grid_list find every dense C_Vector, label them starting from 0
        # and record them in clusters; all non-dense grids stay unlabelled
        while 0==stop_flag:
            # counts cluster modifications; incremented whenever a cluster changes
            change_flag=0
            # iterate over clusters, fetching each cluster's grid set
            all_clusters=self.cluster_manager.getAllCluster()
            keys=list(all_clusters.keys())
            for k in keys:
                cluster=None
                if k in all_clusters:
                    cluster=all_clusters[k]
                else:
                    # cluster was merged away during this pass
                    continue
                change_flag+=self.__initial_clustering_neighbors(cluster)


            # terminate the while loop
            if 0==change_flag:
                stop_flag=1

    def __initial_clustering_neighbors(self,cluster):
        """Try to merge ``cluster`` with clusters of its outside grids'
        neighbors; returns the number of changes made (0 or 1)."""
        change_flag=0
        # find the grids that are outside grids of this cluster
        outside_grids = cluster.getOutsideGrids()
        for outside_grid in outside_grids:
            # for each outside grid, fetch its neighboring grids
            neighbor_grids = self.grid_list.getNeighborGrids(outside_grid.key())
            for neighbor_grid in neighbor_grids:
                # if this cluster is larger than the neighbor's cluster,
                # absorb the neighbor's cluster (and bump change_flag)
                try:
                    # print(neighbor_grid)
                    neighbor_cluster = self.cluster_manager.getCluster(neighbor_grid.clusterKey())
                    if cluster.key() == neighbor_cluster.key():
                        # same cluster on both sides - nothing to do
                        continue
                    if cluster.size() > neighbor_cluster.size():
                        self.cluster_manager.mergeCluster(cluster.key(), neighbor_cluster.key())
                        change_flag += 1
                        return change_flag
                    else:
                        self.cluster_manager.mergeCluster(neighbor_cluster.key(), cluster.key())

                        change_flag += 1
                        return change_flag
                except KeyError:
                    logging.debug("except KeyError")
                    # neighbor has no cluster (KeyError path); absorb the
                    # grid here instead and bump change_flag
                    if neighbor_grid.clusterKey() != -1:
                        print("your program are being in trouble!!!!!")
                        exit()
                    if neighbor_grid.densityStatus() == DensityStatus.TRANSITIONAL:
                        cluster.addGrid(neighbor_grid)
                        change_flag += 1
        return change_flag

    def __adjust_clustring(self):
        """Periodic clustering adjustment over all grids whose density
        status changed since the last gap."""
        #======= (this step is not spelled out in the paper; added by the
        # author) ============ turn every unclustered dense grid into its
        # own singleton cluster
        dense_grids=self.grid_list.getDenseGrids()
        for grid in dense_grids:
            if grid.isNotClustered() and grid.densityStatus()==DensityStatus.DENSE:
                logging.debug("dense grid "+grid.key()+" is of no cluster")
                self.cluster_manager.addNewCluster(grid)


        # get the list of grids whose change flag is set (changed since last pass)
        change_grids=self.grid_list.getChangeGrids()
        # walk the list and process each grid's C_Vector
        for grid_object in change_grids:
            # handle sparse grids
            if DensityStatus.SPARSE==grid_object.densityStatus():
                logging.debug(grid_object.key()+" is being define as SPARSE")
                self.__adjust_sparse(grid_object)

            # handle dense grids
            if DensityStatus.DENSE==grid_object.densityStatus():
                logging.debug(grid_object.key() + "is being define as DENSE to process")
                self.__adjust_dense(grid_object)

            # handle transitional grids
            if DensityStatus.TRANSITIONAL==grid_object.densityStatus():
                logging.debug(grid_object.key() + "is being define as TRANSITIONAL")
                self.__adjust_transitional(grid_object)




                # sporadic-removal decision logic below; processes every sparse grid

    def judgeAndremoveSporadic(self, current_time):
        """Evaluate each sparse grid's sporadic status and delete the grids
        marked TODELETE (detaching them from their cluster first)."""
        grids=self.grid_list.getSparseGrids()
        for grid_object in grids:
            if SparseStatus.TODELETE == grid_object.sparseStatus():
                if grid_object.clusterKey()!=-1:
                    cluster=self.cluster_manager.getCluster(grid_object.clusterKey())
                    cluster.delGrid(grid_object)
                    if cluster.size() == 0:
                        self.cluster_manager.delCluster(cluster.key())
                self.grid_list.delGrid(grid_object.key(), current_time)
            elif SparseStatus.TEMP == grid_object.sparseStatus() or SparseStatus.NORMAL == grid_object.sparseStatus():
                # evaluate conditions S1 and S2
                if grid_object.densityThreshold(current_time) > grid_object.densityWithTime(
                        current_time) and current_time >= (1 + Helper().beta) * grid_object.time_remove():
                    # S1 and S2 hold - mark for deletion
                    grid_object.setSparseStatus(SparseStatus.TODELETE)
                else:
                    # otherwise back to NORMAL
                    grid_object.setSparseStatus(SparseStatus.NORMAL)


    def do_DStream(self,rawData):
        """Process one incoming data record: advance the clock, bin the
        record into its grid, and run the periodic clustering steps."""
        self.tc+=1
        # map the data point to its grid by key and update the grid's statistics
        logging.info("add New Data")
        self.grid_list.addNewData(rawData,self.tc)
        #grid_key=Helper.getKeyFromRawData(rawData)
        # check whether grid_list already has the key; if not, add it first
        #if not self.grid_list.has_key(grid_key):
            # 1. create a new C_Vector;
            # 2. insert it into grid_list;
        # refresh the grid's information
        #if not 0==self.__refreshGrid(grid_key,rawData):
           # print("key missing, __refreshGrid call failed\n")

        #TODO: first time the gap is reached
        if self.tc == self.gap:
            logging.debug("touch the gap first time")
            self.__initial_clustring()
        elif self.tc%self.gap == 0:
            logging.debug("TOUCH THE GAP")
            # evaluate sporadic status and delete qualifying grids
            logging.debug("judgeAndremoveSporadic")
            self.judgeAndremoveSporadic(self.tc)
            logging.debug("__adjust_clustring")
            self.__adjust_clustring()
            # reset all change flags to 0
            logging.debug("clearChangeFlag")
            self.grid_list.clearChangeFlag()
Beispiel #13
0
 def __init__(self):
     """Initialise the service: bring up the ClusterManager module first,
     then construct the operator and recovery-manager components
     (presumably they rely on ClusterManager being initialised -- confirm)."""
     ClusterManager.init()
     self.Operator = Operator()
     self.RecoveryManager = RecoveryManager()
Beispiel #14
0
 def __init__(self):
     """Set up IPMI access, the cluster list, and the polling port read
     from the [detection] section of hass.conf."""
     self.ipmi_module = IPMIManager()
     self.cluster_list = ClusterManager.getClusterList()
     # hass.conf is resolved relative to the current working directory
     self.config = ConfigParser.RawConfigParser()
     self.config.read('hass.conf')
     self.port = int(self.config.get("detection", "polling_port"))
Beispiel #15
0
 def test_unique_tokens_generic(self):
     """Every pattern's cluster must reach consensus on unique tokens."""
     patterns = TestTemplating.templatize.get_patterns()
     for pat in patterns:
         pattern_cluster = ClusterManager.get_cluster(pat)
         reached = pattern_cluster.consensus(Artifact.get_unique_tokens,
                                             Util.token_key_comparator)
         self.assertTrue(reached)
def delete_cluster(cluster_id):
    """Tear down the test cluster: remove HOST from it, then delete the
    instance via Postprocess.

    Args:
        cluster_id (str): uuid of the cluster to clean up.
    """
    ClusterManager.deleteNode(cluster_id, HOST, write_DB=False)
    Postprocess.deleteInstance()
from Templatize import Templatize
import sys
import os
"""
cd Opportunity/ 
object.properties
"""
#Opportunity,Lead /Users/cghai/Documents/Code/BaselineTemplating/ActiveMDS VO.xml.xml,EO.xml.xml
# Build a template per requested pattern and append each artifact's unique
# tokens to that artifact's object.properties file.
templatize = Templatize(debug=True,
                        objects=sys.argv[1].split(','),
                        mds_path=sys.argv[2],
                        patterns=sys.argv[3].split(','))
root_dir = os.getcwd()
for pattern in templatize.patterns:
    os.chdir(root_dir)
    cluster = ClusterManager.get_cluster(pattern)
    cluster.get_template()
    pattern_dir = cluster.get_name().replace('.', '_')  # NOTE(review): currently unused
    artifacts = cluster.get_artifacts()
    # The first artifact's template is taken as the baseline for the pattern.
    template = artifacts[0].get_template()
    template.write('SmartBaseline.xml')
    for artifact in artifacts:
        tokens = artifact.get_unique_tokens()
        obj_dir = os.path.join(root_dir, artifact.get_obj())
        os.chdir(obj_dir)
        # Use a context manager so the properties file is closed (and
        # flushed) even on error; the original leaked one handle per artifact.
        with open('object.properties', 'a') as props:
            for token in sorted(tokens):
                # One key=value pair per line -- the original wrote no
                # newline, running all entries together.
                props.write(token)
                props.write("=")
                props.write(tokens[token])
                props.write("\n")
Beispiel #18
0
 def updateAllCluster(self):
     """Refresh the state of every cluster, logging (rather than raising)
     any failure."""
     try:
         ClusterManager.updateAllCluster()
     except Exception as e:
         logging.error("HASS--updateAllCluster fail :" + str(e))
Beispiel #19
0
            for k, v in kv.items():
                collection.append({
                    "key": k,
                    "value": v
                })

            resp = { "collection": collection }
        else:
            resp = { "message": "Invalid op" }

        consumer.send_json(resp)
    
        
if __name__ == "__main__":
    cm = ClusterManager()

    # Number of servers to launch; defaults to 1, overridable via argv[1].
    server_count = 1
    if len(sys.argv) > 1:
        server_count = int(sys.argv[1])
        print(f"num_server={server_count}")

    # Background process listening for add-node notifications on port 1999.
    Process(target=pullNotifications, args=(cm, 1999,)).start()

    # Register and launch one server process per port, starting at 2000.
    for offset in range(server_count):
        port = 2000 + offset
        cm.registerNode(port)
        print(f"Starting a server at:{port}...")
        Process(target=server, args=(port,)).start()
Beispiel #20
0
class ApnLa:
    """Couples a Petri net (Rdp) with a learning-automata ClusterManager
    that decides which enabled transition to fire next."""

    def __init__(self, jsonFile, loadModified, costType):
        """Build the net from *jsonFile* and wire up the cluster manager.

        Args:
            jsonFile: path/handle of the net description (passed to Rdp).
            loadModified: flag forwarded to Rdp.
            costType: "inv" or "simp"; selects the cost manager (see
                costCreator).
        """
        self.rdp = Rdp(jsonFile, loadModified)

        clusterList = self.rdp.clusterlist
        updateT = self.rdp.updateT
        controlConflicts = self.rdp.controlConflicts
        tInvariants = self.rdp.tInvariants
        costManager = self.costCreator(costType)

        self.clusterManager = ClusterManager(clusterList, updateT,
                                             controlConflicts, tInvariants,
                                             costManager)

    # Transition firing sequence. Uses clusterManager to get transition to fire, updates marking vector in rdp
    # and then updates all necessary information in cluster manager.
    def fireNext(self):
        """Fire one transition chosen by the cluster manager and propagate
        the resulting cost/state updates."""
        enabledT = self.rdp.calcularSensibilizadas()
        fireTransition = self.clusterManager.getFireTransition(enabledT)
        cost = self.rdp.fire(fireTransition)
        self.clusterManager.updateCost(fireTransition, cost)
        self.clusterManager.setClusterFiredTransition(fireTransition)
        self.clusterManager.setControlClusterFiredTransition(
            fireTransition)  # TODO
        self.clusterManager.updateIfNecessary(fireTransition)
        return

    def switcharoo(self):
        """Overwrite two entries of the net's cost vector (experiment
        helper) and print the result."""
        # self.rdp.costVector[0] = 50
        self.rdp.costVector[6] = 50
        self.rdp.costVector[4] = 25
        print(self.rdp.costVector)

    def printClusters(self):
        """Print the transition lists of regular and control clusters
        (HTML-ish markup in the output strings)."""
        print("<br>CLUSTERS:<br>")
        print("&emspRegular clusters")
        for cluster in self.clusterManager.clusters:
            print('&emsp*', cluster.transitionList)
        if len(self.clusterManager.controlClusters) > 0:
            print("&emspControl clusters")
            for cluster in self.clusterManager.controlClusters:
                print('&emsp&emsp*', cluster.transitionList)

    def getClusterProbs(self):
        """Return the LA probability vectors (as lists) of all clusters
        that have an LA, regular clusters first."""
        probs = []
        for cluster in self.clusterManager.clusters:
            if cluster.LA is not None:
                probs.append(cluster.LA.probabilityVector.tolist())
        # NOTE(review): control clusters are assumed to always carry an LA
        # (no None check here, unlike the loop above) -- confirm.
        for cluster in self.clusterManager.controlClusters:
            probs.append(cluster.LA.probabilityVector.tolist())
        return probs

    def getClusterTransitions(self):
        """Return the transition lists matching getClusterProbs' order."""
        labels = []
        for cluster in self.clusterManager.clusters:
            if cluster.LA is not None:
                labels.append(cluster.transitionList)
        for cluster in self.clusterManager.controlClusters:
            labels.append(cluster.transitionList)
        return labels

    def costCreator(self, costType):
        """Build the cost manager for *costType*.

        "inv" selects the invariant-based manager; "simp" (and any other
        value) falls back to the simple manager.
        """
        if costType == "inv":
            costManager = invcostManager(self.rdp.tInvariants, self.rdp)
        elif costType == "simp":
            costManager = simpcostManager(self.rdp)
        else:
            costManager = simpcostManager(self.rdp)
        return costManager