Code Example #1
 def add_message_to_cluster(self, message):
     rep = message.get_tokenrepresentation()
     c = self.get_cluster(rep)
     if c is None:
         c = Cluster(rep, "initial")
         self.__cluster.append(c)
     c.add_messages([message])
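
For comparison, the same "get or create" pattern can be written with a plain dict; a minimal standalone sketch (the dict and helper here are hypothetical, not part of the class above):

clusters = {}

def add_message(rep, message):
    # setdefault creates the entry on first sight, then reuses it
    clusters.setdefault(rep, []).append(message)

add_message("GET /", "msg1")
add_message("GET /", "msg2")
assert clusters["GET /"] == ["msg1", "msg2"]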
Code Example #2
File: spea.py Project: jorgeramirez/AE
    def reduce_pareto_set(self, par_set):
        """
        Realiza el clustering
        """
        lista_cluster=[]
        for solucion in par_set.solutions:
            cluster = Cluster()
            cluster.agregar_solucion(solucion)
            lista_cluster.append(cluster)
  
        while len(lista_cluster) > self.max_pareto_points:
            min_distancia = sys.maxint
            for i in range (0,len(lista_cluster)-1):
                for j in range(i+1, len(lista_cluster)):
                    c = lista_cluster[i]
                    distancia = c.calcular_distancia(lista_cluster[j])
                    if distancia < min_distancia:
                        min_distancia = distancia
                        c1 = i
                        c2 = j
               
            cluster = lista_cluster[c1].unir(lista_cluster[c2]) # returns a new cluster
            # delete the higher index first so the lower index stays valid
            del lista_cluster[c2]
            del lista_cluster[c1]

            lista_cluster.append(cluster)
        
        par_set=[]
        for cluster in lista_cluster:
            solucion = cluster.centroide()
            par_set.append(solucion)
            
        return par_set 
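
Note on the merge step above: the deletion order matters because removing the lower index first would shift the higher index by one. A small sketch of why c2 is deleted before c1:

lst = ['a', 'b', 'c', 'd']
c1, c2 = 1, 3
del lst[c2]  # remove index 3 ('d') first...
del lst[c1]  # ...so index 1 ('b') is still valid
assert lst == ['a', 'c']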
Code Example #3
File: clustermgr.py Project: allfs/autooam
 def alloc_new(self,name, configspec, vmitype, chefmode=True):
     '''Allocates a new cluster.
     
     @param name - name for the cluster (ideally unique)
     @param configspec - a ConfigSpec instance
     @param vmitype - string specifying vmi type (supported: 'vagrant')
     '''
     cid = self._db.insert({})        
     try:
         configspec.validate()
         cluster = Cluster( name, configspec, cid, chefmode=chefmode )
         VMI.Create(vmitype, cluster)
         self._db.update( 
                         { '_id' : cluster.id() }, 
                         { u'name' : name,
                           u'config' : configspec.jsonmap, 
                           u'vmi' : cluster.get_vmi().jsonmap(), 
                           u'machines' : cluster.machines()} )
     except Exception as exc:
         import traceback
         Log.error('Cluster creation failed: %s' % exc)
         Log.error( traceback.format_exc() )
         self._db.remove( { '_id' : cid } )
         raise Exception('Failed to create cluster: %s' % name)
     
     return cluster
Code Example #4
def request_cluster(argv):
    """
    Only request the cluster on GCE, and output all configuration information.
    :param argv: sys.argv
    :return: None
    """
    if len(argv) < 7:
        print_help()
        exit(1)

    cluster_name = argv[2]
    ambari_agent_vm_num = int(argv[3])
    docker_num = int(argv[4])
    service_server_num = int(argv[5])
    with_ambari_server = False
    ambari_server_num = int(argv[6])
    if ambari_server_num > 0:
        with_ambari_server = True

    cluster = Cluster()
    cluster.request_gce_cluster(ambari_agent_vm_num, docker_num, service_server_num,
                                with_ambari_server, cluster_name)

    time_to_wait = Config.ATTRIBUTES["gce_boot_time"]
    print "wait ", str(time_to_wait), " seconds for the cluster to boot ... ..."
    time.sleep(int(time_to_wait))

    data = Data()
    data.add_new_cluster(cluster)

    print "complete"
Code Example #5
    def exportgroup_create(self, name, project, tenant, varray, exportgrouptype, export_destination=None):
        '''
        This function takes the export group name and project name as input
        and creates the export group with the given name.
        parameters:
           name : Name of the export group.
           project: Name of the project path.
           tenant: Container tenant name.
        return
            returns with status of creation. 
        '''
        # check for existence of the export group.
        try:
            status = self.exportgroup_show(name, project, tenant)
        except SOSError as e:
            if(e.err_code == SOSError.NOT_FOUND_ERR):
                if(tenant is None):
                    tenant = ""
                    
                fullproj = tenant + "/" + project
                projuri = Project(self.__ipAddr, self.__port).project_query(fullproj) 
                nhuri = VirtualArray(self.__ipAddr, self.__port).varray_query(varray)
                
                parms = {
                'name' : name,
                'project' : projuri,
                'varray' : nhuri,
                'type' :exportgrouptype
                }
                if(exportgrouptype and export_destination):
                    if (exportgrouptype == 'Cluster'):
                        cluster_obj = Cluster(self.__ipAddr, self.__port)
                        try:
                            cluster_uri = cluster_obj.cluster_query(export_destination, fullproj)
                        except SOSError as e:
                            raise e
                        parms['clusters'] = [cluster_uri]
                    elif (exportgrouptype == 'Host'):
                        host_obj = Host(self.__ipAddr, self.__port)
                        try:
                            host_uri = host_obj.query_by_name(export_destination)
                        except SOSError as e:
                            raise e
                        parms['hosts'] = [host_uri]
                    # else:   # exportgrouptype == Exclusive
                        # TODO: add code for initiator                 
                body = json.dumps(parms)
                (s, h) = common.service_json_request(self.__ipAddr, self.__port, "POST", 
                                             self.URI_EXPORT_GROUP, body)

                o = common.json_decode(s)
                return o
            else:
                raise e
        if(status):
            raise SOSError(SOSError.ENTRY_ALREADY_EXISTS_ERR,
                           "Export group with name " + name + " already exists")
Code Example #6
File: node.py Project: Bluelich/redis-ctl
def quit(host, port, cluster_id, quit_cluster):
    logging.info('Node %s:%d quit from cluster [ %d ]', host, port, cluster_id)
    instance = pick_by(host, port)
    if instance.assignee is None:
        return
    Cluster.lock_by_id(instance.assignee_id)
    quit_cluster(host, port)
    instance.assignee = None
    db.session.add(instance)
Code Example #7
File: kmeans.py Project: Kodstok/IA
def get_cluster_obs(clusters, r):	
	res = Cluster(-1, [])
	for c in clusters:
		for obs_row in c.observations:
			if obs_row == r:
				res.id = c.id
				res.centroide = c.centroide
				res.observations = c.observations
	return res
Code Example #8
 def exportgroup_add_cluster(self, exportgroupname, tenantname, projectname, clusternames, sync):
     exportgroup_uri = self.exportgroup_query(exportgroupname, projectname, tenantname)
     cluster_uris = []
     clusterObject = Cluster(self.__ipAddr, self.__port)
     for clustername in clusternames:
         cluster_uris.append(clusterObject.cluster_query(clustername, tenantname))
     parms = {}
     parms["cluster_changes"] = self._add_list(cluster_uris)
     o = self.send_json_request(exportgroup_uri, parms)
     return self.check_for_sync(o, sync)
Code Example #9
    def __init__(self, cluster_document):
        Cluster.__init__(self, cluster_document)
        self._config_members = self._resolve_members("configServers")
        #self._shards = self._resolve_shard_members()
        self._shards = self._resolve_members("shards")

        # members list stores the mongos servers
        if not self._members or not self._config_members or not self._shards:
            raise Exception("Please specify config, shard, and mongos servers for cluster %s"
                            % self.get_cluster_name())
Code Example #10
    def exportgroup_create(self, name, project, tenant, varray, exportgrouptype, export_destination=None):
        """
        This function takes the export group name and project name as input
        and creates the export group with the given name.
        parameters:
           name : Name of the export group.
           project: Name of the project path.
           tenant: Container tenant name.
        return
            returns with status of creation.
        """
        # check for existence of the export group.
        try:
            status = self.exportgroup_show(name, project, tenant)
        except SOSError as e:
            if e.err_code == SOSError.NOT_FOUND_ERR:
                if tenant is None:
                    tenant = ""

                fullproj = tenant + "/" + project
                projObject = Project(self.__ipAddr, self.__port)
                projuri = projObject.project_query(fullproj)

                varrayObject = VirtualArray(self.__ipAddr, self.__port)
                nhuri = varrayObject.varray_query(varray)

                parms = {"name": name, "project": projuri, "varray": nhuri, "type": exportgrouptype}

                if exportgrouptype and export_destination:
                    if exportgrouptype == "Cluster":
                        cluster_obj = Cluster(self.__ipAddr, self.__port)
                        try:
                            cluster_uri = cluster_obj.cluster_query(export_destination, fullproj)
                        except SOSError as e:
                            raise e
                        parms["clusters"] = [cluster_uri]
                    elif exportgrouptype == "Host":
                        host_obj = Host(self.__ipAddr, self.__port)
                        try:
                            host_uri = host_obj.query_by_name(export_destination)
                        except SOSError as e:
                            raise e
                        parms["hosts"] = [host_uri]

                body = json.dumps(parms)
                (s, h) = common.service_json_request(self.__ipAddr, self.__port, "POST", self.URI_EXPORT_GROUP, body)

                o = common.json_decode(s)
                return o
            else:
                raise e

        if status:
            raise SOSError(SOSError.ENTRY_ALREADY_EXISTS_ERR, "Export group with name " + name + " already exists")
Code Example #11
 def addNewCluster(self, tf, sentence):
     """
     Creates a new cluster and adds it to the clusters.
     tf - term frequency counts of the given sentence
     sentence - sentence to be added to the cluster
     """
     self.newCID += 1
     newCluster = Cluster(self.newCID)
     newCluster.tf = tf
     newCluster.addSentenceToCluster(sentence)
     self.clusters[self.newCID] = newCluster
     print "Added new cluster for cid: {}".format(self.newCID)
Code Example #12
File: tdm.py Project: q64545/x-deeplearning
def train(config):
  '''Train loop for the TDM algorithm'''

  train_rawdata_url = config["train_rawdata_url"]
  test_rawdata_url = config["test_rawdata_url"]
  data_dir = config['data_dir']
  raw_train_data = os.path.join(data_dir, train_rawdata_url.split('/')[-1])
  raw_test_data = os.path.join(data_dir, test_rawdata_url.split('/')[-1])
  tree_filename = os.path.join(data_dir, config['tree_filename'])
  train_sample = os.path.join(data_dir, config['train_sample'])
  test_sample = os.path.join(data_dir, config['test_sample'])
  stat_file = os.path.join(data_dir, config['stat_file'])

  print("Start to generating initialization data")
  # Download the raw data
  hdfs_download(train_rawdata_url, raw_train_data)
  hdfs_download(test_rawdata_url, raw_test_data)

  generator = Generator(raw_train_data,
                        raw_test_data,
                        tree_filename,
                        train_sample,
                        test_sample,
                        config['feature_conf'],
                        stat_file,
                        config['seq_len'],
                        config['min_seq_len'],
                        config['parall'],
                        config['train_id_label'],
                        config['test_id_label'])
  generator.generate()

  # Upload generating data to hdfs
  hdfs_upload(data_dir, config["upload_url"])

  # TDM train
  model_embed = os.path.join(data_dir, 'model.embed')
  tree_upload_dir = os.path.join(config['upload_url'], os.path.split(data_dir)[-1])
  for i in range(config['epocs']):
    print('Training, iteration: {iteration}'.format(iteration=i))

    # TODO(genbao.cgb): Train with xdl

    # Download the model file
    hdfs_download(config['model_url'], model_embed)

    # Tree clustering
    cluster = Cluster(model_embed, tree_filename,
                      parall=config['parall'], stat_file=stat_file)
    cluster.train()

    # Upload clustered tree to hdfs
    hdfs_upload(tree_filename, tree_upload_dir, over_write=True)
Code Example #13
File: distqueue.py Project: jstutters/distqueue
def main():
    cluster = Cluster()
    cluster.init_from_file('nodes.yaml')

    for n in cluster.nodes:
        n.execute('hostname; sleep 4')

    while any((n.is_busy() for n in cluster.nodes)):
        print 'waiting'
        sleep(0.5)

    for n in cluster.nodes:
        print n.last_result
Code Example #14
    def exportgroup_remove_cluster(self, exportgroupname, tenantname, projectname, clusternames, sync):
        exportgroup_uri = self.exportgroup_query(exportgroupname, projectname, tenantname)
        # cluster search API does not take project parameter.
        cluster_uris = []
        clusterObject = Cluster(self.__ipAddr, self.__port)
        for clustername in clusternames:
            cluster_uris.append(clusterObject.cluster_query(clustername, tenantname))
        parms = {}
        parms["cluster_changes"] = self._remove_list(cluster_uris)
        o = self.send_json_request(exportgroup_uri, parms)
        return self.check_for_sync(o, sync)

Code Example #15
    def initialization(self):
        for i in xrange(0, self.clusterNumber):
            c = Cluster(i, len(self.observations[0]))
            self.clusters.append(c)

        i = 0
        for obs in self.observations:
            obs = np.append(obs, 0)
            self.clusters[i % self.clusterNumber].addObservation(obs, 0)
            i += 1

        for c in self.clusters:
            c.updateCentroid()
            c.updateDist()
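
The i % self.clusterNumber index deals the observations out cyclically, so the initial clusters start with near-equal sizes; a worked sketch of that round-robin assignment:

clusterNumber = 3
assignments = [i % clusterNumber for i in range(7)]
assert assignments == [0, 1, 2, 0, 1, 2, 0]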
Code Example #16
File: tree_cluster.py Project: q64545/x-deeplearning
def train(config):
  '''Train loop for the TDM algorithm'''

  data_dir = os.path.join(DIR, config['data_dir'])
  tree_filename = os.path.join(data_dir, config['tree_filename'])
  stat_file = os.path.join(data_dir, config['stat_file'])

  print("Start to cluster tree")
  # Download item id
  upload_dir = os.path.join(config['upload_url'], os.path.split(data_dir)[-1])
  item_id_url = os.path.join(upload_dir, config['item_id_file'])
  item_id_file = os.path.join(data_dir, 'item.id')
  hdfs_download(item_id_url, item_id_file)
  model_embed_tmp = os.path.join(data_dir, 'model.embed.tmp')
  hdfs_download(config['model_url'] + '/item_emb', model_embed_tmp)

  # Read max item id from item id file
  max_item_id = 0
  with open(item_id_file) as f:
    for line in f:
      item_id = int(line)
      if item_id > max_item_id:
        max_item_id = item_id
  max_item_id += 1

  model_embed = os.path.join(data_dir, 'model.embed')
  item_count = 0
  id_set = set()
  with open(model_embed_tmp) as f:
    with open(model_embed, 'wb') as fo:
      for line in f:
        arr = line.split(",")
        item_id = int(arr[0])
        if (len(arr) > 2) and (item_id < max_item_id) and (item_id not in id_set):
          id_set.add(item_id)
          item_count += 1
          fo.write(line)

  os.remove(model_embed_tmp)
  print("Filer embedding done, records:{}, max_leaf_id: {}".format(
      item_count, max_item_id))

  # Tree clustering
  cluster = Cluster(model_embed, tree_filename,
                    parall=config['parall'], stat_file=stat_file)
  cluster.train()

  # Upload clustered tree to hdfs
  tree_upload_dir = os.path.join(config['upload_url'], os.path.split(data_dir)[-1])
  hdfs_upload(tree_filename, tree_upload_dir, over_write=True)
Code Example #17
File: update_test.py Project: shawnkumar/uptest
    def update_test(self):
        cluster = Cluster(get_ctool_nodes())
        cluster.clean_bootstrap('apache/cassandra-2.1')

        [node1, node2] = cluster.get_nodes()

        lastkey = self.fillData(10000000000, cluster)

        cluster.round_robin_update('apache/trunk')

        repout_n1 = cluster.nodetool('repair -hosts ' + node1.get_address(), nodes=[node1], capture_output=True)
        repout_n2 = cluster.nodetool('repair -hosts ' + node2.get_address(), nodes=[node2], capture_output=True)

        (output1, error1) = repout_n1[0]
        (output2, error2) = repout_n2[0]

        #check return values of repair are successful
        self.assertEqual(repout_n1[1], 1, str(error1))
        self.assertEqual(repout_n2[1], 1, str(error2))

        #perform some basic validation to check querying values works
        (info, rc) = cluster.stress("read n={numWrites} -pop seq=1..{lastkey} no-wrap".format(numWrites=lastkey, lastkey=lastkey))

        #check validation error-free
        self.assertEqual(rc, 1)

        #check that there are no errors in logs:
        self.check_logs(cluster)
Code Example #18
File: update_test.py Project: hoodedalien/uptesting
    def update_test(self):
        cluster = Cluster(get_ctool_nodes())
        cluster.clean_bootstrap('apache/cassandra-2.1')

        [node1, node2] = cluster.get_nodes()
        cluster.stress("write n=50000", [node1,node2])

        cluster.update('apache/trunk', node1)
Code Example #19
def main(args):
    lim1 = 0
    lim2 = 0
    lim3 = 0
    if args.dimension >= 1:
        lim1 = args.limit
    if args.dimension >= 2:
        lim2 = args.limit
    if args.dimension == 3:
        lim3 = args.limit
    link_num = 2
    if args.dimension == 1:
        points = [np.array([float(i) / lim1]) for i in range(lim1)]
        links = [[points[l] for l in [random.randint(0, lim1 - 1) for x in range(link_num)]] for i in range(lim1)]
    elif args.dimension == 2:
        points = [np.array([float(i) / lim1, float(j) / lim2]) for i in range(lim1) for j in range(lim2)]
        links = [[points[l] for l in [random.randint(0, lim1 * lim2 - 1) for x in range(link_num)]]
                 for i in range(lim1) for j in range(lim2)]
    else:
        points = [np.array([float(i) / lim1, float(j) / lim2, float(k) / lim3]) for i in range(lim1) for j in
                  range(lim2) for k in range(lim3)]
        links = [[points[l] for l in [random.randint(0, lim1 * lim2 * lim3 - 1) for x in range(link_num)]]
                 for k in range(lim3) for j in range(lim2) for i in range(lim1)]
    grid = Grid(points, links)
    cluster = Cluster(grid)
    rc = RegularCuboid(cluster)
    start = timer()
    ct = ClusterTree(rc, 1)
    end = timer()
    print("ClusterTree build-up took " + str(end - start))
    start = timer()
    bct = BlockClusterTree(ct, ct)
    end = timer()
    print("BlockClusterTree buil-up took " + str(end - start))
Code Example #20
    def compute_abstraction(self):
        """Clusters all possible flop hands into groups."""
        abstraction_file = ''
        equity_file = ''
        if self.street == 'flop':
            abstraction_file = FLOP_SAVE_NAME
            equity_file = FLOP_EQUITY_DISTIBUTIONS
        elif self.street == 'turn':
            abstraction_file = TURN_SAVE_NAME
            equity_file = TURN_EQUITY_DISTRIBUTIONS

        if os.path.isfile(abstraction_file):
            with open(abstraction_file, 'rb') as f:
                return pickle.load(f)

        print('Computing the %s abstraction...' % (self.street, ))
        if os.path.isfile(equity_file):
            with open(equity_file, 'rb') as f:
                equity_distributions = pickle.load(f)
        else:
            print('Calculating equity distributions...')
            hands = archetypal_hands(self.street)
            distributions = pbar_map(self.hand_equity, hands)
            equity_distributions = dict(zip(hands, distributions))
            with open(equity_file, 'wb') as f:
                pickle.dump(equity_distributions,
                            f,
                            protocol=pickle.HIGHEST_PROTOCOL)

        print('Performing k-means clustering...')
        abstraction = Cluster(equity_distributions, self.buckets, self.iters)()
        with open(abstraction_file, 'wb') as f:
            pickle.dump(abstraction, f, protocol=pickle.HIGHEST_PROTOCOL)
        return abstraction
Code Example #21
 def create_clusters(self):
     """
     Creates a list of clusters by recursively bisecting those clusters that
     touch the origin
     """
     # Transpose data so that X and Y components are rows
     x, y = self.pca_data.T
     top_left = Point(np.min(x), np.max(y))
     bottom_right = Point(np.max(x), np.min(y))
     origin = (top_left + bottom_right)/2
     # Compute the bounding cluster which encompasses all the points in the
     # training set. The initial set of clusters is the set of 4 clusters
     # formed by the bisection of the bounding cluster.
     clusters = Cluster(top_left, bottom_right).bisect()
     # Recursively split the clusters
     for _ in range(self.levels):
         new_clusters = []
         for cluster in clusters:
             # Only split the cluster if one of the vertices is the origin
             if cluster.touches(origin):
                 new_clusters.extend(cluster.bisect())
             else:
                 new_clusters.append(cluster)
         # Repeat again with the newly created list of clusters
         clusters = new_clusters
     # Return the list of clusters
     return clusters
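
The origin computation assumes Point supports + and /; a minimal Point sketch with just those operators (hypothetical, not the project's class):

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __add__(self, other):
        return Point(self.x + other.x, self.y + other.y)

    def __truediv__(self, k):
        return Point(self.x / k, self.y / k)

origin = (Point(-1.0, 4.0) + Point(3.0, -2.0)) / 2
assert (origin.x, origin.y) == (1.0, 1.0)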
Code Example #22
File: k_means.py Project: itamar-github/hw3_lab
    def run(self, points, random_seed):
        random.seed(random_seed)
        # Randomly initiate clusters
        self._clusters = []
        initial_centroids = random.sample(points, self._k)
        for i, initial_centroid in enumerate(initial_centroids):
            new_cluster = Cluster(i, initial_centroid)
            self._clusters.append(new_cluster)

        for current_iteration in range(self._num_iterations):
            # Clear all clusters
            for cluster in self._clusters:
                cluster.remove_point()

            # Re-assign all points
            for point in points:
                distances_to_clusters = {
                    x.id: point.distance_to(x.centroid)
                    for x in self._clusters
                }
                closest_cluster_id = sorted(
                    distances_to_clusters.keys(),
                    key=lambda x: distances_to_clusters[x])[0]
                self._clusters[closest_cluster_id].add_point(point)

            # Recompute centroids and look if change happened
            changes = [
                cluster.compute_centroid() for cluster in self._clusters
            ]
            if sum(changes) == 0:  # if everyone is False then sum is 0
                break
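
Sorting the whole distance dict just to take its first key works, but min() expresses the same nearest-cluster choice directly; an equivalent sketch:

distances_to_clusters = {0: 2.5, 1: 0.7, 2: 1.9}
closest_cluster_id = min(distances_to_clusters, key=distances_to_clusters.get)
assert closest_cluster_id == 1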
Code Example #23
File: kmeans.py Project: Quadrition/ml_project1
    def fit(self, data, normalize=True):
        self.data = data
        predict = [0] * len(self.data)

        if normalize:
            self.normalize_data()

        dimensions = len(self.data[0])

        for i in range(self.n_clusters):
            # draw each coordinate independently; [random.random()] * dimensions
            # would repeat one random value across every dimension
            center = [random.random() for _ in range(dimensions)]
            self.clusters.append(Cluster(center))

        iter_no = 0
        not_moves = False
        while iter_no <= self.max_iter and not not_moves:
            for cluster in self.clusters:
                cluster.data = []

            for i in range(len(self.data)):
                cluster_index = self.predict(self.data[i])
                self.clusters[cluster_index].data.append(self.data[i])
                predict[i] = cluster_index

            not_moves = True
            for cluster in self.clusters:
                old_center = cluster.center[:]
                cluster.recalculate_center()

                not_moves = not_moves and cluster.center == old_center

            iter_no += 1
        return predict
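
The center-initialization fix above matters because a multiplied list repeats a single random draw; a quick sketch of the difference:

import random

random.seed(0)
dimensions = 3
same = [random.random()] * dimensions                 # one value, repeated
indep = [random.random() for _ in range(dimensions)]  # independent draws
assert same[0] == same[1] == same[2]
assert len(set(indep)) == 3                           # almost surely distinct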
Code Example #24
def _mergeClusters(clusters, cluster1, cluster2):
    mergedCluster = Cluster(list(cluster1 | cluster2))
    clusters.remove(cluster1)
    clusters.remove(cluster2)
    clusters.append(mergedCluster)

    return clusters
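
The merge relies on the clusters being set-like, so that | takes their union; a sketch with plain sets standing in for the Cluster contents:

a, b = {1, 2}, {2, 3}
merged = list(a | b)  # union, duplicates collapsed
assert sorted(merged) == [1, 2, 3]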
Code Example #25
File: level.py Project: rappie/platformo
	def __init__(self):
		
		# Player object.
		self.player = None
		
		# Whether the level has been completed.
		self.finished = False
		
		# Determine the rect of the entire level.
		self.rect = pygame.Rect(0, 0, settings.TILE_WIDTH*settings.LEVEL_WIDTH, settings.TILE_HEIGHT*settings.LEVEL_HEIGHT)
		
		# Build the list of all game objects in this level.
		#
		# We do this by first generating a level map with a level
		# generator. We then walk the level map and create all the game
		# objects.
		#
		levelMap = levelgenerator.generateLevelMap()
		gameObjectList = self.getGameObjectListFromLevelMap(levelMap)
		
		# Create the cluster in which all static game objects are stored.
		staticObjectList = [k for k in gameObjectList if not isinstance(k, Actor)]
		self.cluster = Cluster(self, staticObjectList, self.rect)

		# Create a list of all actors.
		self.actorList = [k for k in gameObjectList if isinstance(k, Actor)]
Code Example #26
File: EventDection.py Project: ycisacat/yqnew
def detect_event():  # main
    news_detection = NewsDetection()
    dict_news = news_detection.get_dic_news()
    cluster = Cluster(dict_news)
    news_clusters = cluster.cluster_news()
    result_clusters = list()
    for one_cluster in news_clusters:
        if len(one_cluster) < 3:
            continue
        print '--------'

        for news in one_cluster:
            print news
        result_clusters.append(one_cluster)
    events = result_clusters
    return events
Code Example #27
    def __init__(self,
                 width=1024,
                 height=720,
                 background=(255, 255, 255),
                 edifices=None,
                 cars=None,
                 fcs=None,
                 clusters=False):
        pygame.init()
        # pygame.display.set_caption("Escenario de simulación")
        self.width = width
        self.height = height
        self.size = (self.width, self.height)
        self.surface = pygame.Surface(self.size, pygame.SRCALPHA, 32)
        self.background_color = background
        self.screen = pygame.display.set_mode((width, height))
        self.clock = pygame.time.Clock()
        self.ticks = 30
        self.exit = False
        self.screen.fill(self.background_color)

        # MAPA
        self.edifices = edifices
        self.cars = cars
        self.fcs = fcs
        self.fcs.setFemtocellUsers(cars)
        self.fcs.setNeighbors()
        self.cars.setFemtocells(fcs)
        self.cars.setNeighbors()
        self.cars.setSubscribers(n_subscribers=len(fcs), fcs=fcs)
        self.cluster = Cluster(femtocells=self.fcs,
                               users=self.cars,
                               enable=clusters)

        pygame.display.update()
Code Example #28
File: core.py Project: slDias/ek2o4i
def main(input_file_path):
    """
    Allocate users to servers maintaining the lowest cost possible and writes the result to output.txt
    :param str input_file_path: The path to a file containing input parameters
    """

    # Read input file
    with open(input_file_path) as input_file:
        ttask, umax, *new_user_per_tick = [
            int(line) for line in input_file.read().splitlines()
        ]

    tick = 0
    cluster = Cluster()
    output_path = "output.txt"
    if os.path.isfile(output_path):
        os.remove(output_path)
    while tick < len(new_user_per_tick) or cluster.has_server:

        cluster.remove_unused_server()

        # Add new users to servers
        if tick < len(new_user_per_tick):
            for _ in range(new_user_per_tick[tick]):

                new_user = User(ttask)

                least_busy_server = cluster.get_least_busy_server()

                # Create a new server if all servers are busy
                if not least_busy_server:
                    new_server = Server(umax)
                    new_server.add_user(new_user)
                    cluster.add_server(new_server)
                    continue

                least_busy_server.add_user(new_user)

            tick += 1

        # Executes tasks and prints the report
        cluster.execute_server_task()
        with open(output_path, 'a') as output_file:
            output_file.write(cluster.report() + '\n')

    with open(output_path, 'a') as output_file:
        output_file.write(str(cluster.running_cost))
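
The input format main() expects follows from the unpacking above: one integer per line, ttask first, then umax, then one arrivals count per tick. A hypothetical input file:

with open('input.txt', 'w') as f:
    f.write('4\n2\n1\n3\n0\n1\n0\n')  # ttask=4, umax=2, arrivals: 1,3,0,1,0
# main('input.txt') would then append a report line per tick to output.txt,
# ending with the total running cost.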
Code Example #29
def get_clustered(images: List[np.ndarray],
                  standards: List[np.ndarray]) -> List[Cluster]:
    clusters = [Cluster(standard) for standard in standards]

    for image in images:
        define_image(clusters, image)

    return clusters
Code Example #30
 def __init__(self, link, samples):
     self.link = link
     clusters = []
     for sample in samples:
         clusters.append(Cluster(sample.s_id, [sample]))
     self.clusters = clusters
     self.samples = samples
Code Example #31
File: node.py Project: billbsing/cluster-py
def main():

    parser = argparse.ArgumentParser(
        description='Execute command on all nodes')

    parser.add_argument('-c',
                        '--config',
                        default=DEFAULT_CONFIG_FILENAME,
                        help=f'config file. Default {DEFAULT_CONFIG_FILENAME}')

    parser.add_argument(
        '--count', help='number of nodes to operate on. Default: All nodes')

    parser.add_argument('command', help='command to run for each node')

    parser.add_argument('params', nargs='*', help='remote command to execute')

    args = parser.parse_args()
    cluster = Cluster()

    if not cluster.load_config(args.config):
        print(f'cannot find config file {args.config}')
        return

    count = 0
    if args.count:
        count = int(args.count)
    node_count = 0
    command = args.command.lower()
    if command not in COMMAND_LIST:
        print(f'unknown command "{args.command}"')
        return

    proc_list = []

    for node in cluster.nodes:
        if node_count >= count and count > 0:
            break
        node_count += 1
        command = args.command.lower()
        proc = Process(target=node_command,
                       args=(cluster, node, command, args.params))
        proc_list.append(proc)
        proc.start()

    for proc in proc_list:
        proc.join()
Code Example #32
def merge_clusters(clusters_list, min_intersection_size, OUTPUT_PATH):
	sorted_intersected_tag = get_sorted_intersected_tags(clusters_list, min_intersection_size)
	
	print 'Size before merge: ' + str(len(clusters_list))
	for item in sorted_intersected_tag:
		merged_cluster = None
		for cluster in clusters_list:
			if(set(item[0].split(":")).issubset(set(cluster.stemm_tags))):
				if(merged_cluster is None):
					merged_cluster = cluster
				else:
					print '- merged clusters: ' + str(merged_cluster.ID) + ' and ' + str(cluster.ID)
					merged_cluster.extend_cluster(cluster)
					clusters_list.remove(cluster)
	
	print 'Size after merge: ' + str(len(clusters_list))
	Cluster.serialize_list(clusters_list, OUTPUT_PATH)
Code Example #33
File: parsers.py Project: quboletest/quboletsdb
def setup_parsers():
    config_parser = argparse.ArgumentParser(description="Operate and manage opentsdb on Qubole Data Service.", add_help=False)
    config_parser.add_argument("-c", "--config", help="Path to configuration file", metavar="FILE")
    config_parser.add_argument("-v", "--version", action='version', version=__version__)

    argparser = argparse.ArgumentParser(description="Operate and manage opentsdb on Qubole Data Service.",
                                        parents=[config_parser])
    debug_group = argparser.add_mutually_exclusive_group()
    debug_group.add_argument("-d", "--debug", action="store_true", default=False,
                             help="Turn on debug logging and print to stdout")
    debug_group.add_argument("-x", "--log", dest="log_file",
                             help="Turn on debug logging and print to log file")

    subparsers = argparser.add_subparsers()
    Cluster.setup_parser(subparsers)

    return config_parser, argparser
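
The parents=[config_parser] pattern shares flags between parsers without re-declaring them; a minimal self-contained demo of that stdlib behavior:

import argparse

base = argparse.ArgumentParser(add_help=False)
base.add_argument('-c', '--config')
child = argparse.ArgumentParser(parents=[base])
args = child.parse_args(['-c', 'conf.ini'])
assert args.config == 'conf.ini'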
Code Example #34
    def __init__(self, node=None):
        self.cluster = Cluster()
        self.node = node

        self.id = node.id
        self.current_term = 0

        self.vote_for = None  # node.id of the voted candidate
Code Example #35
 def delete_intermediate(self):
     for file in self._dir:
         self.to_trash(file)
     if self.switches['MERGE_INPUT'][0]:
         self.to_trash(self.dir.input_seqs)
     Filter(Directory.filter_dir).delete_intermediate()
     Cluster(Directory.cluster_dir).delete_intermediate()
     return
Code Example #36
File: main.py Project: gregoryenriquez/neuro
    def __init__(self, *args, **kwargs):
        braindata = kwargs.get("braindata", None)
        if braindata is None:
            print("Intializing brain...")
            print("Neuron count: {0}".format(args[0]))
            print("Clusters: {0}".format(args[1]))
            self.num_neurons = args[0]
            self.num_clusters = args[1]
            self.max_value = args[2]

            for n in xrange(self.num_clusters):
                x = randrange(0, self.max_value)
                y = randrange(0, self.max_value)
                z = randrange(0, self.max_value)
                self.clusters.insert(len(self.clusters), Cluster(x, y, z, n))

        else:
            nodesfile = braindata + "/neurons.csv"
            clustersfile = braindata + "/clusters.csv"
            edgesfile = braindata + "/edges.csv"

            with open(nodesfile, "r") as nfile:
                self.num_neurons = nfile.readline().strip() # read first line count in
                for line in nfile:
                    line = line.strip()
                    ndata = line.split(", ")
                    self.neurons.append(Neuron(int(ndata[1]), int(ndata[2]), int(ndata[3]), int(ndata[0]), int(ndata[4])))

            with open(clustersfile, "r") as cfile:
                self.num_clusters = cfile.readline().strip() # read first line count in
                for line in cfile:
                    line = line.strip()
                    cdata = line.split(", ")
                    temp_cluster = Cluster(float(cdata[1]), float(cdata[2]), float(cdata[3]), int(cdata[0]))
                    for neuron in self.neurons:
                        # print(neuron.info())
                        if int(cdata[0]) == int(neuron.get_cluster()):
                            temp_cluster.add_neuron(neuron)
                    self.clusters.insert(len(self.clusters), temp_cluster)

            with open(edgesfile, "r") as efile:
                efile.readline().strip() # dump edges count
                for line in efile:
                    line = line.strip()
                    edata = line.split(", ")
                    self.edges.append([int(edata[0]), int(edata[1])])
Code Example #37
def get_clusters_from_asgard():
    response = requests.get('{0}/cluster/list.json'.format(base_url))
    if response.ok:
        clusters_json = response.json()
        clusters = [Cluster(cluster_dict) for cluster_dict in clusters_json if cluster_dict[u'cluster']]
        return clusters
    else:
        raise LookupError('cluster list request failed: %s' % response.status_code)
Code Example #38
 def __init__(self, json_string):
     model = json.loads(json_string)
     self.pca_model = PCAModel(model['pca model'])
     self.clusters = [Cluster.import_model(cluster) for cluster in
                      model['user clusters']]
     self.joke_clusters = [ItemCluster.import_model(cluster) for cluster in
                           model['joke clusters']]
     self.predictions = model['predictions']
Code Example #39
    def __pb_start_click(self):
        try:
            amount_of_points = int(self.ui.le_amount_of_points.text())
        except ValueError:
            amount_of_points = 10000

        try:
            amount_of_classes = int(self.ui.le_amount_of_classes.text())
        except ValueError:
            amount_of_classes = 7

        if not self.point_list:
            self.point_list = self.__generate_points(amount_of_points)

        self.cluster_list = self.__init_clusters(amount_of_classes)
        Cluster.start(amount_of_classes, self.cluster_list, self.point_list)
        self.__draw_cluster_list()
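
The repeated try/int/except-with-default pattern above can be factored into a helper; a small sketch (int_or_default is hypothetical):

def int_or_default(text, default):
    try:
        return int(text)
    except ValueError:
        return default

assert int_or_default('42', 7) == 42
assert int_or_default('', 7) == 7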
Code Example #40
class Optimize():

    def __init__(self, pickup_point_df, car_df, destination_df, k):
        '''

        :param pickup_point_df: Dataframe. column name: lat, lng, index
        :param car_df: Dataframe. column name: lat, lng, index
        :param destination_df: Dataframe. column name: lat, lng, index or id or any unique value
        :param k: k cluster for K means
        '''
        # self.pickup_point_df = pickup_point_df
        self.car_df = car_df
        self.car_df['index'] = -self.car_df['index'] # Negate car indexes, in case one is the same as a pickup index.
        self.car_x, self.car_y = self.car_df['lat'].values, self.car_df['lng'].values
        self.car_points = np.array([[i, j] for i,j in zip(self.car_x, self.car_y)])
        self.car_index = self.car_df['index'].values

        self.destination_df = destination_df
        self.end_x, self.end_y = self.destination_df['lat'].values, self.destination_df['lng'].values
        self.end_point = np.array([[i, j] for i,j in zip(self.end_x, self.end_y)])
        self.end_index = self.destination_df['index'].values

        self.x, self.y = pickup_point_df['lat'].values, pickup_point_df['lng'].values
        self.pickup_points = np.array([[i, j] for i,j in zip(self.x, self.y)])
        self.index_pickup = pickup_point_df['index'].tolist()

        self.k = k
        self.cluster = Cluster(k=self.k, route_array=self.pickup_points, index_list=self.index_pickup)
        self.sa = SA()

    def _cluster_info(self):
        centroid_df = pd.DataFrame.from_records(self.cluster.centroid, columns=['lat', 'lng'])  # put the centroid coordinates into a dataframe
        centroid_df['label'] = self.cluster.predict(self.cluster.centroid) # find which cluster each centroid belongs to

        label_points_index = self.cluster.point_in_cluster # each cluster's label, with the positions and indexes of its points

        return centroid_df, label_points_index

    @property
    def run(self):

        centroid_df, label_points_index = self._cluster_info()
        print(centroid_df)
        print(self.car_df)
        cluster_order = self.sa.Car(centroid_df, self.car_df)   # assign cars: which car goes to which cluster
        print(cluster_order)

        result = {}
        for i in range(self.k):

            routes = Merge.merge_start_end_route(np.array([self.car_points[i]]), label_points_index[str(cluster_order[i])]['route_point'], np.array([self.end_point[0]]))
            route_index = Merge.merge_start_end_index(np.array([self.car_index[i]]), label_points_index[str(cluster_order[i])]['route_index'], np.array([self.end_index[0]]))

            path, distance = self.sa.Route(routes, route_index)

            result['Route' + str(i)] = {'Path': path, 'Distance': distance, 'Cluster': cluster_order[i]}

        return result
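
A hypothetical minimal set of input frames matching the documented columns (all values made up):

import pandas as pd

pickup_point_df = pd.DataFrame({'lat': [1.30, 1.31], 'lng': [103.80, 103.90], 'index': [0, 1]})
car_df = pd.DataFrame({'lat': [1.29], 'lng': [103.70], 'index': [5]})
destination_df = pd.DataFrame({'lat': [1.35], 'lng': [103.95], 'index': [9]})
# Optimize(pickup_point_df, car_df, destination_df, k=1) would then cluster the
# pickups, assign the car to a cluster, and route it with simulated annealing.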
Code Example #41
def experiment(gd, K, lam, reps, gradientReps, improveReps):
    bestll = 0
    seed = 42
    edgeFeatures, edge_set, nEdgeFeatures, nNodes, clusters, NodeIndex = gd.file_process(
    )
    print(NodeIndex, nEdgeFeatures, nNodes, clusters)
    C = Cluster(K,
                reps,
                gradientReps,
                improveReps,
                lam,
                seed,
                edgeFeatures,
                edge_set,
                nEdgeFeatures,
                nNodes,
                clusters,
                whichLoss='SYMMETRICDIFF')
    print('cluster', C.cluster)
    nseeds = 1  # Number of random restarts
    for seed in range(nseeds):
        seed += 1
        C.train()
        ll = C.loglikelihood(C.theta, C.alpha, C.chat)
        print(C.chat)
        if ll > bestll or bestll == 0:
            bestll = ll
            bestClusters = C.chat
            bestTheta = C.theta
            bestAlpha = C.alpha

    file = open('result.txt', 'w')
    print('ll = ', bestll, file=file)
    print('loss_zeroone = ',
          totalLoss(clusters, bestClusters, nNodes, 'ZEROONE'),
          file=file)
    print("loss_symmetric = ",
          totalLoss(clusters, bestClusters, nNodes, 'SYMMETRICDIFF'),
          file=file)
    print("fscore = ",
          1 - totalLoss(clusters, bestClusters, nNodes, 'FSCORE'),
          file=file)
    print('Clusters:\n', bestClusters, file=file)
    print('Theta:\n', bestTheta, file=file)
    print('Alpha:\n', bestAlpha, file=file)
Code Example #42
File: mapped_cluster.py Project: lqsae/insertion
def main():
    args = get_args()
    infile = args.i
    ins = args.ins
    cluster = Cluster(infile, ins)
    if args.flag == 'True':
        out1 = args.out1
        cluster.cluster(out1)
    else:
        read_sequence = args.qs
        dict_read_sequence = get_insetion_sequece(read_sequence)
        out2 = args.out2
        out3 = args.out3
        file = open(out3, 'w+')
        k = args.k
        cluster.plt_cluster(k, out2)
        dict_cluster = cluster.cluster_selected(k)
        for key, value in dict_cluster.items():
            qname, left_sequence, right_sequence = extract_sequence(
                value, dict_read_sequence)
            left = '>' + '_' + str(
                key) + '_' + qname + '_' + 'left' + '\n' + left_sequence + '\n'
            file.write(left)
            right = '>' + '_' + str(
                key
            ) + '_' + qname + '_' + 'right' + '\n' + right_sequence + '\n'
            file.write(right)
Code Example #43
File: mylib.py Project: julienripoche/star_finder
def get_cluster_array(pixels, background, dispersion, threshold=None, my_wcs=None):
    """
    Find the list of clusters in the picture pixels
    according to a threshold defines with background and dispersion
    :param pixels: 2D array corresponding to the picture
    :param background: mean background value
    :param dispersion: dispersion of the background
    :return: list of clusters found in the picture pixels
    """

    # Initiate some variables before looping over all pixels
    if threshold is None:
        threshold = background + 6.0 * dispersion # threshold value
    marks = np.zeros(pixels.shape) # to mark each pixels
    n_row, n_column = pixels.shape
    cluster_array = [] # will contain instances of the class Cluster

    # Loop over pixels and add clusters in cluster_array
    for i in range(n_row):
        for j in range(n_column):
            if marks[i][j] != 1: # if the pixel is not marked
                marks[i][j] = 1 # mark it
                if pixels[i][j] >= threshold: # if luminosity > threshold
                    clust = Cluster()
                    clust.recursive_exploration(i, j, pixels, marks, threshold)
                    if my_wcs is not None:
                        clust.find_centroid(my_wcs=my_wcs)
                    else:
                        clust.find_centroid()
                    cluster_array.append(clust)

    # Return the array of clusters
    return cluster_array
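
The default threshold is a fixed 6-sigma cut above the background; a worked instance of that formula (numbers assumed for illustration):

background, dispersion = 100.0, 5.0
threshold = background + 6.0 * dispersion  # 100 + 6 * 5
assert threshold == 130.0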
Code Example #44
def main():
    my_cluster = Cluster()

    log("Setting up namenode HA")
    my_cluster.namenode_ha()

    log("Setting up Metastore HA")
    my_cluster.metastore_ha()

    log("Setting up hiveserver2 HA")
    my_cluster.hiveserver2_ha()

    log("Setting up Resource Manager HA")
    my_cluster.rm_ha()

    log("HA Setup completed successfully.")

    return
Code Example #45
File: deploy_pbs.py Project: It4innovations/quake
def up(workdir, init_cmd, short_names, debug):
    nodes = get_pbs_nodes(short_names)

    workdir = pathlib.Path(workdir).absolute()
    prepare_directory(workdir)

    env = None
    #env["PYTHONPATH"] = f'{ROOT_DIR}:{env.get("PYTHONPATH", "")}'

    cluster = Cluster(str(workdir))
    for node in nodes:
        start_datasrv(cluster, node, workdir, env, init_cmd)
    start_server(cluster, nodes, workdir, env, init_cmd, debug)

    cluster_path = workdir / CLUSTER_FILENAME
    logging.info(f"Writing cluster into {cluster_path}")
    with open(cluster_path, "w") as f:
        cluster.serialize(f)
Code Example #46
File: k_means.py Project: aaasen/wiki-cluster
def _get_clusters(documents, centroids):
    clusters = [Cluster(centroid, []) for centroid in centroids]

    for doc in documents:
        distances = [distance(doc.vector, centroid) for centroid in centroids]
        cluster_index = np.argmin(distances)
        clusters[cluster_index].documents.append(doc)

    return clusters
Code Example #47
def clusterisation_via_ligand_size(proteins_list):
    cluster = Cluster()

    for protein in proteins_list:
        if cluster.azole_size_dictionary.get(protein.azole_group) is None:
            cluster.azole_size_dictionary[protein.azole_group] = []
        cluster.azole_size_dictionary[protein.azole_group].append(protein)

    return cluster.azole_size_dictionary
Code Example #48
def processNewCluster(jobId, clusterDocs):
    cluster = Cluster(clusterDocs)
    clusterAndJobId = "Cluster id: " + cluster.id + ". Job id: " + jobId
    logger.info("Started processing new cluster. %s.", clusterAndJobId)

    clusterManager = ClusterManager()
    clusterManager.processNewCluster(cluster)

    logger.info("Completed processing new cluster. %s.", clusterAndJobId)
Code Example #49
def deploy_support(cluster_name, cert_manager_version):
    """Deploy support components to a cluster

    Args:
        cluster_name (str): The name of the cluster to deploy support components to
        cert_manager_version (str): The version of cert-manager to deploy to the
            cluster, in the form vX.Y.Z. where X.Y.Z is valid SemVer.
    """
    validate_cluster_config(cluster_name)
    validate_support_config(cluster_name)

    config_file_path = find_absolute_path_to_cluster_file(cluster_name)
    with open(config_file_path) as f:
        cluster = Cluster(yaml.load(f), config_file_path.parent)

    if cluster.support:
        with cluster.auth():
            cluster.deploy_support(cert_manager_version=cert_manager_version)
Code Example #50
File: node.py Project: Bluelich/redis-ctl
def pick_and_launch(host, port, cluster_id, start_cluster):
    logging.info('Launching cluster for [ %d ]', cluster_id)
    node = pick_by(host, port)
    if node.assignee is not None:
        raise errors.AppMutexError()
    cluster = Cluster.lock_by_id(cluster_id)
    start_cluster(node.host, node.port)
    node.assignee = cluster
    db.session.add(node)
Code Example #51
    def vcenterdatacenter_get_clusters(self, label, vcenter, tenantname, xml=False):
        '''
        Makes a REST API call to retrieve details of a vcenterdatacenter based on its UUID
        '''
        uri = self.vcenterdatacenter_query(label, vcenter, tenantname)

        (s, h) = common.service_json_request(self.__ipAddr, self.__port, "GET", 
                                             VcenterDatacenter.URI_DATACENTER_CLUSTERS.format(uri),
                                             None, None, xml)

        o = common.json_decode(s)

        from cluster import Cluster
        obj = Cluster(self.__ipAddr, self.__port)

        dtlslst = obj.cluster_get_details_list(o['cluster'])

        return dtlslst
Code Example #52
class Home(Stack):
    def __init__(self, id=ID, size=4, instance_type='m5.large'):
        self.id = id
        with Session() as session:
            # Get stack
            if self.id in session:
                self.stack = session[self.id]
            else:
                self.stack = {}
                session[self.id] = self.stack
            # Get Store
            if 'store' in self.stack:
                self.store = self.stack['store']
            else:
                self.store = Store(id=self.id)
                self.stack['store'] = self.store
            # Get Cluster
            if 'cluster' in self.stack:
                self.cluster = self.stack['cluster']
            else:
                self.cluster = Cluster(id=self.id,
                                       size=size,
                                       region_name='eu-west-1',
                                       instance_type=instance_type,
                                       instance_role=HOME_INSTANCE_ROLE)
                self.stack['cluster'] = self.cluster

    def create(self):
        self.store.create()
        self.cluster.create()
        return self

    def terminate(self):
        # We clean the bucket
        # self.store.bucket.Object('.s3keep')
        # We may want to delete bucket
        # self.store.terminate()
        self.cluster.terminate()
        with Session() as session:
            del session[self.id]
            self.stack = None
            # self.store = None
            self.cluster = None
        return self
Code Example #53
    def dbscan(self, data):
        self.init_params()
        self.data = data

        ## Setting up the plot
        # fig = plt.figure()

        axis_proj = 'rectilinear'
        if self.dim > 2:
            axis_proj = '%dd' % self.dim

        # ax = fig.add_subplot(111, projection = axis_proj)

        # default noise cluster
        noise = Cluster('Noise', self.dim)
        self.clusters.add(noise)

        for point in data:
            if point not in self.visited:
                self.visited.append(point)
                neighbour_pts = self.region_query(point)
                if len(neighbour_pts) < self.min_pts:
                    noise.add_point(point)
                else:
                    name = 'cluster-%d' % self.cluster_count
                    new_cluster = Cluster(name, self.dim)

                    self.cluster_count += 1
                    self.expand_cluster(new_cluster, point, neighbour_pts)

                    # if self.dim == 2:
                    #     ax.scatter(new_cluster.get_X(), new_cluster.get_Y(), c = self.color[self.cluster_count % len(self.color)],
                    #     marker = 'o', label = name)
                    # elif self.dim == 3:
                    #     ax.scatter(new_cluster.get_X(), new_cluster.get_Y(), new_cluster.get_Z(), marker = 'o',
                    #     c = self.color[self.cluster_count % len(self.color)], label = name)

        # if len(noise.get_points()) != 0:
        #     if self.dim > 2:
        #         ax.scatter(noise.get_X(), noise.get_Y(), noise.get_Z(), marker = 'x', label = noise.name)
        #     else:
        #         ax.scatter(noise.get_X(), noise.get_Y(), marker = 'x', label = noise.name)

        print("Number of clusters found: %d" % self.cluster_count)
Code Example #54
File: __init__.py Project: larsks/lvc
def main():
    opts, args = parse_args()

    if opts.headers:
        defaults['headers'] = 'true'

    config = ConfigParser(defaults)
    config.read(opts.config)

    if not config.has_section('cluster'):
        config.add_section('cluster')

    cluster = Cluster(config)

    if not args:
        args = [ 'list' ]

    cmd = args.pop(0)
    sys.exit(cluster.dispatch(cmd, args))
Code Example #55
    def getProcessedCluster(self, cluster):
        """
        If the cluster has been previously processed and cached, returns the cached result.
        Otherwise, processes it freshly and returns the result after saving it to the cache.
        """

        clusterId = cluster.id
        try:
            k = Key(self.__getBucket())
            k.key = clusterId
            keyContents = k.get_contents_as_string()
            logging.info("Preprocessed cluster found for: " + clusterId)

            preProcessedCluster = Cluster([])
            preProcessedCluster.deserializeFromString(keyContents)
            return preProcessedCluster
        except S3ResponseError:
            logging.info("Preprocessed cluster not found for: " + clusterId)
            return self.processAndSaveCluster(cluster)
Code Example #56
File: main.py Project: iamkeyur/task-identification
def do_exp(path, start=0):
    with open(path, "r", encoding="utf-8") as log:
        time_slice = log.read().split("#\n")
        # print(time_slice)
        sequence = [s[:-1].split("\n") for s in time_slice]
        # print(sequence)

    cluster = []
    lexical = Lexical()
    semantic = Semantic("/Users/susen/Projects/cs290n/intermediate/index.txt")
    offset = start

    for s in sequence:
        c = Cluster(s, lexical, semantic, start=offset, threshold=0.2)
        c.wcc()
        cluster += c.generate_cluster(c.result, time_only=False)
        offset += len(s)

    return cluster
Code Example #57
File: common.py Project: jsevellec/ccm
def load_current_cluster(path):
    name = current_cluster_name(path)
    if name is None:
        print 'No currently active cluster (use ccm cluster switch)'
        exit(1)
    try:
        return Cluster.load(path, name)
    except common.LoadError as e:
        print str(e)
        exit(1)
Code Example #58
File: node.py Project: Bluelich/redis-ctl
def pick_and_replicate(master_host, master_port, slave_host, slave_port,
                       replicate_node):
    master_node = pick_by(master_host, master_port)
    if master_node.assignee_id is None:
        raise ValueError('node not in cluster')
    cluster = Cluster.lock_by_id(master_node.assignee_id)
    slave_node = pick_by(slave_host, slave_port)
    replicate_node(master_host, master_port, slave_host, slave_port)
    slave_node.assignee = cluster
    db.session.add(slave_node)