예제 #1
0
파일: manager.py 프로젝트: ema/conpaas
    def _start_osd(self, nodes, cloud=None):
        """Start an OSD service on each given agent node, backed by a volume.

        Re-uses a previously associated storage volume when the OSD uuid is
        already known, otherwise creates a fresh one. The volume is attached
        to the node as device "sdb" and the agent creates the OSD, formatting
        the volume only when it is brand new.

        :param nodes: agent nodes that should each run an OSD
        :param cloud: cloud to create new volumes in (None = default)
        :raises client.AgentException: if an agent fails to start its OSD;
            the manager state is set to S_ERROR before re-raising.
        """
        for idx, node in enumerate(nodes):
            # uuid for this node's 'osd' role (project helper; presumably
            # stable for a given node.id -- TODO confirm).
            osd_uuid = self.__get__uuid(node.id, 'osd')

            volume_associated = osd_uuid in self.osd_uuid_volume_map

            # We need a storage volume for each OSD node. Check if this OSD
            # node needs a new volume to be created.
            if volume_associated:
                # No need to create a new volume.
                volume = self.get_volume(self.osd_uuid_volume_map[osd_uuid])

                self.logger.debug(
                    '%s already has an associated storage volume (%s)' %
                        (osd_uuid, volume.id))
            else:
                # We need to create a new volume.
                volume_name = "osd-%s" % osd_uuid
                volume = self.create_volume(self.osd_volume_size, volume_name,
                        node.id, cloud)
                self.osd_uuid_volume_map[osd_uuid] = volume.id

            try:
                self.attach_volume(volume.id, node.id, "sdb")
            except Exception, err:
                # Best-effort: attach failures are logged but do not abort --
                # NOTE(review): possibly the volume can already be attached
                # from an earlier run; verify against attach_volume semantics.
                self.logger.error("attach_volume: %s" % err)

            try:
                # mkfs only when the volume was just created; re-formatting an
                # existing volume would destroy its data.
                client.createOSD(node.ip, 5555, self.dirNodes[0].ip, osd_uuid,
                        mkfs=not volume_associated)
            except client.AgentException:
                self.logger.exception('Failed to start OSD at node %s' % node)
                self.state = self.S_ERROR
                raise
예제 #2
0
 def _start_osd(self, nodes):
     """Ask every agent in *nodes* to create an OSD service.

     On agent failure the manager state is flipped to S_ERROR and the
     exception is propagated to the caller.
     """
     for osd_node in nodes:
         try:
             client.createOSD(osd_node.ip, 5555, self.dirNodes[0].ip)
         except client.AgentException:
             self.logger.exception('Failed to start OSD at node %s' % osd_node)
             self.state = self.S_ERROR
             raise
예제 #3
0
 def _start_osd(self, nodes):
     """Start the OSD service on each of the given nodes via its agent.

     Sets state to S_ERROR and re-raises if any agent call fails.
     """
     for n in nodes:
         try:
             # Each OSD registers against the first DIR node.
             client.createOSD(n.ip, 5555, self.dirNodes[0].ip)
         except client.AgentException:
             self.logger.exception('Failed to start OSD at node %s' % n)
             self.state = self.S_ERROR
             raise
예제 #4
0
    def _do_add_nodes(self, nr_dir, nr_mrc, nr_osd, cloud):
        """Provision new agent nodes and start DIR/MRC/OSD services on them.

        :param nr_dir: number of new DIR nodes to add
        :param nr_mrc: number of new MRC nodes to add
        :param nr_osd: number of new OSD nodes to add
        :param cloud: cloud to create the nodes in
        :returns: HttpJsonResponse on success; None when provisioning
            failed (state is then set to S_STOPPED).
        """
        startCloud = self._init_cloud(cloud)
        totalNodes = nr_dir + nr_mrc + nr_osd

        # try to create totalNodes new nodes
        try:
            node_instances = self.controller.create_nodes(totalNodes,
                client.check_agent_process, 5555, startCloud)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are no longer swallowed here.
            self.logger.exception('_do_add_nodes: Failed to request a new node')
            self.state = self.S_STOPPED
            return

        self.nodes += node_instances

        # Partition the new nodes by role, in the order they were requested:
        # first the DIRs, then the MRCs, then the OSDs.
        dirNodesAdded = node_instances[:nr_dir]
        self.dirNodes += dirNodesAdded

        mrcNodesAdded = node_instances[nr_dir:nr_mrc + nr_dir]
        self.mrcNodes += mrcNodesAdded

        osdNodesAdded = node_instances[nr_mrc + nr_dir:]
        self.osdNodes += osdNodesAdded

        # TODO: maybe re-enable when OSD-removal moves data to another node before shutting down the service.
        #KilledOsdNodes = []
        # The first node will contain the OSD service so it will be removed
        # from there
        #if nr_osd > 0 and self.osdCount == 0:
        #    KilledOsdNodes.append(self.dirNodes[0])
        #self.KillOsd(KilledOsdNodes)

        # Startup DIR agents
        for node in dirNodesAdded:
            client.startup(node.ip, 5555)
            data = client.createDIR(node.ip, 5555)
            self.logger.info('Received %s from %s', data, node.id)
            self.dirCount += 1

        # Startup MRC agents (each MRC registers against the first DIR)
        for node in mrcNodesAdded:
            client.startup(node.ip, 5555)
            data = client.createMRC(node.ip, 5555, self.dirNodes[0].ip)
            self.logger.info('Received %s from %s', data, node.id)
            self.mrcCount += 1

        # Startup OSD agents (each OSD registers against the first DIR)
        for node in osdNodesAdded:
            client.startup(node.ip, 5555)
            data = client.createOSD(node.ip, 5555, self.dirNodes[0].ip)
            self.logger.info('Received %s from %s', data, node.id)
            self.osdCount += 1

        self.state = self.S_RUNNING
        return HttpJsonResponse()
예제 #5
0
    def _do_add_nodes(self, nr_dir, nr_mrc, nr_osd, cloud):
        """Create new agent nodes and bring up DIR, MRC and OSD services.

        :param nr_dir: number of DIR nodes to add
        :param nr_mrc: number of MRC nodes to add
        :param nr_osd: number of OSD nodes to add
        :param cloud: cloud to create the nodes in
        :returns: HttpJsonResponse on success; None on provisioning
            failure (state becomes S_STOPPED).
        """
        startCloud = self._init_cloud(cloud)
        totalNodes = nr_dir + nr_mrc + nr_osd

        # try to create totalNodes new nodes
        try:
            node_instances = self.controller.create_nodes(
                totalNodes, client.check_agent_process, 5555, startCloud)
        except Exception:
            # Was a bare "except:" -- narrowed so that SystemExit and
            # KeyboardInterrupt still propagate.
            self.logger.exception(
                '_do_add_nodes: Failed to request a new node')
            self.state = self.S_STOPPED
            return

        self.nodes += node_instances

        # Slice the freshly created nodes into their roles, in request order.
        dirNodesAdded = node_instances[:nr_dir]
        self.dirNodes += dirNodesAdded

        mrcNodesAdded = node_instances[nr_dir:nr_mrc + nr_dir]
        self.mrcNodes += mrcNodesAdded

        osdNodesAdded = node_instances[nr_mrc + nr_dir:]
        self.osdNodes += osdNodesAdded

        # TODO: maybe re-enable when OSD-removal moves data to another node before shutting down the service.
        #KilledOsdNodes = []
        # The first node will contain the OSD service so it will be removed
        # from there
        #if nr_osd > 0 and self.osdCount == 0:
        #    KilledOsdNodes.append(self.dirNodes[0])
        #self.KillOsd(KilledOsdNodes)

        # Startup DIR agents
        for node in dirNodesAdded:
            client.startup(node.ip, 5555)
            data = client.createDIR(node.ip, 5555)
            self.logger.info('Received %s from %s', data, node.id)
            self.dirCount += 1

        # Startup MRC agents (MRCs need the address of the first DIR)
        for node in mrcNodesAdded:
            client.startup(node.ip, 5555)
            data = client.createMRC(node.ip, 5555, self.dirNodes[0].ip)
            self.logger.info('Received %s from %s', data, node.id)
            self.mrcCount += 1

        # Startup OSD agents (OSDs need the address of the first DIR)
        for node in osdNodesAdded:
            client.startup(node.ip, 5555)
            data = client.createOSD(node.ip, 5555, self.dirNodes[0].ip)
            self.logger.info('Received %s from %s', data, node.id)
            self.osdCount += 1

        self.state = self.S_RUNNING
        return HttpJsonResponse()
예제 #6
0
파일: manager.py 프로젝트: ema/conpaas
 def createOSD(self, kwargs):
     """Create an OSD service on every known agent node.

     Only valid while the service is in the S_RUNNING state; otherwise
     an error response is returned.
     """
     if self.state != self.S_RUNNING:
         return HttpErrorResponse('ERROR: Wrong state to create OSD service')
     # Fan the createOSD call out to all agents.
     for agent in self.nodes:
         reply = client.createOSD(agent.ip, 5555, self.dirNodes[0].ip)
         self.logger.info('Received %s from %s', reply, agent.id)
     node_ids = [agent.id for agent in self.nodes]
     return HttpJsonResponse({'xtreemfs': node_ids})
예제 #7
0
 def createOSD(self, kwargs):
     """Trigger OSD creation on every agent node (requires S_RUNNING)."""
     if self.state != self.S_RUNNING:
         return HttpErrorResponse(
             'ERROR: Wrong state to create OSD service')
     # Each agent creates its OSD, registering against the first DIR.
     for n in self.nodes:
         data = client.createOSD(n.ip, 5555, self.dirNodes[0].ip)
         self.logger.info('Received %s from %s', data, n.id)
     return HttpJsonResponse({'xtreemfs': [n.id for n in self.nodes]})
예제 #8
0
    def _start_osd(self, nodes, cloud=None):
        """Start an OSD service on each given agent node, backed by a volume.

        Re-uses a previously associated storage volume when the OSD uuid is
        already known, otherwise creates a fresh one. The volume is attached
        to the node as device "sdb" and the agent creates the OSD, formatting
        the volume only when it is brand new.

        :param nodes: agent nodes that should each run an OSD
        :param cloud: cloud to create new volumes in (None = default)
        :raises client.AgentException: if an agent fails to start its OSD;
            the manager state is set to S_ERROR before re-raising.
        """
        for idx, node in enumerate(nodes):
            # uuid for this node's 'osd' role (project helper; presumably
            # stable for a given node.id -- TODO confirm).
            osd_uuid = self.__get__uuid(node.id, 'osd')

            volume_associated = osd_uuid in self.osd_uuid_volume_map

            # We need a storage volume for each OSD node. Check if this OSD
            # node needs a new volume to be created.
            if volume_associated:
                # No need to create a new volume.
                volume = self.get_volume(self.osd_uuid_volume_map[osd_uuid])

                self.logger.debug(
                    '%s already has an associated storage volume (%s)' %
                    (osd_uuid, volume.id))
            else:
                # We need to create a new volume.
                volume_name = "osd-%s" % osd_uuid
                volume = self.create_volume(self.osd_volume_size, volume_name,
                                            node.id, cloud)
                self.osd_uuid_volume_map[osd_uuid] = volume.id

            try:
                self.attach_volume(volume.id, node.id, "sdb")
            except Exception, err:
                # Best-effort: attach failures are logged but do not abort --
                # NOTE(review): possibly the volume can already be attached
                # from an earlier run; verify against attach_volume semantics.
                self.logger.error("attach_volume: %s" % err)

            try:
                # mkfs only when the volume was just created; re-formatting an
                # existing volume would destroy its data.
                client.createOSD(node.ip,
                                 5555,
                                 self.dirNodes[0].ip,
                                 osd_uuid,
                                 mkfs=not volume_associated)
            except client.AgentException:
                self.logger.exception('Failed to start OSD at node %s' % node)
                self.state = self.S_ERROR
                raise