Example #1
    def _parseConfigFile(config_file):
        """
        Parse the config file
        :param config_file: path to the recovery config file
        :return: list of dictionaries, each containing the failed (and, if given, failover) segment information
        """
        rows = []
        with open(config_file) as f:
            for lineno, line in line_reader(f):

                groups = line.split()  # NOT line.split(' ') due to MPP-15675
                if len(groups) not in [1, 2]:
                    msg = "line %d of file %s: expected 1 or 2 groups but found %d" % (
                        lineno, config_file, len(groups))
                    raise ExceptionNoStackTraceNeeded(msg)
                parts = groups[0].split('|')
                if len(parts) != 3:
                    msg = "line %d of file %s: expected 3 parts on failed segment group, obtained %d" % (
                        lineno, config_file, len(parts))
                    raise ExceptionNoStackTraceNeeded(msg)
                address, port, datadir = parts
                check_values(lineno,
                             address=address,
                             port=port,
                             datadir=datadir)
                datadir = normalizeAndValidateInputPath(
                    datadir, f.name, lineno)

                row = {
                    'failedAddress': address,
                    'failedPort': port,
                    'failedDataDirectory': datadir,
                    'lineno': lineno
                }
                if len(groups) == 2:
                    parts2 = groups[1].split('|')
                    if len(parts2) != 3:
                        msg = "line %d of file %s: expected 3 parts on new segment group, obtained %d" % (
                            lineno, config_file, len(parts2))
                        raise ExceptionNoStackTraceNeeded(msg)
                    address2, port2, datadir2 = parts2
                    check_values(lineno,
                                 address=address2,
                                 port=port2,
                                 datadir=datadir2)
                    datadir2 = normalizeAndValidateInputPath(
                        datadir2, f.name, lineno)

                    row.update({
                        'newAddress': address2,
                        'newPort': port2,
                        'newDataDirectory': datadir2
                    })

                rows.append(row)

        RecoveryTripletsUserConfigFile._validate(rows)

        return rows
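
For illustration, a minimal sketch (hypothetical host names and paths) of the line shape that _parseConfigFile accepts: one whitespace-separated failed-segment group of three '|'-delimited fields, optionally followed by a second group describing the failover target.

    # Hypothetical sample lines; they only illustrate the tokenization used above.
    sample_lines = [
        "sdw1|40000|/data/primary/gpseg0",
        "sdw1|40000|/data/primary/gpseg0 sdw2|41000|/data/mirror/gpseg0",
    ]
    for line in sample_lines:
        groups = line.split()                    # same whitespace split as the parser
        assert len(groups) in (1, 2)
        for group in groups:
            assert len(group.split('|')) == 3    # address|port|datadir
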
Example #2
    def __getDataDirectoriesForMirrors(self, maxPrimariesPerHost, gpArray):
        dirs = []

        configFile = self.__options.mirrorDataDirConfigFile
        if configFile is not None:

            #
            # load from config file
            #
            lines = readAllLinesFromFile(configFile,
                                         stripLines=True,
                                         skipEmptyLines=True)

            labelOfPathsBeingRead = "data"
            index = 0
            for line in lines:
                if index == maxPrimariesPerHost:
                    raise Exception('Number of %s directories must equal %d but more were read from %s' % \
                                    (labelOfPathsBeingRead, maxPrimariesPerHost, configFile))

                path = normalizeAndValidateInputPath(line, "config file")
                dirs.append(path)
                index += 1
            if index < maxPrimariesPerHost:
                raise Exception('Number of %s directories must equal %d but %d were read from %s' % \
                                (labelOfPathsBeingRead, maxPrimariesPerHost, index, configFile))
        else:

            #
            # get from stdin
            #
            while len(dirs) < maxPrimariesPerHost:
                print(
                    'Enter mirror segment data directory location %d of %d >' %
                    (len(dirs) + 1, maxPrimariesPerHost))
                line = input().strip()
                if len(line) > 0:
                    try:
                        dirs.append(normalizeAndValidateInputPath(line))
                    except PathNormalizationException as e:
                        print("\n%s\n" % e)

        return dirs
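
As a sketch only (the directory names are hypothetical), the mirrorDataDirConfigFile read above is expected to hold exactly maxPrimariesPerHost non-empty lines, one mirror data directory per primary on the host; more or fewer raises an exception.

    # Hypothetical file contents for a host with two primaries.
    sample_lines = ["/data/mirror/gpseg0", "/data/mirror/gpseg1"]
    maxPrimariesPerHost = 2
    assert len(sample_lines) == maxPrimariesPerHost
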
Example #3
    def __getMirrorsToBuildFromConfigFile(self, gpArray):

        # create fileData object from config file
        #
        filename = self.__options.mirrorConfigFile
        rows = []
        with open(filename) as f:
            for lineno, line in line_reader(f):
                fixed, flexible = parse_gpaddmirrors_line(filename, lineno, line)
                rows.append(ParsedConfigFileRow(fixed, flexible, line))
        fileData = ParsedConfigFile([], rows)

        # validate fileData
        #
        allAddresses = [row.getFixedValuesMap()["address"] for row in fileData.getRows()]
        allNoneArr = [None for a in allAddresses]
        interfaceLookup = GpInterfaceToHostNameCache(self.__pool, allAddresses, allNoneArr)

        #
        # build up the output now
        #
        toBuild = []
        primaries = [seg for seg in gpArray.getDbList() if seg.isSegmentPrimary(current_role=False)]
        segsByContentId = GpArray.getSegmentsByContentId(primaries)

        # note: passed port offset in this call should not matter
        calc = GpMirrorBuildCalculator(gpArray, [], self.__options)

        for row in fileData.getRows():
            fixedValues = row.getFixedValuesMap()
            flexibleValues = row.getFlexibleValuesMap()

            contentId = int(fixedValues['contentId'])
            address = fixedValues['address']
            #
            # read the rest and add the mirror
            #
            port = int(fixedValues['port'])
            dataDir = normalizeAndValidateInputPath(fixedValues['dataDirectory'], "in config file", row.getLine())
            hostName = interfaceLookup.getHostName(address)
            if hostName is None:
                raise Exception("Segment Host Address %s is unreachable" % address)

            primary = segsByContentId[contentId]
            if primary is None:
                raise Exception("Invalid content %d specified in input file" % contentId)
            primary = primary[0]

            calc.addMirror(toBuild, primary, hostName, address, port, dataDir)

        if len(toBuild) != len(primaries):
            raise Exception("Wrong number of mirrors specified (specified %s mirror(s) for %s primary(ies))" % \
                            (len(toBuild), len(primaries)))

        return GpMirrorListToBuild(toBuild, self.__pool, self.__options.quiet, self.__options.parallelDegree)
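
The loop above consumes only four fixed fields per parsed row; a hypothetical stand-in for one row's getFixedValuesMap() result could look like this (values are illustrative, not from any real cluster):

    fixedValues = {
        'contentId': '0',                        # matched against the primary's content id
        'address': 'sdw2',                       # resolved to a host name via interfaceLookup
        'port': '41000',                         # converted with int() before use
        'dataDirectory': '/data/mirror/gpseg0',  # normalized and validated against the line
    }
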
Example #4
    def __getDataDirectoriesForMirrors(self, maxPrimariesPerHost, gpArray):
        dirs = []

        configFile = self.__options.mirrorDataDirConfigFile
        if configFile is not None:

            #
            # load from config file
            #
            lines = readAllLinesFromFile(configFile, stripLines=True, skipEmptyLines=True)

            labelOfPathsBeingRead = "data"
            index = 0
            for line in lines:
                if index == maxPrimariesPerHost:
                    raise Exception('Number of %s directories must equal %d but more were read from %s' % \
                                    (labelOfPathsBeingRead, maxPrimariesPerHost, configFile))

                path = normalizeAndValidateInputPath(line, "config file")
                dirs.append(path)
                index += 1
            if index < maxPrimariesPerHost:
                raise Exception('Number of %s directories must equal %d but %d were read from %s' % \
                                (labelOfPathsBeingRead, maxPrimariesPerHost, index, configFile))
        else:

            #
            # get from stdin
            #
            while len(dirs) < maxPrimariesPerHost:
                print 'Enter mirror segment data directory location %d of %d >' % (len(dirs) + 1, maxPrimariesPerHost)
                line = raw_input().strip()
                if len(line) > 0:
                    try:
                        dirs.append(normalizeAndValidateInputPath(line))
                    except PathNormalizationException, e:
                        print "\n%s\n" % e

        return dirs
Example #5
    def __getMirrorsToBuildFromConfigFile(self, gpArray):
        filename = self.__options.mirrorConfigFile
        rows = []
        with open(filename) as f:
            for lineno, line in line_reader(f):
                rows.append(self._getParsedRow(filename, lineno, line))

        allAddresses = [row["address"] for row in rows]
        interfaceLookup = GpInterfaceToHostNameCache(
            self.__pool, allAddresses, [None] * len(allAddresses))

        #
        # build up the output now
        #
        toBuild = []
        primaries = [
            seg for seg in gpArray.getDbList()
            if seg.isSegmentPrimary(current_role=False)
        ]
        segsByContentId = GpArray.getSegmentsByContentId(primaries)

        # note: passed port offset in this call should not matter
        calc = GpMirrorBuildCalculator(gpArray, [], self.__options)

        for row in rows:
            contentId = int(row['contentId'])
            address = row['address']
            dataDir = normalizeAndValidateInputPath(row['dataDirectory'],
                                                    "in config file",
                                                    row['lineno'])
            hostName = interfaceLookup.getHostName(address)
            if hostName is None:
                raise Exception("Segment Host Address %s is unreachable" %
                                address)

            primary = segsByContentId[contentId]
            if primary is None:
                raise Exception("Invalid content %d specified in input file" %
                                contentId)

            calc.addMirror(toBuild, primary[0], hostName, address,
                           int(row['port']), dataDir)

        if len(toBuild) != len(primaries):
            raise Exception("Wrong number of mirrors specified (specified %s mirror(s) for %s primary(ies))" % \
                            (len(toBuild), len(primaries)))

        return GpMirrorListToBuild(toBuild, self.__pool, self.__options.quiet,
                                   self.__options.parallelDegree)
Example #6
    def __getMirrorsToBuildFromConfigFile(self, gpArray):
        filename = self.__options.mirrorConfigFile
        rows = []
        with open(filename) as f:
            for lineno, line in line_reader(f):
                rows.append(self._getParsedRow(filename, lineno, line))

        allAddresses = [row["address"] for row in rows]
        #
        # build up the output now
        #
        toBuild = []
        primaries = [
            seg for seg in gpArray.getDbList()
            if seg.isSegmentPrimary(current_role=False)
        ]
        segsByContentId = GpArray.getSegmentsByContentId(primaries)

        # note: passed port offset in this call should not matter
        calc = GpMirrorBuildCalculator(gpArray, [], self.__options)

        for row in rows:
            contentId = int(row['contentId'])
            address = row['address']
            dataDir = normalizeAndValidateInputPath(row['dataDirectory'],
                                                    "in config file",
                                                    row['lineno'])
            # FIXME: hostname probably should not be address, but to do so, "hostname" should be added to gpaddmirrors config file
            hostName = address

            primary = segsByContentId[contentId]
            if primary is None:
                raise Exception("Invalid content %d specified in input file" %
                                contentId)

            calc.addMirror(toBuild, primary[0], hostName, address,
                           int(row['port']), dataDir)

        if len(toBuild) != len(primaries):
            raise Exception("Wrong number of mirrors specified (specified %s mirror(s) for %s primary(ies))" % \
                            (len(toBuild), len(primaries)))

        return GpMirrorListToBuild(
            toBuild,
            self.__pool,
            self.__options.quiet,
            self.__options.batch_size,
            parallelPerHost=self.__options.segment_batch_size)
Example #7
    def __getDataDirectoriesForMirrors(self, maxPrimariesPerHost, gpArray):
        dirs = []
        filespaceOidToPathMaps = []
        while len(filespaceOidToPathMaps) < maxPrimariesPerHost:
            filespaceOidToPathMaps.append({})

        filespaceNameToOid = {}
        for fs in gpArray.getFilespaces(False):
            filespaceNameToOid[fs.getName()] = fs.getOid()

        configFile = self.__options.mirrorDataDirConfigFile
        if configFile is not None:

            #
            # load from config file
            #
            lines = readAllLinesFromFile(configFile, stripLines=True, skipEmptyLines=True)

            labelOfPathsBeingRead = "data"
            index = 0
            fsOid = gparray.SYSTEM_FILESPACE
            enteredFilespaces = {}
            for line in lines:
                if line.startswith("filespace "):
                    if index < maxPrimariesPerHost:
                        raise Exception('Number of %s directories must equal %d but %d were read from %s' % \
                            (labelOfPathsBeingRead, maxPrimariesPerHost, index, configFile))

                    fsName = line[len("filespace "):].strip()
                    labelOfPathsBeingRead = fsName

                    if fsName not in filespaceNameToOid:
                        raise Exception("Unknown filespace %s specified in input file %s" % \
                            (fsName, configFile))
                    fsOid = filespaceNameToOid[fsName]
                    
                    if gpArray.isFileSpaceShared(fsOid):
                        raise Exception("Shared filespace %s does not need to be specified in input file %s" % \
                            (fsName, configFile))

                    if fsName in enteredFilespaces:
                        raise Exception( "Filespace %s specified twice in input file %s" % \
                            (fsName, configFile))
                    enteredFilespaces[fsName] = True

                    index = 0
                else:
                    if index == maxPrimariesPerHost:
                        raise Exception('Number of %s directories must equal %d but more were read from %s' % \
                            (labelOfPathsBeingRead, maxPrimariesPerHost, configFile))

                    path = normalizeAndValidateInputPath(line, "config file")
                    if fsOid == gparray.SYSTEM_FILESPACE:
                        dirs.append(path)
                    else:
                        filespaceOidToPathMaps[index][fsOid] = path
                    index += 1
            if index < maxPrimariesPerHost:
                raise Exception('Number of %s directories must equal %d but %d were read from %s' % \
                    (labelOfPathsBeingRead, maxPrimariesPerHost, index, configFile))

            # The shared filespace path will be set later.
            for (fsName, fsOid) in filespaceNameToOid.iteritems():
                if fsName in enteredFilespaces:
                    continue
                if not gpArray.isFileSpaceShared(fsOid):
                    break
                for index in range(maxPrimariesPerHost):
                    filespaceOidToPathMaps[index][fsOid] = None
                enteredFilespaces[fsName] = True

            if len(enteredFilespaces) != len(filespaceNameToOid):
                raise Exception("Only read directories for %d of %d filespaces from %s" % \
                    (len(enteredFilespaces), len(filespaceNameToOid), configFile))
        else:

            #
            # get from stdin
            #
            while len(dirs) < maxPrimariesPerHost:
                print 'Enter mirror segment data directory location %d of %d >' % (len(dirs)+1, maxPrimariesPerHost)
                line = sys.stdin.readline().strip()
                if len(line) > 0:
                    try:
                        dirs.append(normalizeAndValidateInputPath(line))
                    except PathNormalizationException, e:
                        print "\n%s\n" % e


            for fs in gpArray.getFilespaces(False):
                # Shared storage path can only be set when we know the
                # relationship between the primary and mirror.
                fsys = fs.getFsys()
                delay_set = False
                if fsys != None and fsys.isShared():
                    delay_set = True
                    print 'Skip filespace "%s" stored on the shared filesystem "%s"' % (fs.getName(), fsys.getName())

                index = 0
                while index < maxPrimariesPerHost:
                    if delay_set:
                        filespaceOidToPathMaps[index][fs.getOid()] = None
                        index += 1
                        continue

                    print "Enter mirror filespace '%s' directory location %d of %d >" % \
                                (fs.getName(), index+1, maxPrimariesPerHost)
                    line = sys.stdin.readline().strip()
                    if len(line) > 0:
                        try:
                            filespaceOidToPathMaps[index][fs.getOid()] = normalizeAndValidateInputPath(line)
                            index += 1
                        except PathNormalizationException, e:
                            print "\n%s\n" % e
Example #8
    def getRecoveryActionsFromConfigFile(self, gpArray):
        """
        getRecoveryActionsFromConfigFile

        returns: a tuple (segments in change tracking disabled mode which are unable to recover, GpMirrorListToBuild object
                 containing information of segments which are able to recover)
        """

        # create fileData object from config file
        #
        filename = self.__options.recoveryConfigFile
        rows = []
        with open(filename) as f:
            for lineno, line in line_reader(f):
                fixed, flexible = parse_gprecoverseg_line(
                    filename, lineno, line)
                rows.append(ParsedConfigFileRow(fixed, flexible, line))
        fileData = ParsedConfigFile(rows)

        allAddresses = [
            row.getFixedValuesMap()["newAddress"]
            for row in fileData.getRows()
            if "newAddress" in row.getFixedValuesMap()
        ]
        allNoneArr = [None] * len(allAddresses)
        interfaceLookup = GpInterfaceToHostNameCache(self.__pool, allAddresses,
                                                     allNoneArr)

        failedSegments = []
        failoverSegments = []
        for row in fileData.getRows():
            fixedValues = row.getFixedValuesMap()
            flexibleValues = row.getFlexibleValuesMap()

            # find the failed segment
            failedAddress = fixedValues['failedAddress']
            failedPort = fixedValues['failedPort']
            failedDataDirectory = normalizeAndValidateInputPath(
                fixedValues['failedDataDirectory'], "config file",
                row.getLine())
            failedSegment = None
            for segment in gpArray.getDbList():
                if segment.getSegmentAddress() == failedAddress and \
                                str(segment.getSegmentPort()) == failedPort and \
                                segment.getSegmentDataDirectory() == failedDataDirectory:

                    if failedSegment is not None:
                        #
                        # this could be an assertion -- configuration should not allow multiple entries!
                        #
                        raise Exception((
                            "A segment to recover was found twice in configuration.  "
                            "This segment is described by address:port:directory '%s:%s:%s' "
                            "on the input line: %s") %
                                        (failedAddress, failedPort,
                                         failedDataDirectory, row.getLine()))
                    failedSegment = segment

            if failedSegment is None:
                raise Exception("A segment to recover was not found in configuration.  " \
                                "This segment is described by address:port:directory '%s:%s:%s' on the input line: %s" %
                                (failedAddress, failedPort, failedDataDirectory, row.getLine()))

            failoverSegment = None
            if "newAddress" in fixedValues:
                """
                When the second group is passed, the caller is telling us where to fail over, so
                  build a failover segment
                """
                # these two lines make it so that failoverSegment points to the object that is registered in gparray
                failoverSegment = failedSegment
                failedSegment = failoverSegment.copy()

                address = fixedValues["newAddress"]
                try:
                    port = int(fixedValues["newPort"])
                except ValueError:
                    raise Exception(
                        'Config file format error, invalid number value in line: %s'
                        % (row.getLine()))

                dataDirectory = normalizeAndValidateInputPath(
                    fixedValues["newDataDirectory"], "config file",
                    row.getLine())

                hostName = interfaceLookup.getHostName(address)
                if hostName is None:
                    raise Exception(
                        'Unable to find host name for address %s from line:%s'
                        % (address, row.getLine()))

                # now update values in failover segment
                failoverSegment.setSegmentAddress(address)
                failoverSegment.setSegmentHostName(hostName)
                failoverSegment.setSegmentPort(port)
                failoverSegment.setSegmentDataDirectory(dataDirectory)

            # this must come AFTER the if check above because failedSegment can be adjusted to
            #   point to a different object
            failedSegments.append(failedSegment)
            failoverSegments.append(failoverSegment)

        peersForFailedSegments = self.findAndValidatePeersForFailedSegments(
            gpArray, failedSegments)

        segs = []
        segs_with_persistent_mirroring_disabled = []
        for index, failedSegment in enumerate(failedSegments):
            peerForFailedSegment = peersForFailedSegments[index]

            peerForFailedSegmentDbId = peerForFailedSegment.getSegmentDbId()
            segs.append(
                GpMirrorToBuild(failedSegment, peerForFailedSegment,
                                failoverSegments[index],
                                self.__options.forceFullResynchronization))

        self._output_segments_with_persistent_mirroring_disabled(
            segs_with_persistent_mirroring_disabled)

        return GpMirrorListToBuild(segs, self.__pool, self.__options.quiet,
                                   self.__options.parallelDegree)
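
The aliasing performed above ("failoverSegment = failedSegment" followed by "failedSegment = failoverSegment.copy()") leans on Python reference semantics: later setters mutate the object registered in gpArray, while the copy preserves the failed segment's original values. A minimal sketch with a stand-in class (not the real Segment API):

    import copy

    class FakeSegment(object):
        def __init__(self, port):
            self.port = port
        def copy(self):
            return copy.copy(self)

    registered = FakeSegment(40000)          # stands in for the object held by gpArray
    failoverSegment = registered             # alias: updates are visible through gpArray
    failedSegment = failoverSegment.copy()   # detached snapshot of the failed values
    failoverSegment.port = 41000
    assert registered.port == 41000 and failedSegment.port == 40000
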
Example #9
    def __getDataDirectoriesForMirrors(self, maxPrimariesPerHost, gpArray):
        dirs = []
        filespaceOidToPathMaps = []
        while len(filespaceOidToPathMaps) < maxPrimariesPerHost:
            filespaceOidToPathMaps.append({})

        filespaceNameToOid = {}
        for fs in gpArray.getFilespaces(False):
            filespaceNameToOid[fs.getName()] = fs.getOid()

        configFile = self.__options.mirrorDataDirConfigFile
        if configFile is not None:

            #
            # load from config file
            #
            lines = readAllLinesFromFile(configFile,
                                         stripLines=True,
                                         skipEmptyLines=True)

            labelOfPathsBeingRead = "data"
            index = 0
            fsOid = gparray.SYSTEM_FILESPACE
            enteredFilespaces = {}
            for line in lines:
                if line.startswith("filespace "):
                    if index < maxPrimariesPerHost:
                        raise Exception('Number of %s directories must equal %d but %d were read from %s' % \
                            (labelOfPathsBeingRead, maxPrimariesPerHost, index, configFile))

                    fsName = line[len("filespace "):].strip()
                    labelOfPathsBeingRead = fsName

                    if fsName not in filespaceNameToOid:
                        raise Exception("Unknown filespace %s specified in input file %s" % \
                            (fsName, configFile))
                    fsOid = filespaceNameToOid[fsName]

                    if fsName in enteredFilespaces:
                        raise Exception( "Filespace %s specified twice in input file %s" % \
                            (fsName, configFile))
                    enteredFilespaces[fsName] = True

                    index = 0
                else:
                    if index == maxPrimariesPerHost:
                        raise Exception('Number of %s directories must equal %d but more were read from %s' % \
                            (labelOfPathsBeingRead, maxPrimariesPerHost, configFile))

                    path = normalizeAndValidateInputPath(line, "config file")
                    if fsOid == gparray.SYSTEM_FILESPACE:
                        dirs.append(path)
                    else:
                        filespaceOidToPathMaps[index][fsOid] = path
                    index += 1
            if index < maxPrimariesPerHost:
                raise Exception('Number of %s directories must equal %d but %d were read from %s' % \
                    (labelOfPathsBeingRead, maxPrimariesPerHost, index, configFile))

            if len(enteredFilespaces) != len(filespaceNameToOid):
                raise Exception("Only read directories for %d of %d filespaces from %s" % \
                    (len(enteredFilespaces), len(filespaceNameToOid), configFile))
        else:

            #
            # get from stdin
            #
            while len(dirs) < maxPrimariesPerHost:
                print 'Enter mirror segment data directory location %d of %d >' % (
                    len(dirs) + 1, maxPrimariesPerHost)
                line = sys.stdin.readline().strip()
                if len(line) > 0:
                    try:
                        dirs.append(normalizeAndValidateInputPath(line))
                    except PathNormalizationException, e:
                        print "\n%s\n" % e

            for fs in gpArray.getFilespaces(False):
                index = 0
                while index < maxPrimariesPerHost:
                    print "Enter mirror filespace '%s' directory location %d of %d >" % \
                                (fs.getName(), index+1, maxPrimariesPerHost)
                    line = sys.stdin.readline().strip()
                    if len(line) > 0:
                        try:
                            filespaceOidToPathMaps[index][fs.getOid()] = normalizeAndValidateInputPath(line)
                            index += 1
                        except PathNormalizationException, e:
                            print "\n%s\n" % e
Example #10
    def __getDataDirectoriesForMirrors(self, maxPrimariesPerHost, gpArray):
        dirs = []
        filespaceOidToPathMaps = []
        while len(filespaceOidToPathMaps) < maxPrimariesPerHost:
            filespaceOidToPathMaps.append({})

        filespaceNameToOid = {}
        for fs in gpArray.getFilespaces(False):
            filespaceNameToOid[fs.getName()] = fs.getOid()

        configFile = self.__options.mirrorDataDirConfigFile
        if configFile is not None:

            #
            # load from config file
            #
            lines = readAllLinesFromFile(configFile,
                                         stripLines=True,
                                         skipEmptyLines=True)

            labelOfPathsBeingRead = "data"
            index = 0
            fsOid = gparray.SYSTEM_FILESPACE
            enteredFilespaces = {}
            for line in lines:
                if line.startswith("filespace "):
                    if index < maxPrimariesPerHost:
                        raise Exception('Number of %s directories must equal %d but %d were read from %s' % \
                            (labelOfPathsBeingRead, maxPrimariesPerHost, index, configFile))

                    fsName = line[len("filespace "):].strip()
                    labelOfPathsBeingRead = fsName

                    if fsName not in filespaceNameToOid:
                        raise Exception("Unknown filespace %s specified in input file %s" % \
                            (fsName, configFile))
                    fsOid = filespaceNameToOid[fsName]

                    if gpArray.isFileSpaceShared(fsOid):
                        raise Exception("Shared filespace %s does not need to be specified in input file %s" % \
                            (fsName, configFile))

                    if fsName in enteredFilespaces:
                        raise Exception( "Filespace %s specified twice in input file %s" % \
                            (fsName, configFile))
                    enteredFilespaces[fsName] = True

                    index = 0
                else:
                    if index == maxPrimariesPerHost:
                        raise Exception('Number of %s directories must equal %d but more were read from %s' % \
                            (labelOfPathsBeingRead, maxPrimariesPerHost, configFile))

                    path = normalizeAndValidateInputPath(line, "config file")
                    if fsOid == gparray.SYSTEM_FILESPACE:
                        dirs.append(path)
                    else:
                        filespaceOidToPathMaps[index][fsOid] = path
                    index += 1
            if index < maxPrimariesPerHost:
                raise Exception('Number of %s directories must equal %d but %d were read from %s' % \
                    (labelOfPathsBeingRead, maxPrimariesPerHost, index, configFile))

            # The shared filespace path will be set later.
            for (fsName, fsOid) in filespaceNameToOid.iteritems():
                if fsName in enteredFilespaces:
                    continue
                if not gpArray.isFileSpaceShared(fsOid):
                    break
                for index in range(maxPrimariesPerHost):
                    filespaceOidToPathMaps[index][fsOid] = None
                enteredFilespaces[fsName] = True

            if len(enteredFilespaces) != len(filespaceNameToOid):
                raise Exception("Only read directories for %d of %d filespaces from %s" % \
                    (len(enteredFilespaces), len(filespaceNameToOid), configFile))
        else:

            #
            # get from stdin
            #
            while len(dirs) < maxPrimariesPerHost:
                print 'Enter mirror segment data directory location %d of %d >' % (
                    len(dirs) + 1, maxPrimariesPerHost)
                line = sys.stdin.readline().strip()
                if len(line) > 0:
                    try:
                        dirs.append(normalizeAndValidateInputPath(line))
                    except PathNormalizationException, e:
                        print "\n%s\n" % e

            for fs in gpArray.getFilespaces(False):
                # Shared storage path can only be set when we know the
                # relationship between the primary and mirror.
                fsys = fs.getFsys()
                delay_set = False
                if fsys != None and fsys.isShared():
                    delay_set = True
                    print 'Skip filespace "%s" stored on the shared filesystem "%s"' % (
                        fs.getName(), fsys.getName())

                index = 0
                while index < maxPrimariesPerHost:
                    if delay_set:
                        filespaceOidToPathMaps[index][fs.getOid()] = None
                        index += 1
                        continue

                    print "Enter mirror filespace '%s' directory location %d of %d >" % \
                                (fs.getName(), index+1, maxPrimariesPerHost)
                    line = sys.stdin.readline().strip()
                    if len(line) > 0:
                        try:
                            filespaceOidToPathMaps[index][fs.getOid()] = normalizeAndValidateInputPath(line)
                            index += 1
                        except PathNormalizationException, e:
                            print "\n%s\n" % e
Example #11
    def __getDataDirectoriesForMirrors(self, maxPrimariesPerHost, gpArray):
        dirs = []
        filespaceOidToPathMaps = []
        while len(filespaceOidToPathMaps) < maxPrimariesPerHost:
            filespaceOidToPathMaps.append({})

        filespaceNameToOid = {}
        for fs in gpArray.getFilespaces(False):
            filespaceNameToOid[fs.getName()] = fs.getOid()

        configFile = self.__options.mirrorDataDirConfigFile
        if configFile is not None:

            #
            # load from config file
            #
            lines = readAllLinesFromFile(configFile, stripLines=True, skipEmptyLines=True)

            labelOfPathsBeingRead = "data"
            index = 0
            fsOid = gparray.SYSTEM_FILESPACE
            enteredFilespaces = {}
            for line in lines:
                if line.startswith("filespace "):
                    if index < maxPrimariesPerHost:
                        raise Exception(
                            "Number of %s directories must equal %d but %d were read from %s"
                            % (labelOfPathsBeingRead, maxPrimariesPerHost, index, configFile)
                        )

                    fsName = line[len("filespace ") :].strip()
                    labelOfPathsBeingRead = fsName

                    if fsName not in filespaceNameToOid:
                        raise Exception("Unknown filespace %s specified in input file %s" % (fsName, configFile))
                    fsOid = filespaceNameToOid[fsName]

                    if fsName in enteredFilespaces:
                        raise Exception("Filespace %s specified twice in input file %s" % (fsName, configFile))
                    enteredFilespaces[fsName] = True

                    index = 0
                else:
                    if index == maxPrimariesPerHost:
                        raise Exception(
                            "Number of %s directories must equal %d but more were read from %s"
                            % (labelOfPathsBeingRead, maxPrimariesPerHost, configFile)
                        )

                    path = normalizeAndValidateInputPath(line, "config file")
                    if fsOid == gparray.SYSTEM_FILESPACE:
                        dirs.append(path)
                    else:
                        filespaceOidToPathMaps[index][fsOid] = path
                    index += 1
            if index < maxPrimariesPerHost:
                raise Exception(
                    "Number of %s directories must equal %d but %d were read from %s"
                    % (labelOfPathsBeingRead, maxPrimariesPerHost, index, configFile)
                )

            if len(enteredFilespaces) != len(filespaceNameToOid):
                raise Exception(
                    "Only read directories for %d of %d filespaces from %s"
                    % (len(enteredFilespaces), len(filespaceNameToOid), configFile)
                )
        else:

            #
            # get from stdin
            #
            while len(dirs) < maxPrimariesPerHost:
                print "Enter mirror segment data directory location %d of %d >" % (len(dirs) + 1, maxPrimariesPerHost)
                line = sys.stdin.readline().strip()
                if len(line) > 0:
                    try:
                        dirs.append(normalizeAndValidateInputPath(line))
                    except PathNormalizationException, e:
                        print "\n%s\n" % e

            for fs in gpArray.getFilespaces(False):
                index = 0
                while index < maxPrimariesPerHost:
                    print "Enter mirror filespace '%s' directory location %d of %d >" % (
                        fs.getName(),
                        index + 1,
                        maxPrimariesPerHost,
                    )
                    line = sys.stdin.readline().strip()
                    if len(line) > 0:
                        try:
                            filespaceOidToPathMaps[index][fs.getOid()] = normalizeAndValidateInputPath(line)
                            index += 1
                        except PathNormalizationException, e:
                            print "\n%s\n" % e
Example #12
    def __getMirrorsToBuildFromConfigFile(self, gpArray):

        # create fileData object from config file
        #
        filename = self.__options.mirrorConfigFile
        fslist = None
        rows = []
        with open(filename) as f:
            for lineno, line in line_reader(f):
                if fslist is None:
                    fslist = parse_filespace_order(filename, lineno, line)
                else:
                    fixed, flexible = parse_gpaddmirrors_line(filename, lineno, line, fslist)
                    rows.append(ParsedConfigFileRow(fixed, flexible, line))
        fileData = ParsedConfigFile(fslist, rows)

        # validate fileData
        #
        validateFlexibleHeadersListAllFilespaces("Mirror config", gpArray, fileData)
        filespaceNameToFilespace = dict([(fs.getName(), fs) for fs in gpArray.getFilespaces(False)])

        allAddresses = [row.getFixedValuesMap()["address"] for row in fileData.getRows()]
        allNoneArr = [None for a in allAddresses]
        interfaceLookup = GpInterfaceToHostNameCache(self.__pool, allAddresses, allNoneArr)

        #
        # build up the output now
        #
        toBuild = []
        primaries = [seg for seg in gpArray.getDbList() if seg.isSegmentPrimary(current_role=False)]
        segsByContentId = GpArray.getSegmentsByContentId(primaries)

        # note: passed port offset in this call should not matter
        calc = GpMirrorBuildCalculator(gpArray, self.__options.mirrorOffset, [], [])

        for row in fileData.getRows():
            fixedValues = row.getFixedValuesMap()
            flexibleValues = row.getFlexibleValuesMap()

            contentId = int(fixedValues["contentId"])
            address = fixedValues["address"]
            #
            # read the rest and add the mirror
            #
            port = int(fixedValues["port"])
            replicationPort = int(fixedValues["replicationPort"])
            primarySegmentReplicationPort = int(fixedValues["primarySegmentReplicationPort"])
            dataDir = normalizeAndValidateInputPath(fixedValues["dataDirectory"], "in config file", row.getLine())
            hostName = interfaceLookup.getHostName(address)
            if hostName is None:
                raise Exception("Segment Host Address %s is unreachable" % address)

            filespaceOidToPathMap = {}
            for fsName, path in flexibleValues.iteritems():
                path = normalizeAndValidateInputPath(path, "in config file", row.getLine())
                filespaceOidToPathMap[filespaceNameToFilespace[fsName].getOid()] = path

            primary = segsByContentId[contentId]
            if primary is None:
                raise Exception("Invalid content %d specified in input file" % contentId)
            primary = primary[0]

            calc.addMirror(
                toBuild,
                primary,
                hostName,
                address,
                port,
                dataDir,
                replicationPort,
                primarySegmentReplicationPort,
                filespaceOidToPathMap,
            )

        if len(toBuild) != len(primaries):
            raise Exception(
                "Wrong number of mirrors specified (specified %s mirror(s) for %s primary(ies))"
                % (len(toBuild), len(primaries))
            )

        return GpMirrorListToBuild(toBuild, self.__pool, self.__options.quiet, self.__options.parallelDegree)
Example #13
    def getRecoveryActionsFromConfigFile(self, gpArray):
        """
        getRecoveryActionsFromConfigFile

        returns: a tuple (segments in change tracking disabled mode which are unable to recover, GpMirrorListToBuild object
                 containing information of segments which are able to recover)
        """
        filename = self.__options.recoveryConfigFile
        rows = []
        with open(filename) as f:
            for lineno, line in line_reader(f):
                rows.append(self._getParsedRow(filename, lineno, line))

        allAddresses = [
            row["newAddress"] for row in rows if "newAddress" in row
        ]

        failedSegments = []
        failoverSegments = []
        for row in rows:
            # find the failed segment
            failedAddress = row['failedAddress']
            failedPort = row['failedPort']
            failedDataDirectory = normalizeAndValidateInputPath(
                row['failedDataDirectory'], "config file", row['lineno'])
            failedSegment = None
            for segment in gpArray.getDbList():
                if (segment.getSegmentAddress() == failedAddress
                        and str(segment.getSegmentPort()) == failedPort
                        and segment.getSegmentDataDirectory()
                        == failedDataDirectory):

                    if failedSegment is not None:
                        # this could be an assertion -- configuration should not allow multiple entries!
                        raise Exception((
                            "A segment to recover was found twice in configuration.  "
                            "This segment is described by address|port|directory '%s|%s|%s' "
                            "on the input line: %s") %
                                        (failedAddress, failedPort,
                                         failedDataDirectory, row['lineno']))
                    failedSegment = segment

            if failedSegment is None:
                raise Exception("A segment to recover was not found in configuration.  " \
                                "This segment is described by address|port|directory '%s|%s|%s' on the input line: %s" %
                                (failedAddress, failedPort, failedDataDirectory, row['lineno']))

            failoverSegment = None
            if "newAddress" in row:
                """
                When the second group is passed, the caller is telling us where to fail over, so
                  build a failover segment
                """
                # these two lines make it so that failoverSegment points to the object that is registered in gparray
                failoverSegment = failedSegment
                failedSegment = failoverSegment.copy()

                address = row["newAddress"]
                try:
                    port = int(row["newPort"])
                except ValueError:
                    raise Exception(
                        'Config file format error, invalid number value in line: %s'
                        % (row['lineno']))

                dataDirectory = normalizeAndValidateInputPath(
                    row["newDataDirectory"], "config file", row['lineno'])
                # FIXME: hostname probably should not be address, but to do so, "hostname" should be added to gpaddmirrors config file
                # FIXME: This appears identical to __getMirrorsToBuildFromConfigFile in clsAddMirrors
                hostName = address

                # now update values in failover segment
                failoverSegment.setSegmentAddress(address)
                failoverSegment.setSegmentHostName(hostName)
                failoverSegment.setSegmentPort(port)
                failoverSegment.setSegmentDataDirectory(dataDirectory)

            # this must come AFTER the if check above because failedSegment can be adjusted to
            #   point to a different object
            failedSegments.append(failedSegment)
            failoverSegments.append(failoverSegment)

        peersForFailedSegments = self.findAndValidatePeersForFailedSegments(
            gpArray, failedSegments)

        segs = []
        segs_with_persistent_mirroring_disabled = []
        for index, failedSegment in enumerate(failedSegments):
            peerForFailedSegment = peersForFailedSegments[index]

            peerForFailedSegmentDbId = peerForFailedSegment.getSegmentDbId()

            if failedSegment.unreachable:
                continue

            segs.append(
                GpMirrorToBuild(failedSegment, peerForFailedSegment,
                                failoverSegments[index],
                                self.__options.forceFullResynchronization))

        self._output_segments_with_persistent_mirroring_disabled(
            segs_with_persistent_mirroring_disabled)

        return GpMirrorListToBuild(
            segs,
            self.__pool,
            self.__options.quiet,
            self.__options.parallelDegree,
            forceoverwrite=True,
            progressMode=self.getProgressMode(),
            parallelPerHost=self.__options.parallelPerHost)
Example #14
    def getRecoveryActionsFromConfigFile(self, gpArray):
        """
        getRecoveryActionsFromConfigFile

        returns: a tuple (segments in change tracking disabled mode which are unable to recover, GpMirrorListToBuild object
                 containing information of segments which are able to recover)
        """

        # create fileData object from config file
        #
        filename = self.__options.recoveryConfigFile
        rows = []
        with open(filename) as f:
            for lineno, line in line_reader(f):
                fixed, flexible = parse_gprecoverseg_line(filename, lineno, line)
                rows.append(ParsedConfigFileRow(fixed, flexible, line))
        fileData = ParsedConfigFile([], rows)

        allAddresses = [row.getFixedValuesMap()["newAddress"] for row in fileData.getRows()
                        if "newAddress" in row.getFixedValuesMap()]
        allNoneArr = [None] * len(allAddresses)
        interfaceLookup = GpInterfaceToHostNameCache(self.__pool, allAddresses, allNoneArr)

        failedSegments = []
        failoverSegments = []
        for row in fileData.getRows():
            fixedValues = row.getFixedValuesMap()
            flexibleValues = row.getFlexibleValuesMap()

            # find the failed segment
            failedAddress = fixedValues['failedAddress']
            failedPort = fixedValues['failedPort']
            failedDataDirectory = normalizeAndValidateInputPath(fixedValues['failedDataDirectory'],
                                                                "config file", row.getLine())
            failedSegment = None
            for segment in gpArray.getDbList():
                if segment.getSegmentAddress() == failedAddress and \
                                str(segment.getSegmentPort()) == failedPort and \
                                segment.getSegmentDataDirectory() == failedDataDirectory:

                    if failedSegment is not None:
                        #
                        # this could be an assertion -- configuration should not allow multiple entries!
                        #
                        raise Exception(("A segment to recover was found twice in configuration.  "
                                         "This segment is described by address:port:directory '%s:%s:%s' "
                                         "on the input line: %s") %
                                        (failedAddress, failedPort, failedDataDirectory, row.getLine()))
                    failedSegment = segment

            if failedSegment is None:
                raise Exception("A segment to recover was not found in configuration.  " \
                                "This segment is described by address:port:directory '%s:%s:%s' on the input line: %s" %
                                (failedAddress, failedPort, failedDataDirectory, row.getLine()))

            failoverSegment = None
            if "newAddress" in fixedValues:
                """
                When the second group is passed, the caller is telling us where to fail over, so
                  build a failover segment
                """
                # these two lines make it so that failoverSegment points to the object that is registered in gparray
                failoverSegment = failedSegment
                failedSegment = failoverSegment.copy()

                address = fixedValues["newAddress"]
                try:
                    port = int(fixedValues["newPort"])
                except ValueError:
                    raise Exception('Config file format error, invalid number value in line: %s' % (row.getLine()))

                dataDirectory = normalizeAndValidateInputPath(fixedValues["newDataDirectory"], "config file",
                                                              row.getLine())

                hostName = interfaceLookup.getHostName(address)
                if hostName is None:
                    raise Exception('Unable to find host name for address %s from line:%s' % (address, row.getLine()))

                # now update values in failover segment
                failoverSegment.setSegmentAddress(address)
                failoverSegment.setSegmentHostName(hostName)
                failoverSegment.setSegmentPort(port)
                failoverSegment.setSegmentDataDirectory(dataDirectory)

            # this must come AFTER the if check above because failedSegment can be adjusted to
            #   point to a different object
            failedSegments.append(failedSegment)
            failoverSegments.append(failoverSegment)

        peersForFailedSegments = self.findAndValidatePeersForFailedSegments(gpArray, failedSegments)

        segs = []
        segs_with_persistent_mirroring_disabled = []
        for index, failedSegment in enumerate(failedSegments):
            peerForFailedSegment = peersForFailedSegments[index]

            peerForFailedSegmentDbId = peerForFailedSegment.getSegmentDbId()
            segs.append(GpMirrorToBuild(failedSegment, peerForFailedSegment, failoverSegments[index],
                                        self.__options.forceFullResynchronization))

        self._output_segments_with_persistent_mirroring_disabled(segs_with_persistent_mirroring_disabled)

        return GpMirrorListToBuild(segs, self.__pool, self.__options.quiet,
                                   self.__options.parallelDegree, forceoverwrite=True)
Example #15
    def getMirrorTriples(self):
        failedSegments = []
        failoverSegments = []
        for row in self.rows:
            # find the failed segment
            failedAddress = row['failedAddress']
            failedPort = row['failedPort']
            failedDataDirectory = normalizeAndValidateInputPath(
                row['failedDataDirectory'], "config file", row['lineno'])
            failedSegment = None
            for segment in self.gpArray.getDbList():
                if (segment.getSegmentAddress() == failedAddress
                        and str(segment.getSegmentPort()) == failedPort
                        and segment.getSegmentDataDirectory()
                        == failedDataDirectory):

                    if failedSegment is not None:
                        # this could be an assertion -- configuration should not allow multiple entries!
                        raise Exception((
                            "A segment to recover was found twice in configuration.  "
                            "This segment is described by address|port|directory '%s|%s|%s' "
                            "on the input line: %s") %
                                        (failedAddress, failedPort,
                                         failedDataDirectory, row['lineno']))
                    failedSegment = segment

            if failedSegment is None:
                raise Exception("A segment to recover was not found in configuration.  " \
                                "This segment is described by address|port|directory '%s|%s|%s' on the input line: %s" %
                                (failedAddress, failedPort, failedDataDirectory, row['lineno']))

            # TODO: These 2 cases have different behavior which might be confusing to the user.
            # "<failed_address>|<port>|<data_dir> <recovery_address>|<port>|<data_dir>" does full recovery
            # "<failed_address>|<port>|<data_dir>" does incremental recovery
            failoverSegment = None
            if "newAddress" in row:
                """
                When the second group is passed, the caller is telling us where to fail over, so
                  build a failover segment
                """
                # these two lines make it so that failoverSegment points to the object that is registered in gparray
                failoverSegment = failedSegment
                failedSegment = failoverSegment.copy()

                address = row["newAddress"]
                try:
                    port = int(row["newPort"])
                except ValueError:
                    raise Exception(
                        'Config file format error, invalid number value in line: %s'
                        % (row['lineno']))

                dataDirectory = normalizeAndValidateInputPath(
                    row["newDataDirectory"], "config file", row['lineno'])
                # TODO: hostname probably should not be address, but to do so, "hostname" should be added to gpaddmirrors config file
                # TODO: This appears identical to __getMirrorsToBuildFromConfigFile in clsAddMirrors
                hostName = address

                # now update values in failover segment
                failoverSegment.setSegmentAddress(address)
                failoverSegment.setSegmentHostName(hostName)
                failoverSegment.setSegmentPort(port)
                failoverSegment.setSegmentDataDirectory(dataDirectory)

            # this must come AFTER the if check above because failedSegment can be adjusted to
            #   point to a different object
            failedSegments.append(failedSegment)
            failoverSegments.append(failoverSegment)

        peersForFailedSegments = _findAndValidatePeersForFailedSegments(
            self.gpArray, failedSegments)

        for index, failedSegment in enumerate(failedSegments):
            peerForFailedSegment = peersForFailedSegments[index]

            if failedSegment.unreachable:
                continue

            self.recoveryTriples.append(
                RecoverTriplet(failedSegment, peerForFailedSegment,
                               failoverSegments[index]))

        return self.recoveryTriples