Пример #1
0
def photos(ctx):
    """ List members who either don't have a photo or their photo is too small.

    A photo counts as missing when it is unset or still the
    'unknown.jpeg' placeholder; it counts as too small when either
    dimension is under 300 pixels.
    """
    config = ctx.obj['config']
    directory = Directory(config.path('data:path', 'directory.json'))

    missing = []
    too_small = []
    for family in directory.families():
        for member in family.members():
            if not member.photo or member.photo == 'unknown.jpeg':
                missing.append(member.name)
            else:
                img = Image.open(
                    config.path('data:path', 'photos', member.photo))
                width, height = img.size
                if width < 300 or height < 300:
                    too_small.append(member.name)

    print("----- Missing Photo -----")
    for name in missing:
        print(name)

    print("\n")

    print("----- Photo Too Small -----")
    for name in too_small:
        print(name)
Пример #2
0
def main():
    """Pull member names from the first Google spreadsheet's first
    worksheet and add every 'A'-column value to the Directory.

    Improvements: removed the unused ``names`` local and replaced the
    ``id_parts[len(id_parts) - 1]`` pattern with negative indexing.
    """
    user = User()

    client = gdata.spreadsheet.service.SpreadsheetsService()
    client.ClientLogin(user.email, user.ticket)

    # Spreadsheets
    feed = client.GetSpreadsheetsFeed()

    logging.info(feed)

    # The spreadsheet key is the last component of the entry's id URL.
    spreadsheet = feed.entry[0].id.text.split('/')[-1]

    # Worksheets within the Spreadsheet
    wsFeed = client.GetWorksheetsFeed(spreadsheet)
    worksheet = wsFeed.entry[0].id.text.split('/')[-1]

    # Get cell data
    feed = client.GetCellsFeed(spreadsheet, worksheet)

    directory = Directory()
    for entry in feed.entry:
        # Only pull data from the 'Name' column ('A' column)
        if entry.title.text[0] == 'A':
            person = entry.content.text.strip()
            directory.add(person)
Пример #3
0
def edit_member(ctx, name, family_name):
    """ Edit a Family Member """
    directory = Directory(ctx.obj['config'].path('data:path',
                                                 'directory.json'))

    family, person = __find_member(directory, name, family_name)

    if not person:
        print("Member not found: '%s'" % (name))
        return

    dirty = False
    member_data = person.to_json()
    for attr in sorted(member_data.keys()):
        # Name and photo are not edited here.
        if attr in ('name', 'photo'):
            continue

        if attr == 'phone':
            # Merge newly-entered numbers into the existing phone dict.
            entered = Prompt.phone()
            if entered:
                member_data['phone'].update(entered)
                entered = member_data['phone']
        elif attr == 'relationships':
            entered = Prompt.relationships()
        else:
            entered = Prompt.input("%s (%s): " % (attr, member_data[attr]))

        # Any truthy answer replaces the attribute and marks us dirty.
        if entered:
            dirty = True
            setattr(person, attr, entered)

    if dirty:
        directory.save()
Пример #4
0
def edit(ctx, name):
    """ Edit a Family's Data """
    directory = Directory(ctx.obj['config'].path('data:path',
                                                 'directory.json'))
    family = directory.get(name)

    family = Prompt.choose_from_list(family, "Which Family? ")

    need_to_save = False
    fam_data = family.to_json()
    fields = sorted(fam_data.keys())
    # Loop variable renamed from 'name' so it no longer shadows the
    # family-name parameter.
    for field in fields:
        if field in ('id', 'name', 'members'):
            continue

        # TODO: what if notes already exist?
        if field == 'notes':
            new_value = Prompt.notes()
        else:
            old_value = fam_data[field]
            new_value = Prompt.input("%s (%s): " % (field, old_value))

        if new_value:
            need_to_save = True
            setattr(family, field, new_value)

    if need_to_save:
        directory.save()
Пример #5
0
def add_member(ctx, name):
    """ Add a Family Member """
    config = ctx.obj['config']
    directory = Directory(config.path('data:path', 'directory.json'))

    # Prompt for the new member(s) on the named family, then persist.
    __add_members(directory.get(name))
    directory.save()
Пример #6
0
def view(ctx, name):
    """ View a Family """
    config = ctx.obj['config']
    directory = Directory(config.path('data:path', 'directory.json'))

    # 'get' may match several families; let the user pick one.
    matches = directory.get(name)
    family = Prompt.choose_from_list(matches, "Which Family? ")

    print(repr(family))
 def get_backup_name_list(self):
     """Return the sorted backup export names, or [] when the listing
     is not a list."""
     self.log.debug("Metadata - Get backup name list.")
     file_list = Directory(self.log).get_file_list(
         self.path, self.BACKUP_EXPORT_INFO_META)
     if not isinstance(file_list, list):
         self.log.debug("Metadata - Invalid backup name list.")
         return []
     return sorted(file_list)
Пример #8
0
    def __init__(self, data_path, file_name):
        """Create the output PDF canvas and load the directory data.

        data_path: directory holding directory.json; also where the
                   PDF is written.
        file_name: base name (without extension) of the generated PDF.
        """
        self.__data_path = data_path

        # Create and Prep page
        # Landscape letter page; pageCompression=0 leaves the PDF
        # streams uncompressed.
        self.__pdf = Canvas("%s/%s.pdf" % (data_path, file_name),
                            pagesize=landscape(letter),
                            pageCompression=0)

        # Read Directory Data
        self.__directory = Directory("%s/directory.json" % (data_path))
Пример #9
0
def vcf(ctx, vcf_file):
    """Import VCF Card"""

    config = ctx.obj['config']
    directory = Directory(config.path('data:path', 'directory.json'))

    # Parse the card; photos land in the configured photos directory.
    parser = VCFParser(photo_dir=config.path('data:path', 'photos'))
    data = parser.parse(vcf_file)

    print("----- %s - %s -----" % (data['last_name'], data['address']))

    matches = directory.get(data['last_name'])
    family = Prompt.choose_from_list(matches, "Which Family")

    if not family:
        # No family on file yet: create one with this person as its
        # first member.
        directory.add(Family(name=data['last_name'],
                             address=data['address'],
                             city=data['city'],
                             state=data['state'],
                             zip=data['zip'],
                             members=[data]))
        directory.save()
        return

    if family.get(data['name']):
        print("%s already exists." % (data['name']))
    else:
        family.add(Person(**data))
        directory.save()
 def get_backup_rbd_info_list(self):
     """Return (pool, rbd, info) tuples for every backed-up RBD."""
     self.log.debug("Metadata - Get backup rbd info list.")
     directory = Directory(self.log)
     rbd_info_list = []
     for pool_name in directory.get_dir_list(self.path):
         # Skip the export-info directory; everything else is a pool.
         if pool_name == self.BACKUP_EXPORT_INFO_META:
             continue
         for rbd_name in directory.get_dir_list(self.path, pool_name):
             info = self.get_rbd_info(pool_name, rbd_name)
             rbd_info_list.append((pool_name, rbd_name, info))
     return rbd_info_list
Пример #11
0
 def _load_directory(self):
     """Build a Directory from names in the family spreadsheet
     (row 1 of the 'Current' worksheet, minus its first cell)."""
     spreadsheet_name = "Matsumoto Family Directory"
     worksheet_name = "Current"

     directory = Directory()

     data = Drive().get_data(spreadsheet_name, worksheet_name)
     directory.adds(data[1][1:])

     return directory
Пример #12
0
class Exporter:
    """Export the member directory to other formats (currently Markdown)."""

    def __init__(self, data_path):
        # Read Directory Data
        self.__data_path = data_path
        self.__directory = Directory(F"{data_path}/directory.json")

    def export_markdown(self, filename):
        """Write the whole directory to <data_path>/<filename>.md.

        Bug fix: the output path previously hard-coded its name and
        ignored the ``filename`` parameter entirely.
        """
        with open(F"{self.__data_path}/{filename}.md", "w") as output:

            output.write("# Massey's Chapel UMC - Member Directory\n\n")

            # Family -> Members -> Person
            for family in self.__directory.families():
                output.write(F"## {family.name}\n")
                output.write(F"""{family.address}
{family.city}, {family.state} {family.zip}
\n""")

                for member in family.members():
                    # Missing birthdays/emails render as "N/A".
                    bday = "N/A" if member.birthday is None else datetime.strftime(
                        member.birthday, "%b %d")
                    email = "N/A" if member.email is None else member.email

                    phone = "N/A"
                    if member.phone:
                        # 'kind' avoids shadowing the builtin 'type'.
                        phone = " | ".join(
                            F"{kind}: {num}"
                            for kind, num in member.phone.items())

                    person = "* %-25s\t%s\t%-25s\t%s\n" % (member.name, bday,
                                                           email, phone)
                    output.write(person)

                output.write("\n")
    def __init__(self, log, path, meta_dir=None):
        """Prepare metadata paths and helpers.

        log: logger shared with the file/directory helpers.
        path: backup root; metadata lives under <path>/<meta_dir>.
        meta_dir: metadata directory name; defaults to
                  const.META_DIRNAME.
        """
        self.log = log

        # PEP 8: compare to None with 'is', not '=='.
        if meta_dir is None:
            meta_dir = const.META_DIRNAME
        self.path = "%s/%s" % (path, meta_dir)

        self.metafile = MetaFile(log)
        self.directory = Directory(log)

        # Metadata file/directory names, from project constants.
        self.CLUSTER_INFO_META = const.META_CLUSTER_INFO
        self.CLUSTER_RBD_INFO_META = const.META_CLUSTER_RBD_INFO
        self.CLUSTER_RBD_SNAPSHOT_INFO_META = const.META_CLUSTER_RBD_SNAPSHOT_INFO

        self.BACKUP_EXPORT_INFO_META = const.META_BACKUP_EXPORT_INFO
        self.BACKUP_CIRCLE_INFO_META = const.META_BACKUP_CIRCLE_INFO
        self.BACKUP_INCREMENTAL_INFO_META = const.META_BACKUP_INCREMENTAL_INFO

        self.LIST_COUNTER_KEY = const.META_LIST_COUNTER_KEY
        self.MAX_BACKUP_INFO_RETAIN_COUNT = const.META_ROTATION_LENGTH
Пример #14
0
def delete(ctx, name):
    """ Delete a Family from the Directory """
    config = ctx.obj['config']
    directory = Directory(config.path('data:path', 'directory.json'))

    family = Prompt.choose_from_list(directory.get(name), "Which Family? ")

    # Require an explicit 'yes' before removing anything.
    question = "Confirm Delete: %s - %s (yes|no)? " % (family.name,
                                                       family.address)
    if Prompt.input(question) == 'yes':
        directory.delete(family)
        directory.save()
    else:
        print("Not Deleting Family: %s - %s" % (family.name, family.address))
Пример #15
0
def del_member(ctx, name, family_name):
    """ Remove a Family Member from a Family """
    directory = Directory(ctx.obj['config'].path('data:path',
                                                 'directory.json'))

    family, person = __find_member(directory, name, family_name)

    if not person:
        print("Member not found: '%s'" % (name))
        return

    # Drop the member, clean up their photo, then persist.
    family.delete(person)
    directory.delete_photo(person)

    directory.save()
    print("'%s' successfully deleted." % (name))
Пример #16
0
def add(ctx, name):
    """ Add a new Family to the Directory """
    directory = Directory(ctx.obj['config'].path('data:path',
                                                 'directory.json'))

    family_data = Family(name=name).to_json()
    fields = sorted(family_data.keys())
    print("----- Adding the '%s' Family -----" % (family_data['name']))
    # Loop variable renamed from 'name' so it no longer shadows the
    # family-name parameter.
    for field in fields:
        if field in ('id', 'name', 'members'):
            continue

        new_value = Prompt.input("%s: " % (field))
        family_data[field] = new_value if new_value else ""

    new_family = Family(**family_data)
    print(new_family)
    directory.add(new_family)

    __add_members(new_family)

    directory.save()
Пример #17
0
def fix(ctx, thing):
    """ Run bulk data fix commands """
    directory = Directory(ctx.obj['config'].path('data:path',
                                                 'directory.json'))

    # Fix relationships structure
    if thing == "relationships":
        for fam in directory.families():
            for person in fam.members():
                person.relationships = [{
                    'type': rel['type'].capitalize(),
                    'name': rel['name']
                } for rel in person.relationships]
        directory.save()
    # Fix email addr -- set to None if none
    elif thing == "email":
        for fam in directory.families():
            for person in fam.members():
                if person.email == "N/A":
                    person.email = None
        directory.save()
    # These fixes need only a re-save — presumably the transformation
    # happens during Directory load/save itself (TODO confirm). The
    # four identical elif branches were collapsed into one.
    elif thing in ("add_notes", "family_member_to_list",
                   "directory_to_list", "format"):
        directory.save()
    else:
        print("I'm sorry Dave, I'm afraid I can't fix %s!" % (thing))
Пример #18
0
if __name__ == '__main__':
    config = Config().local()

    files = []

    # TODO use argparse
    # TODO add command to diff 2 files
    # TODO add summary flag
    try:
        path = sys.argv[1]
    except IndexError:
        path = ''

    progress = '--progress' in sys.argv

    dir = Directory(path or './')
    for file in dir.files_iter(config.skip):
        file.snippets = list(file.parse(config.snippet))
        files.append(file)

    print('Analyzing content of "{}"'.format(dir.path))
    print('Satisfactory files found: {}'.format(len(files)))
    print('Please be patient ...')

    paired = []
    similars = []

    total = sum([len(file.snippets) for file in files])**2
    i = 0

    for file_a in files:
class RBD_Backup_Metadata(object):
    """Read/write the metadata files that describe RBD backups.

    All metadata lives under "<path>/<meta_dir>" and is read/written
    through MetaFile; directory removal goes through Directory.
    Getters return an empty dict/list when the stored data is missing
    or has the wrong shape, so callers never see raw read failures.

    Idiom fixes applied: '== None' -> 'is None', '== False' ->
    'is False' (sort_dict_list signals failure by returning False),
    and one unused local removed in add_backup_info.
    """

    def __init__(self, log, path, meta_dir=None):
        self.log = log

        if meta_dir is None:
            meta_dir = const.META_DIRNAME
        self.path = "%s/%s" % (path, meta_dir)

        self.metafile = MetaFile(log)
        self.directory = Directory(log)

        # Metadata file/directory names, from project constants.
        self.CLUSTER_INFO_META = const.META_CLUSTER_INFO
        self.CLUSTER_RBD_INFO_META = const.META_CLUSTER_RBD_INFO
        self.CLUSTER_RBD_SNAPSHOT_INFO_META = const.META_CLUSTER_RBD_SNAPSHOT_INFO

        self.BACKUP_EXPORT_INFO_META = const.META_BACKUP_EXPORT_INFO
        self.BACKUP_CIRCLE_INFO_META = const.META_BACKUP_CIRCLE_INFO
        self.BACKUP_INCREMENTAL_INFO_META = const.META_BACKUP_INCREMENTAL_INFO

        self.LIST_COUNTER_KEY = const.META_LIST_COUNTER_KEY
        self.MAX_BACKUP_INFO_RETAIN_COUNT = const.META_ROTATION_LENGTH

    def set_cluster_info(self, meta):
        """Write the cluster info metadata file."""
        filepath = os.path.join(self.path, self.CLUSTER_INFO_META)
        return self.metafile.write(meta, filepath=filepath)

    def set_backup_export_info(self, export_name, meta):
        """Write the export info file for one named backup."""
        filepath = os.path.join(self.path, self.BACKUP_EXPORT_INFO_META,
                                export_name)
        return self.metafile.write(meta, filepath=filepath)

    def set_rbd_info(self, pool_name, rbd_name, meta):
        """Write the per-RBD info file."""
        filepath = os.path.join(self.path, pool_name, rbd_name,
                                self.CLUSTER_RBD_INFO_META)
        return self.metafile.write(meta, filepath=filepath)

    def set_backup_circle_list(self, pool_name, rbd_name, meta):
        """Write the backup circle list for one RBD."""
        filepath = os.path.join(self.path, pool_name, rbd_name,
                                self.BACKUP_CIRCLE_INFO_META)
        return self.metafile.write(meta, filepath=filepath)

    def set_backup_snapshot_list(self, pool_name, rbd_name, meta):
        """Write the snapshot list for one RBD."""
        filepath = os.path.join(self.path, pool_name, rbd_name,
                                self.CLUSTER_RBD_SNAPSHOT_INFO_META)
        return self.metafile.write(meta, filepath=filepath)

    def set_backup_incremental_list(self, pool_name, rbd_name, circle_name,
                                    meta):
        """Write the incremental list for one backup circle."""
        filepath = os.path.join(self.path, pool_name, rbd_name,
                                self.BACKUP_INCREMENTAL_INFO_META, circle_name)
        return self.metafile.write(meta, filepath=filepath)

    def get_cluster_info(self):
        """Return the cluster info dict, or {} when missing/invalid."""
        self.log.debug("Metadata - Get backup cluster info.")
        filepath = os.path.join(self.path, self.CLUSTER_INFO_META)
        cluster_info = self.metafile.read(filepath=filepath)
        if isinstance(cluster_info, dict):
            return cluster_info
        self.log.debug("Metadata - Invalid backup cluster info.")
        return {}

    def get_rbd_info(self, pool_name, rbd_name):
        """Return the per-RBD info dict, or {} when missing/invalid."""
        self.log.debug("Metadata - Get backup RBD info.")
        filepath = os.path.join(self.path, pool_name, rbd_name,
                                self.CLUSTER_RBD_INFO_META)
        rbd_info = self.metafile.read(filepath=filepath)
        if isinstance(rbd_info, dict):
            return rbd_info
        self.log.debug("Metadata - Invalid backup RBD info.")
        return {}

    def get_backup_name_list(self):
        """Return sorted backup export names, or [] when invalid."""
        self.log.debug("Metadata - Get backup name list.")
        directory = Directory(self.log)
        file_list = directory.get_file_list(self.path,
                                            self.BACKUP_EXPORT_INFO_META)
        if isinstance(file_list, list):
            return sorted(file_list)
        self.log.debug("Metadata - Invalid backup name list.")
        return []

    def get_backup_rbd_list(self, backup_name):
        """Return the RBD list of one named backup, or [] when invalid."""
        self.log.debug("Metadata - Get backup rbd list.")
        filepath = os.path.join(self.path, self.BACKUP_EXPORT_INFO_META,
                                backup_name)
        rbd_list = self.metafile.read(filepath=filepath)
        if isinstance(rbd_list, list):
            return rbd_list
        self.log.debug("Metadata - Invalid backup rbd list.")
        return []

    def get_backup_rbd_info_list(self):
        """Return (pool, rbd, info) tuples for every backed-up RBD."""
        self.log.debug("Metadata - Get backup rbd info list.")
        directory = Directory(self.log)
        dir_name_list = directory.get_dir_list(self.path)
        rbd_info_list = []
        for dir_name in dir_name_list:
            # The export-info directory is metadata, not a pool dir.
            if dir_name != self.BACKUP_EXPORT_INFO_META:
                rbd_name_list = directory.get_dir_list(self.path, dir_name)
                for rbd_name in rbd_name_list:
                    rbd_info = self.get_rbd_info(dir_name, rbd_name)
                    rbd_info_list.append((dir_name, rbd_name, rbd_info))
        return rbd_info_list

    def get_backup_circle_list(self, pool_name, rbd_name):
        """Return the circle list sorted by the counter key, or []."""
        self.log.debug("Metadata - Get backup circle list.")
        filepath = os.path.join(self.path, pool_name, rbd_name,
                                self.BACKUP_CIRCLE_INFO_META)
        circle_list = self.metafile.read(filepath=filepath)
        if isinstance(circle_list, list):
            sorted_circle_list = sort_dict_list(circle_list,
                                                self.LIST_COUNTER_KEY,
                                                reverse=False)
            # sort_dict_list signals failure by returning False.
            if sorted_circle_list is False:
                self.log.debug("Metadata - Unable to sort backup circle list.")
            else:
                return sorted_circle_list
        self.log.debug("Metadata - Invalid backup circle list.")
        return []

    def get_backup_snapshot_list(self, pool_name, rbd_name):
        """Return the snapshot list sorted by the counter key, or []."""
        self.log.debug("Metadata - Get backup snapshot list.")
        filepath = os.path.join(self.path, pool_name, rbd_name,
                                self.CLUSTER_RBD_SNAPSHOT_INFO_META)
        snapshot_list = self.metafile.read(filepath=filepath)
        if isinstance(snapshot_list, list):
            sorted_snapshot_list = sort_dict_list(snapshot_list,
                                                  self.LIST_COUNTER_KEY,
                                                  reverse=False)
            if sorted_snapshot_list is False:
                self.log.debug(
                    "Metadata - Unable to sort backup snapshot list.")
            else:
                return sorted_snapshot_list
        self.log.debug("Metadata - Invalid backup snapshot list.")
        return []

    def get_backup_incremental_list(self, pool_name, rbd_name, circle_name):
        """Return the incremental list for one circle, sorted, or []."""
        self.log.debug("Metadata - Get backup incremental list.")
        filepath = os.path.join(self.path, pool_name, rbd_name,
                                self.BACKUP_INCREMENTAL_INFO_META, circle_name)
        incremental_list = self.metafile.read(filepath=filepath)
        if isinstance(incremental_list, list):
            sorted_incremental_list = sort_dict_list(incremental_list,
                                                     self.LIST_COUNTER_KEY,
                                                     reverse=False)
            if sorted_incremental_list is False:
                self.log.debug(
                    "Metadata - Unable to sort backup incremental list.")
            else:
                return sorted_incremental_list
        self.log.debug("Metadata - Invalid backup incremental list.")
        return []

    def del_backup_circle_info(self,
                               pool_name,
                               rbd_name,
                               delete_circle_info,
                               key=None):
        """Drop the circle entry matching on *key* and rewrite the list.

        Returns the write result, or False on error.
        """
        try:
            self.log.debug("Metadata - Delete a backup circle.")
            if key is None:
                d_key = self.LIST_COUNTER_KEY
            else:
                d_key = key

            circle_list = self.get_backup_circle_list(pool_name, rbd_name)
            new_circle_list = []
            for circle_info in circle_list:
                if delete_circle_info[d_key] != circle_info[d_key]:
                    new_circle_list.append(circle_info)
            return self.set_backup_circle_list(pool_name, rbd_name,
                                               new_circle_list)
        except Exception as e:
            self.log.error("Metadata - Unable to delete a backup circle. %s" %
                           e)
            return False

    def del_backup_snapshot_info(self,
                                 pool_name,
                                 rbd_name,
                                 delete_snapshot_info,
                                 key=None):
        """Drop the snapshot entry matching on *key* and rewrite the list.

        Returns the write result, or False on error.
        """
        try:
            self.log.debug("Metadata - Delete a backup snapshot.")
            if key is None:
                d_key = self.LIST_COUNTER_KEY
            else:
                d_key = key

            snapshot_list = self.get_backup_snapshot_list(pool_name, rbd_name)
            new_snapshot_list = []
            for snapshot_info in snapshot_list:
                if delete_snapshot_info[d_key] != snapshot_info[d_key]:
                    new_snapshot_list.append(snapshot_info)
            return self.set_backup_snapshot_list(pool_name, rbd_name,
                                                 new_snapshot_list)
        except Exception as e:
            self.log.error(
                "Metadata - Unable to delete a backup snapshot. %s" % e)
            return False

    def del_backup_incremental_info(self,
                                    pool_name,
                                    rbd_name,
                                    circle_name,
                                    incr_info=None,
                                    key=None):
        """Delete one incremental entry, or the whole incremental file
        when *incr_info* is None. Returns False on error.
        """
        try:
            self.log.debug("Metadata - Delete a backup incremental.")
            filepath = os.path.join(self.path, pool_name, rbd_name,
                                    self.BACKUP_INCREMENTAL_INFO_META,
                                    circle_name)

            if incr_info is None:
                self.log.debug(
                    "Metadata - Delete backup incremental file '%s'." %
                    filepath)
                self.directory.delete(filepath)
                return True

            incremental_list = self.metafile.read(filepath=filepath)
            new_incremental_list = []
            if isinstance(incremental_list, list):
                for incremental_name in incremental_list:
                    if incremental_name != incr_info:
                        new_incremental_list.append(incremental_name)
            return self.set_backup_incremental_list(pool_name, rbd_name,
                                                    circle_name,
                                                    new_incremental_list)
        except Exception as e:
            self.log.error(
                "Metadata - Unable to delete a backup incremental. %s" % e)
            return False

    def del_backup_info(self, backup_name):
        """Delete one backup's export info file. Returns False on error."""
        try:
            self.log.debug("Metadata - Delete a backup info.")
            filepath = os.path.join(self.path, self.BACKUP_EXPORT_INFO_META,
                                    backup_name)
            self.directory.delete(filepath)
            return True
        except Exception as e:
            self.log.error("Metadata - Unable to delete a backup info. %s" % e)
            return False

    def del_rbd_info(self, pool_name, rbd_name):
        """Delete one RBD's metadata directory. Returns False on error."""
        try:
            self.log.debug("Metadata - Delete a rbd meta.")
            filepath = os.path.join(self.path, pool_name, rbd_name)
            self.directory.delete(filepath)
            return True
        except Exception as e:
            self.log.error("Metadata - Unable to delete a rbd meta. %s" % e)
            return False

    def add_backup_circle_info(self, pool_name, rbd_name, new_circle_info):
        """Append a circle entry with the next counter value."""
        try:
            self.log.debug("Metadata - Add new backup circle.")
            circle_list = self.get_backup_circle_list(pool_name, rbd_name)
            add_circle_info = new_circle_info
            if len(circle_list) == 0:
                add_circle_info[self.LIST_COUNTER_KEY] = 1
            else:
                last_circle_info = circle_list[-1]
                add_circle_info[self.LIST_COUNTER_KEY] = int(
                    last_circle_info[self.LIST_COUNTER_KEY]) + 1
            circle_list.append(add_circle_info)
            return self.set_backup_circle_list(pool_name, rbd_name,
                                               circle_list)
        except Exception as e:
            self.log.error("Metadata - Unable to add new backup circle. %s" %
                           e)
            return False

    def add_backup_info(self, backup_name, meta):
        """Record a new backup export and rotate out the oldest entries.

        NOTE(review): returns None on success and False on failure;
        callers compare against False, so this is left unchanged.
        """
        try:
            self.log.debug("Metadata - Add new backup export.")
            self.set_backup_export_info(backup_name, meta)

            rotation_length = self.MAX_BACKUP_INFO_RETAIN_COUNT

            # get_backup_name_list() already sorts; drop the oldest
            # entries beyond the retention count. (An unused re-sort
            # of the list was removed here.)
            backup_name_list = self.get_backup_name_list()
            diff_count = len(backup_name_list) - int(rotation_length)
            if diff_count > 0:
                for i in range(0, diff_count):
                    self.del_backup_info(backup_name_list[i])
        except Exception as e:
            self.log.error(
                "Metadata - Unable to add new backup export info. %s" % e)
            return False

    def add_backup_snapshot_info(self, pool_name, rbd_name, new_snapshot_info):
        """Append a snapshot entry with the next counter value."""
        try:
            self.log.debug("Metadata - Add new backup snapshot.")
            snapshot_list = self.get_backup_snapshot_list(pool_name, rbd_name)
            add_snapshot_info = new_snapshot_info
            if len(snapshot_list) == 0:
                add_snapshot_info[self.LIST_COUNTER_KEY] = 1
            else:
                last_snapshot_info = snapshot_list[-1]
                new_counter = int(
                    last_snapshot_info[self.LIST_COUNTER_KEY]) + 1
                add_snapshot_info[self.LIST_COUNTER_KEY] = new_counter
            snapshot_list.append(add_snapshot_info)
            return self.set_backup_snapshot_list(pool_name, rbd_name,
                                                 snapshot_list)
        except Exception as e:
            self.log.error("Metadata - Unable to add new backup snapshot. %s" %
                           e)
            return False

    def add_backup_incremental_info(self, pool_name, rbd_name, circle_name,
                                    new_incr_info):
        """Append an incremental entry with the next counter value."""
        try:
            self.log.debug("Metadata - Add new backup incremental.")
            incremental_list = self.get_backup_incremental_list(
                pool_name, rbd_name, circle_name)
            add_incremental_info = new_incr_info
            if len(incremental_list) == 0:
                add_incremental_info[self.LIST_COUNTER_KEY] = 1
            else:
                last_incremental_info = incremental_list[-1]
                new_counter = int(
                    last_incremental_info[self.LIST_COUNTER_KEY]) + 1
                add_incremental_info[self.LIST_COUNTER_KEY] = new_counter
            incremental_list.append(add_incremental_info)
            return self.set_backup_incremental_list(pool_name, rbd_name,
                                                    circle_name,
                                                    incremental_list)
        except Exception as e:
            self.log.error(
                "Metadata - Unable to add new backup incremental. %s" % e)
            return False
Пример #20
0
def main(argument_list):
    """CLI entry point: delete one backed-up RBD (directory tree plus
    its metadata), with an interactive yes/no confirmation unless
    --yes is given.

    Bug fix: raw_input() is Python 2-only; input() is the Python 3
    equivalent (the surrounding code uses Python 3 print()/f-strings).
    """

    DEFAULT_CONFIG_PATH = const.CONFIG_PATH
    DEFAULT_CONFIG_SECTION = const.CONFIG_SECTION

    try:
        parser = ArgumentParser(add_help=False)

        parser.add_argument('--config-file')
        parser.add_argument('--config-section')
        parser.add_argument('--cluster-name')
        parser.add_argument('--backup-directory')
        parser.add_argument('--yes', action='store_true')
        parser.add_argument('options', nargs='+')
        args = vars(parser.parse_args(argument_list[1:]))

        backup_config_file = DEFAULT_CONFIG_PATH
        backup_config_section = DEFAULT_CONFIG_SECTION
        if args['config_file'] is not None:
            backup_config_file = args['config_file']
        if args['config_section'] is not None:
            backup_config_section = args['config_section']

        # create config obj and read config file data
        cfg = Config(backup_config_file, backup_config_section)
        if not cfg.is_valid():
            print("Error, fail to initialize config.")
            sys.exit(2)
        if not cfg.set_options(print_options=False):
            print("Error, fail to set config.")
            sys.exit(2)

        # CLI flags override config-file values.
        backup_directory = cfg.backup_destination_path
        cluster_name = cfg.ceph_cluster_name
        if args['backup_directory'] is not None:
            backup_directory = args['backup_directory']
        if args['cluster_name'] is not None:
            cluster_name = args['cluster_name']

        # --yes skips the interactive confirmation.
        ask_yes_no = not args['yes']

        opts = args['options']

        # initial backup logging
        log = Logger(cfg.log_file_path,
                     cfg.log_level,
                     cfg.log_max_bytes,
                     cfg.log_backup_count,
                     cfg.log_delay,
                     name=const.LOG_DELETE_LOGGER_NAME)
        if not log.set_log(log_module=cfg.log_module_name):
            print("Error, unable to set logger.")
            sys.exit(2)

        backup_cluster_directory = os.path.join(backup_directory, cluster_name)
        backup_meta = RBD_Backup_Metadata(log, backup_cluster_directory)
        directory = Directory(log)

        if opts[0] == 'rbd':
            if len(opts) == 3:
                pool_name = opts[1]
                rbd_name = opts[2]

                log.info("Delete backuped RBD.", "pool name: %s" % pool_name,
                         "rbd name: %s" % rbd_name)

                print("- delete backuped RBD:")
                print("  pool name: %s" % pool_name)
                print("  rbd name: %s" % rbd_name)

                if ask_yes_no:
                    response = input("Are you sure to delete this RBD backup "
                                     "(yes or no)? : ")
                    response = response.lower()
                    if response != 'yes':
                        sys.exit(0)

                dir_ret = directory.delete(backup_cluster_directory, pool_name,
                                           rbd_name)
                meta_ret = backup_meta.del_rbd_info(pool_name, rbd_name)

                if dir_ret != False and meta_ret != False:
                    log.info("The Backuped RBD is deleted successfully.")
                    print("- the backuped RBD is deleted successfully.")
                else:
                    print("Error, error occur while deleting backuped RBD.")

    except KeyboardInterrupt:
        print("- Operation canceled.")
        sys.exit(2)

    except Exception:
        # Dump the full traceback to stdout, then exit non-zero.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type,
                                  exc_value,
                                  exc_traceback,
                                  file=sys.stdout)

        sys.exit(2)
Пример #21
0
 def __init__(self, data_path):
     """Remember the data path and load directory.json from it.

     data_path: directory containing directory.json.
     """
     # Read Directory Data
     self.__data_path = data_path
     self.__directory = Directory(F"{data_path}/directory.json")
def main(argument_list):
    """Show RBD backup information (backups, RBDs, cluster info, config).

    argument_list is an argv-style list; argument_list[0] (the program
    name) is skipped.  The first positional option selects what to show:
    'backup', 'rbd', 'cluster' or 'config'; further positionals narrow
    the selection.  Exits with status 2 on any error.
    """

    DEFAULT_CONFIG_PATH = const.CONFIG_PATH
    DEFAULT_CONFIG_SECTION = const.CONFIG_SECTION

    try:
        parser = ArgumentParser(add_help=False)

        parser.add_argument('--config-file')
        parser.add_argument('--config-section')
        parser.add_argument('--cluster-name')
        parser.add_argument('--backup-directory')
        parser.add_argument('--show-details', action='store_true')

        parser.add_argument('options', nargs='+')

        # Tolerate unrecognized arguments instead of erroring out.
        known, unknown = parser.parse_known_args(argument_list[1:])
        args = vars(known)

        # get config file path and config section name
        restore_config_file = DEFAULT_CONFIG_PATH
        restore_config_section = DEFAULT_CONFIG_SECTION

        if args['config_file'] is not None:
            restore_config_file = args['config_file']
        if args['config_section'] is not None:
            restore_config_section = args['config_section']

        # create config obj and read config file data
        cfg = Config(restore_config_file, restore_config_section)
        if not cfg.is_valid():
            print("Error, fail to initialize config.")
            sys.exit(2)
        if not cfg.set_options(print_options=False):
            print("Error, fail to set config.")
            sys.exit(2)

        # get backup directory path and cluster name;
        # command-line arguments override the config file values
        backup_directory = cfg.backup_destination_path
        cluster_name = cfg.ceph_cluster_name
        if args['backup_directory'] is not None:
            backup_directory = args['backup_directory']
        if args['cluster_name'] is not None:
            cluster_name = args['cluster_name']

        # store_true yields a plain bool (default False)
        show_details = bool(args['show_details'])

        opts = args['options']

        # initial backup logging
        log = Logger(cfg.log_file_path,
                     cfg.log_level,
                     cfg.log_max_bytes,
                     cfg.log_backup_count,
                     cfg.log_delay,
                     name=const.LOG_SHOW_LOGGER_NAME)
        if not log.set_log(log_module=cfg.log_module_name):
            print("Error, unable to set logger.")
            sys.exit(2)

        # every sub-command except 'config' needs the backup metadata
        if opts[0] != 'config':
            rbd_restore_list = RBD_Restore_List(log)
            backup_cluster_directory = os.path.join(backup_directory,
                                                    cluster_name)
            if not rbd_restore_list.read_meta(backup_cluster_directory):
                print("Error, unable to get cluster info.")
                sys.exit(2)

        directory = Directory(log)

        print("")
        if opts[0] == 'backup':

            if len(opts) == 1:
                # List every backup name (optionally with RBD counts).
                print("*Show all backup name.\n")
                backup_name_list = rbd_restore_list.get_backup_name_list()
                if show_details:
                    print("[Backup Name]       [RBD Count]")
                    print("-" * 31)
                else:
                    print("[Backup Name List]")
                    print("-" * 20)

                for backup_name in backup_name_list:
                    if show_details:
                        rbd_list = rbd_restore_list.get_backup_rbd_list(
                            backup_name)

                        print("%s %s" % (backup_name, len(rbd_list)))
                    else:
                        print("%s" % backup_name)

            elif len(opts) >= 2:
                # Remaining positionals form the (possibly space-separated)
                # backup name; list the RBDs contained in that backup.
                backup_name = normalize_datetime(' '.join(opts[1:]))
                rbd_list = rbd_restore_list.get_backup_rbd_list(backup_name)

                print("*Show RBD list in backup name '%s'.\n" % backup_name)
                if show_details:
                    print("[Backup Time]       [Circle name]       " \
                          "[Pool name/RBD name]  ... [Status]")
                    print("-" * 74)
                else:
                    print("[Backup RBD List]")
                    print("-" * 20)

                for rbd_info in rbd_list:
                    if show_details:
                        if rbd_info[0] == 0:
                            status = 'OK'

                        else:
                            status = 'FAIL'
                        if status == 'OK':
                            # A successful backup may have been deleted
                            # from disk since; report that separately.
                            exist = directory.exist(backup_cluster_directory,
                                                    rbd_info[1], rbd_info[2],
                                                    rbd_info[3])
                            if exist == False:
                                status = 'Deleted'

                        print("%s %s %s/%s ... %s" %
                              (rbd_info[4], rbd_info[3], rbd_info[1],
                               rbd_info[2], status))
                    else:
                        print("%s/%s" % (rbd_info[1], rbd_info[2]))

        elif opts[0] == 'rbd':
            if len(opts) == 1:
                # List every backed-up RBD, grouped by pool.
                print("*Show all backuped RBD name.\n")
                rbd_list = rbd_restore_list.get_backup_rbd_list()
                if len(rbd_list) == 0:
                    return 0

                arranged_rbd_info = {}

                for rbd_info in rbd_list:
                    pool_name = rbd_info[0]
                    rbd_name = rbd_info[1]
                    info = rbd_info[2]

                    # group RBD info dicts by their pool name
                    if pool_name not in arranged_rbd_info:
                        rbd_info_list = []
                    else:
                        rbd_info_list = arranged_rbd_info[pool_name]
                    info['name'] = rbd_name
                    rbd_info_list.append(info)
                    arranged_rbd_info[pool_name] = rbd_info_list

                if show_details:
                    print("[Pool name]")
                    print(
                        "  [RBD name] [block name prefix] [Num objects] [size (bytes)]"
                    )
                    print("-" * 74)
                else:
                    print("[Pool name]")
                    print("  [RBD name]")
                    print("-" * 20)
                for pool_name, rbd_list in arranged_rbd_info.items():
                    print("%s" % pool_name)
                    if show_details:
                        for rbd_info in rbd_list:
                            rbd_name = rbd_info['name']
                            rbd_size = rbd_info['size']
                            rbd_objs = rbd_info['num_objs']
                            rbd_prix = rbd_info['block_name_prefix']
                            print("  %s %s %s %s" %
                                  (rbd_name, rbd_prix, rbd_objs, rbd_size))
                    else:
                        for rbd_info in rbd_list:
                            print("  %s" % rbd_info['name'])

            if len(opts) == 3:
                # Show all backup times for one specific pool/RBD pair.
                pool_name = opts[1]
                rbd_name = opts[2]

                print("*Show backup time of RBD '%s/%s'.\n" %
                      (pool_name, rbd_name))

                if show_details:
                    backup_info_list = rbd_restore_list.get_rbd_backup_info_list(
                        pool_name, rbd_name)
                    if len(backup_info_list) == 0:
                        return 0
                    print("[Backup time]       [Backup name]       " \
                          "[Backup circle]     [Backup size]")
                    print("-" * 74)
                    for backup_info in backup_info_list:
                        backup_file = backup_info[0]
                        backup_time = backup_info[1]
                        backup_name = backup_info[2]
                        backup_circ = backup_info[3]
                        backup_size = directory.get_used_size(
                            backup_cluster_directory, pool_name, rbd_name,
                            backup_circ, backup_file)
                        print("%s %s %s %s" % (backup_time, backup_name,
                                               backup_circ, backup_size))
                else:
                    backup_time_list = rbd_restore_list.get_rbd_backup_time_list(
                        pool_name, rbd_name)
                    if len(backup_time_list) == 0:
                        return 0

                    print("[Backup time]")
                    print("-" * 20)
                    for backup_time in backup_time_list:
                        print("%s" % backup_time)

        elif opts[0] == 'cluster':
            print("*Show backuped cluster info.\n")
            cluster_info = rbd_restore_list.get_cluster_info()
            for key, value in cluster_info.items():
                print("%s: %s" % (key, value))

        elif opts[0] == 'config':
            if len(opts) == 1:
                print("*Show backup config.\n")

                cfg_opts = cfg.get_option()
                for key, value in cfg_opts.items():
                    print("%s = %s" % (key, value))

            elif len(opts) == 2:
                if opts[1] == 'openstack':
                    print("*Show openstack yaml. (constructing)\n")

            elif len(opts) == 3:
                if opts[1] == 'rbd' and opts[2] == 'list':
                    # Show the configured backup list (yaml), with the
                    # per-RBD backup options when details are requested.
                    print("*Show RBD backup list.")
                    print("*Yaml file: %s" % cfg.backup_list_file_path)
                    print("*Cluster name: %s" % cluster_name)
                    print("")

                    rbd_backup_list = RBD_Backup_List(log)
                    rbd_backup_list.read_yaml(cfg.backup_list_file_path)
                    rbd_name_list = rbd_backup_list.get_rbd_name_list(
                        cluster_name)

                    if show_details:
                        print("[Pool name]")
                        print(
                            "  [RBD name] [backup_type] [max_incr] [max_circ] [max_snap]"
                        )
                        print("-" * 74)
                        backup_option = RBD_Backup_Option(log)
                        for pool_name, rbd_names in rbd_name_list.items():
                            print("%s" % pool_name)
                            for rbd_name in rbd_names:
                                options = rbd_backup_list.get_rbd_options(
                                    cluster_name, pool_name, rbd_name)
                                backup_option.add_option(
                                    pool_name, rbd_name, options)
                                backup_type = backup_option.get_backup_type(
                                    pool_name, rbd_name)
                                max_incr = backup_option.get_backup_max_incr_count(
                                    pool_name, rbd_name)
                                max_circ = backup_option.get_backup_circle_retain_count(
                                    pool_name, rbd_name)
                                max_snap = backup_option.get_snapshot_retain_count(
                                    pool_name, rbd_name)
                                print("  %s %s %s %s %s" %
                                      (rbd_name, backup_type, max_incr,
                                       max_circ, max_snap))
                    else:
                        print("[Pool name]")
                        print("  [RBD name]")
                        print("-" * 24)
                        for pool_name, rbd_names in rbd_name_list.items():
                            print("%s" % pool_name)
                            for rbd_name in rbd_names:
                                print("  %s" % rbd_name)

        print("")
    except Exception:
        # Top-level boundary: report the traceback and exit non-zero.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type,
                                  exc_value,
                                  exc_traceback,
                                  file=sys.stdout)

        sys.exit(2)
Пример #23
0
class PrintDirectory:
    """Render the member directory as a landscape-letter PDF.

    Built with ReportLab's Canvas API: a title page, a two-column
    birthday calendar page, and one page per pair of families (one
    family on each half of the page).
    """

    # Fonts used throughout the document.
    FONT = "Times-Roman"
    FONT_BOLD = "Times-Bold"

    # Which half of the landscape page a family occupies.
    SIDE_LEFT = 0
    SIDE_RIGHT = 1
    # Threshold for splitting a family across both halves of a page
    # (see render_families()).
    MAX_MEMBERS_PER_SIDE = 3

    def __init__(self, data_path, file_name):
        """Create the output canvas and load the directory data.

        data_path -- directory containing directory.json and photos/
        file_name -- output PDF file name, without the .pdf extension
        """
        self.__data_path = data_path

        # Create and Prep page
        self.__pdf = Canvas("%s/%s.pdf" % (data_path, file_name),
                            pagesize=landscape(letter),
                            pageCompression=0)

        # Read Directory Data
        self.__directory = Directory("%s/directory.json" % (data_path))

    def render(self, **kwargs):
        """Render the document and write the PDF to disk.

        Keyword arguments:
        bdays_only -- when true, render only the birthday page.
        """
        if kwargs.get("bdays_only", False):
            self.render_bdays()
        else:
            self.render_title()
            self.render_bdays()
            self.render_families()

        # Save PDF
        self.__pdf.save()

    def render_title(self):
        """Render the title page: header box, church photo, sub-title."""
        x = 4.0 * inch
        y = 8.0 * inch

        # Border
        self.__pdf.setStrokeColorRGB(0, 0, 0)
        self.__pdf.setFillColor(lightgrey)
        self.__pdf.roundRect(3.0 * inch,
                             7.4 * inch,
                             5 * inch,
                             1 * inch,
                             10,
                             fill=1)

        # Top Title
        self.__pdf.setFillColorRGB(0, 0, 0)
        self.__pdf.setFont(PrintDirectory.FONT, 32)
        self.__pdf.drawString(x, y, "Massey's Chapel")
        self.__pdf.drawString(x - (.75 * inch), y - (.50 * inch),
                              "United Methodist Church")

        # Church Photo
        photo = "%s/photos/title_photo.jpg" % (self.__data_path)
        self.__pdf.drawImage(photo,
                             1.0 * inch,
                             1.25 * inch,
                             width=650,
                             height=434)

        # Bottom - Sub-title, stamped with the generation month/year.
        x = 4.25 * inch
        y = .75 * inch
        self.__pdf.setFont(PrintDirectory.FONT, 24)
        self.__pdf.drawString(x, y, "Member Directory")
        self.__pdf.drawString(
            x, y - (.35 * inch),
            datetime.strftime(datetime.now(), "~~~ %B %Y ~~~"))

        self.__pdf.showPage()

    def render_bdays(self):
        """Render the birthday calendar page (six months per half)."""
        # Month placement adjustment: per-month vertical nudge, indexed
        # by month - 1 (only October is shifted here).
        placement = [0, 0, 0, 0, 0, 0, 0, 0, 0, .15 * inch, 0, 0]
        # 13 dicts so the month number (1-12) indexes directly; slot 0 is
        # unused.  Each dict maps day-of-month -> list of member names.
        bdays = [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}]

        for family in self.__directory.families():
            for person in family.members():
                if person.birthday:
                    month = person.birthday.month
                    day = person.birthday.day
                    name = person.name

                    # Insert the bucket on first use, then append to it.
                    bucket = bdays[month].get(day, [])
                    if not bucket:
                        bdays[month][day] = bucket
                    bucket.append(name)

        self.__pdf.setFillColorRGB(0, 0, 0)
        self.__pdf.setFont(PrintDirectory.FONT, 24)
        self.__pdf.drawString(4.85 * inch, 8.2 * inch, "Birthdays")

        # Vertical Line Down Center
        self.__pdf.setStrokeColorRGB(0, 0, 0)
        self.__pdf.line(5.5 * inch, 0.10 * inch, 5.5 * inch, 8.0 * inch)

        # Grid origin and cell size for the month columns.
        origin_x = 0.5 * inch
        origin_y = 7.75 * inch
        width = 2.25 * inch
        height = 2.5 * inch
        row = 1

        for num, month in enumerate(bdays):
            # Skip months with no birthdays (and the unused slot 0).
            if not month:
                continue

            if num % 2 != 0:
                # ODD months go in the first column of the half.
                offset = row - 1
                pos_x = origin_x
                pos_y = (origin_y - (offset * height)) + placement[num - 1]
                # print "ODD - %d] x:%d y:%d" % (num, pos_x,pos_y)
            else:
                # EVEN months go in the second column; advance the row.
                offset = row - 1
                row += 1
                if row > 3:
                    row = 1
                pos_x = origin_x + width
                pos_y = (origin_y - (offset * height)) + placement[num - 1]
                # print "EVEN - %d] x:%d y:%d" % (num, pos_x,pos_y)

            # Jul-Dec are rendered on the right half of the page.
            if num > 6:
                pos_x += 5.5 * inch

            # Month header, e.g. "~~~ MARCH ~~~" (num is the month number).
            self.__pdf.setFont(PrintDirectory.FONT_BOLD, 9)
            header = self.__pdf.beginText(pos_x, pos_y)
            header.textLine("~~~ %s ~~~" %
                            (datetime(1900, num, 1).strftime("%B").upper()))
            self.__pdf.drawText(header)

            # One "DD) Name" line per birthday, in day order.
            self.__pdf.setFont(PrintDirectory.FONT, 9)
            info = self.__pdf.beginText(pos_x, pos_y - 0.15 * inch)
            for day in sorted(month.keys()):
                names = month.get(day)
                for n in names:
                    info.textLine("%02d) %s" % (day, n))

            self.__pdf.drawText(info)

        self.__pdf.showPage()

    def render_families(self):
        """Render all families, two page-halves per page.

        A family with more members than MAX_MEMBERS_PER_SIDE is split
        across both halves of one page; such a family is never started
        on the right half (it would have nowhere to continue).
        """
        side_it = itertools.cycle(
            (PrintDirectory.SIDE_LEFT, PrintDirectory.SIDE_RIGHT))

        families = self.__directory.families()
        family_index = 0
        family_count = len(families)
        while family_index < family_count:
            # Vertical Line Down Center
            self.__pdf.setStrokeColorRGB(0, 0, 0)
            self.__pdf.line(5.5 * inch, 0.10 * inch, 5.5 * inch, 8.4 * inch)

            # Render the Family
            family1 = families[family_index]
            family_index += 1
            family1_args = {}
            family1_side = next(side_it)

            family2_args = {}
            family2_side = next(side_it)
            if len(family1.members()) > PrintDirectory.MAX_MEMBERS_PER_SIDE:
                # Large family: continue it on the second half of the
                # same page (family_cont suppresses the address block).
                family1_args = {
                    'm_start': 0,
                    'm_end': PrintDirectory.MAX_MEMBERS_PER_SIDE
                }
                family2 = family1
                family2_args = {
                    'm_start': PrintDirectory.MAX_MEMBERS_PER_SIDE + 1,
                    'm_end': PrintDirectory.MAX_MEMBERS_PER_SIDE * 2,
                    'family_cont': True
                }
            else:
                if family_index < family_count:
                    family2 = families[family_index]
                    # Don't start rendering a 2 page side family on the LEFT
                    if len(family2.members()
                           ) > PrintDirectory.MAX_MEMBERS_PER_SIDE:
                        family2 = None
                    else:
                        family_index += 1
                else:
                    family2 = None

            self.render_family(family1, family1_side, **family1_args)
            if family2:
                self.render_family(family2, family2_side, **family2_args)

            # End the Page
            self.__pdf.showPage()

    def render_family(self, family, side, **kwargs):
        """Render one family on the given half ('side') of the page.

        Keyword arguments:
        m_start     -- index of the first member to render (default 0)
        m_end       -- index of the last member (inclusive; default all)
        family_cont -- true when this is the continuation of a family,
                       which suppresses the address block

        Raises ValueError if side is not SIDE_LEFT or SIDE_RIGHT.
        """
        left_margin = 0
        right_margin = 0
        if side == PrintDirectory.SIDE_LEFT:
            left_margin = .25 * inch
            right_margin = 5.5 * inch
        elif side == PrintDirectory.SIDE_RIGHT:
            left_margin = 5.5 * inch + .25 * inch
            right_margin = 11 * inch
        else:
            raise ValueError("Invalid value [%s] for 'side' parameter." %
                             (side))

        # Family Name
        self.__pdf.setFont(PrintDirectory.FONT, 18)
        draw_pos = {'x': left_margin, 'y': 8.125 * inch}
        self.__pdf.drawString(draw_pos['x'], draw_pos['y'], family.name)

        # Family Address (omitted on a continuation half)
        if not kwargs.get('family_cont', False):
            self.__pdf.setFont(PrintDirectory.FONT, 16)
            draw_pos['y'] -= .20 * inch
            addr = self.__pdf.beginText(draw_pos['x'], draw_pos['y'])
            addr.setLeading(15)
            addr.textLine(family.address)
            addr.textLine("%s, %s %s" %
                          (family.city, family.state, family.zip))
            self.__pdf.drawText(addr)

        # Family Name/Address Divider
        self.__pdf.setStrokeColorRGB(.5, .5, .5)
        self.__pdf.line(left_margin, 7.5 * inch, right_margin - (0.25 * inch),
                        7.5 * inch)

        # Member Photo, Info
        photo_dir = "%s/photos" % (self.__data_path)
        pos_x = left_margin
        pos_y = 5.35 * inch

        all_members = family.members()
        m_start = kwargs.get('m_start', 0)
        m_end = kwargs.get('m_end', len(all_members))

        # NOTE(review): the slice end is m_end + 1, so the first half of
        # a split family shows MAX_MEMBERS_PER_SIDE + 1 members; the
        # layout wraps to a second photo row once pos_x passes the right
        # margin, so four photos fit per half -- presumably intentional,
        # confirm against a printed page.
        members = all_members[m_start:m_end + 1]
        for person in members:
            self.render_person(person, pos_x, pos_y, photo_dir)
            pos_x += 2.80 * inch
            if pos_x >= right_margin:
                pos_x = left_margin
                pos_y = 1.90 * inch

    def render_person(self, person, pos_x, pos_y, photo_dir):
        """Render one member's photo and info text at (pos_x, pos_y)."""
        # Photo (falls back to the placeholder image when none is set)
        photo_name = person.photo if person.photo else 'unknown.jpeg'
        photo = "%s/%s" % (photo_dir, photo_name)
        self.__pdf.drawImage(photo, pos_x, pos_y, width=150, height=150)

        # Info
        info = self.__pdf.beginText(pos_x, pos_y - .25 * inch)

        # Name
        info.setFont(PrintDirectory.FONT, 16)
        info.textLine(person.name)

        info.setFont(PrintDirectory.FONT, 12)

        # Bday
        date_str = "N/A"
        if person.birthday:
            date_str = datetime.strftime(person.birthday, "%b %d")
        info.textLine("Birthday: %s" % date_str)

        # Email
        email_addr = person.email if person.email else "N/A"
        info.textLine("Email: %s" % email_addr)

        # Phones (person.phone maps a label, e.g. type, to a number)
        if person.phone:
            for type, number in person.phone.items():
                info.textLine("%s: %s" % (type.capitalize(), number))

        # Relationships: for long lists, collapse into Spouse/Children
        # summary lines; otherwise list each relationship verbatim.
        if person.relationships:
            if len(person.relationships) > 2:
                spouse = None
                children = []
                for rel in person.relationships:
                    if rel['type'] == 'Spouse':
                        spouse = rel['name']
                    elif rel['type'] == 'Child':
                        children.append(rel['name'])

                text_lines = []
                if spouse:
                    text_lines.append("Spouse: %s" % (spouse))
                if children:
                    # At most two children per line.
                    batch1 = children[:2]
                    batch2 = children[2:]

                    text_lines.append("Children: %s" % (", ".join(batch1)))
                    if batch2:
                        text_lines.append("                %s" %
                                          (", ".join(batch2)))

                if text_lines:
                    info.textLine("_________________________")
                    for line in text_lines:
                        info.textLine(line)
            else:
                info.textLine("_________________________")
                for rel in person.relationships:
                    info.textLine("%s: %s" % (rel['type'], rel['name']))

        self.__pdf.drawText(info)
Пример #24
0
def main(argument_list):

    #const.EXPORT_TYPE = const.EXPORT_TYPE

    backup_name = None

    # total size of RBD image to backup (provisioned size)
    total_rbd_size = 0

    # available and used size of backup directory
    backup_dir_avai_bytes = 0
    backup_dir_used_bytes = 0

    # store RBD info list (list of dict)
    backup_rbd_info_list   = []

    # task counter
    submitted_snap_create_task_count = 0
    submitted_snap_delete_task_count = 0
    submitted_rbd_export_task_count = 0
    backup_circle_delete_count = 0

    is_rbd_list_from_command_line = False

    manager = None
    ceph = None

    try:
        # parse arguments
        parser = ArgumentParser(add_help=False)
        parser.add_argument('--config-file')
        parser.add_argument('--config-section')
        parser.add_argument('--backup-name')
        parser.add_argument('rbd_list', nargs='*')
        args = vars(parser.parse_args(argument_list[1:]))

        backup_config_file = const.CONFIG_PATH
        backup_config_section = const.CONFIG_SECTION
        if args['config_file'] is not None:
            backup_config_file = args['config_file']
        if args['config_section'] is not None:
            backup_config_section = args['config_section']
        if args['backup_name'] is not None:
            backup_name = args['backup_name']
        if len(args['rbd_list']) != 0:
            is_rbd_list_from_command_line = True

        # create config obj and read config file data
        cfg = Config(backup_config_file, backup_config_section)
        if not cfg.is_valid():
            print("Error, fail to initialize config.")
            sys.exit(2)
        if not cfg.set_options(print_options=False):
            print("Error, fail to set config.")
            sys.exit(2)

        # initial backup logging
        log = Logger(cfg.log_file_path,
                     cfg.log_level,
                     cfg.log_max_bytes,
                     cfg.log_backup_count,
                     cfg.log_delay,
                     name=const.LOG_BACKUP_LOGGER_NAME)
        if not log.set_log(log_module=cfg.log_module_name):
            print("Error, unable to set logger.")
            sys.exit(2)

        # set name of this backup
        begin_backup_datetime = get_datetime()
        if backup_name == None:
            backup_name = normalize_datetime(begin_backup_datetime)
        print("- backup name: %s" % backup_name)

        # start RBD backup
        log.blank(line_count=4)
        log.info("******** Start Ceph RBD backup ********",
                 "pid = %s" % os.getpid(),
                 "config file = %s" % backup_config_file,
                 "section = %s" % backup_config_section)
        log.info('Config settings:', cfg.get_option())

        # ==================================================================
        # check backup directory environment, space size and metafile if exists.
        # ==================================================================
        log.info("________ Check Backup Directory ________")
        print("- check backup directory.")

        # Path structure of backup directory:
        # Dest. Backup Dir/Ceph Name/Pool Name/RBD name/Circle Name/Backup Files
        directory = Directory(log)
        log.info("Set backup path:",
                 " - backup destination path = %s" % cfg.backup_destination_path,
                 " - ceph cluster name = %s" % cfg.ceph_cluster_name)

        cluster_backup_path = os.path.join(cfg.backup_destination_path,
                                           cfg.ceph_cluster_name)
        if not directory.exist(cluster_backup_path):
            cluster_backup_path = directory.create(cluster_backup_path)
            if cluster_backup_path == False:
                log.error("Fail to create directory path.")
                sys.exit(2)
        print("  set backup directory: %s" % cluster_backup_path)

        log.info("Get space size info '%s'." % cluster_backup_path)
        backup_dir_avai_bytes = directory.get_available_size(cluster_backup_path)
        backup_dir_used_bytes = directory.get_used_size(cluster_backup_path)

        if backup_dir_avai_bytes == False:
            log.error("Fail to get space size of directory '%s'." % cluster_backup_path)
            sys.exit(2)

        log.info("Available %s bytes, used %s bytes." % (backup_dir_avai_bytes,
                                                         backup_dir_used_bytes))

        print("  %s Mbytes available." % int(backup_dir_avai_bytes/1024/1024))
        print("  %s Mbytes used." % int(backup_dir_used_bytes/1024/1024))

        # read metadata file in backup directory
        #   get last snapshot name and backup circle directory name
        log.info("Check metadata of the backup directory.")

        backup_meta = RBD_Backup_Metadata(log, cluster_backup_path)
        meta_cluster_info = backup_meta.get_cluster_info()
        if all (k in meta_cluster_info for k in ('name', 'fsid')):
            if meta_cluster_info['name'] != cfg.ceph_cluster_name:
                log.error("Cluster name is not match.",
                          "name from backup directory: %s" % meta_cluster_info['name'],
                          "name from backup config file: %s" % cfg.ceph_cluster_name)
                sys.exit(2)

            ceph_cfg = Config(cfg.ceph_conf_file, const.CEPH_CONFIG_SECTION)
            if not ceph_cfg.is_valid():
                log.error("Unable to read ceph config.")
                sys.exit(2)
            ceph_cfg_fsid = ceph_cfg.get_option(key='fsid')
            if meta_cluster_info['fsid'] !=  ceph_cfg_fsid:
                log.error("Cluster fsid is not match.",
                          "fsid from backup directory: %s" % meta_cluster_info['fsid'],
                          "fsid from ceph config file: %s" % ceph_cfg_fsid)
                sys.exit(2)
        else:
            # this maybe the first time of backup
            # copy ceph config and keyring files to backup directory
            directory.copy_file(cfg.ceph_conf_file, cluster_backup_path)
            directory.copy_file(cfg.ceph_keyring_file, cluster_backup_path)

        # ==================================================================
        # read rbd backup list, backup list source might either from
        # Openstack yaml file or backup list file (yaml format) or command line
        # ==================================================================
        log.info("________ Read RBD Backup List ________")
        print("- check backup rbd list.")

        backup_option = RBD_Backup_Option(log)
        rbd_backup_list = RBD_Backup_List(log)

        rbd_name_list = {}  # rbd name list : {'pool_name': ['rbd_name', ...], ...}
        pool_count = 0
        rbd_count = 0

        if is_rbd_list_from_command_line == True:
            log.info("Read backup list from command line.")
            print("  get backup list from command line input.")

            for rbd_list_input in args['rbd_list']:
                rbd_info = rbd_list_input.split("/")

                if len(rbd_info) == 2:
                    pool_name = rbd_info[0]
                    rbd_name = rbd_info[1]

                    if not rbd_name_list.has_key(pool_name):
                        rbd_name_list[pool_name] = [rbd_name]
                    else:
                        rbd_list = rbd_name_list[pool_name]
                        if not rbd_name in rbd_list:
                            rbd_list.append(rbd_name)
                            rbd_name_list[pool_name] = rbd_list
                        else:
                            log.warning("Duplicated RBD name '%s'." % rbd_name)
                            continue

                    rbd_count += 1
                    print("  %s - %s %s" % (rbd_count, pool_name, rbd_name))
                else:
                    log.error("Invalid rbd input list. %s" % rbd_list_input)
                    print("Error, Please input RBD name as '<pool_name>/<rbd_name>'\n" \
                          "For example, 3 RBDs to backup:\n" \
                          "  rbd/rbd_a rbd/rbd_b volume/rbd_1")
                    sys.exit(2)
        else:
            if cfg.backup_list_from_openstack_yaml_file == 'True':
                log.info("Read backup list from OpenStack YAML file.")
                print("  get backup list from OpenStack YAML file %s." % cfg.openstack_yaml_file_path)

                file_path = cfg.openstack_yaml_file_path
                section_name = cfg.openstack_yaml_section
                distribution = cfg.openstack_distribution
                ceph_pool = cfg.openstack_ceph_pool

                if not os.path.exists(cfg.openstack_yaml_file_path):
                    log.error("Openstack Yaml file '%s' not exists." % cfg.backup_list_file_path)
                    sys.exit(2)

                rbd_backup_list.read_openstack_yaml(cfg.openstack_yaml_file_path,
                                                    cfg.openstack_yaml_section)
                if rbd_backup_list.set_cinder_client(distribution=cfg.openstack_distribution,
                                                     api_version=cfg.api_version)
                    volume_list = rbd_backup_list.get_cinder_volume_list()

                    if len(volume_list) == 0:
                        log.warning("No any matched volume ID found.")
                        print("Info, No any matched volume ID found.")
                        sys.exit(0)
                    if volume_list == False:
                        log.warning("Unable to get cinder volume ID list from openstack.")
                        print("Error, unable to get cinder volume ID list from openstack")
                        sys.exit(2)

                    rbd_name_list[cfg.openstack_ceph_pool] = volume_list
            else:
                log.info("Read RBD list from backup list file.")
                print("  get RBD backup list from %s." % cfg.backup_list_file_path)

                if not os.path.exists(cfg.backup_list_file_path):
                    log.error("Backup list file '%s' not exists." % cfg.backup_list_file_path)
                    sys.exit(2)

                rbd_backup_list.read_yaml(cfg.backup_list_file_path)
                rbd_name_list = rbd_backup_list.get_rbd_name_list(cfg.ceph_cluster_name)

                if rbd_name_list == {}:
                    log.warning("No any item in RBD backup list.")
                    print("Info, No any item in RBD backup list.")
                    sys.exit(0)
                if rbd_name_list == False:
                    log.error("Unable to get rbd name list from backup list file.")
                    print("Error, unable to get rbd name list from backup list file.")
                    sys.exit(2)

                for pool_name, rbd_list in rbd_name_list.iteritems():
                    pool_count += 1

                    if cfg.backup_read_options == 'True':
                        for rbd_name in rbd_list:
                            options = rbd_backup_list.get_rbd_options(cfg.ceph_cluster_name,
                                                                      pool_name,
                                                                      rbd_name)
                            backup_option.add_option(pool_name, rbd_name, options)

            rbd_count += len(rbd_list)
            log.info("%s RBD images to backup in pool '%s'." % (rbd_count, pool_name))

        log.info("Total %s RBD images configured to backup." % rbd_count)
        print("  %s RBD(s) to be backuped." % rbd_count)

        # Nothing configured to back up is a normal, successful exit.
        if rbd_count == 0:
            sys.exit(0)

        # ==================================================================
        # check ceph cluster
        # examine the RBD backup list in CEPH cluster.
        # ignore not exist RBD in the backup list
        # ==================================================================
        log.info("________ Verify RBD Backup List ________")
        print("- verify RBD backup list.")

        valid_rbd_count = 0

        #ceph_conff_file = cfg.ceph_conf_file
        ceph = Ceph(log, cfg.ceph_cluster_name, conffile=cfg.ceph_conf_file)
        if not ceph.connect_cluster():
            log.error("Unable to connect ceph cluster.")
            # you may check user or permission to /etc/ceph directory
            print("Error, unable to connect ceph cluster.")
            sys.exit(2)

        ceph_fsid = ceph.get_fsid()
        ceph_stats = ceph.get_cluster_stats()

        # Refuse to mix backups of different clusters in one directory: the
        # fsid recorded in the backup metadata must match the live cluster.
        if meta_cluster_info.has_key('fsid'):
            if ceph_fsid != meta_cluster_info['fsid']:
                log.error("Ceph fsid is not matching to the backup directory.")
                print("Error, the fsid from the ceph cluster is not matching " \
                      "to the backup directory.")
                sys.exit(2)

        # Record the cluster identity and config filenames with the backups.
        log.info("Update cluster info metadata.")
        ceph_stats['fsid'] = ceph_fsid
        ceph_stats['name'] = cfg.ceph_cluster_name
        ceph_stats['conf'] = os.path.basename(cfg.ceph_conf_file)
        ceph_stats['keyr'] = os.path.basename(cfg.ceph_keyring_file)
        backup_meta.set_cluster_info(ceph_stats)

        all_pool_name_list = ceph.get_pool_list()
        if all_pool_name_list == False:
            log.error("Unable to get pool name list from ceph cluster.")
            print("Error, unable to get pool name list from ceph cluster.")
            sys.exit(2)
        log.info("Pool name in Ceph cluster:", all_pool_name_list)

        # Walk the configured list against the live cluster; pools or RBDs
        # that are missing or unreadable are skipped, not fatal.
        for pool_name, rbd_list in rbd_name_list.iteritems():
            log.info("Check RBDs in Ceph pool '%s'." % pool_name)

            if pool_name not in all_pool_name_list:
                log.warning("Pool '%s' is not found, " \
                            "skip backup of the pool." % pool_name)
                continue
            if not ceph.open_ioctx(pool_name):
                log.warning("Unable to open ioctx of pool '%s', " \
                            "skip backup of the pool." % pool_name)
                continue
            pool_rbd_name_list = ceph.get_rbd_list()  # rbd name list in a pool
            if pool_rbd_name_list == False:
                log.warning("Unable to get RBD list from ceph cluster, " \
                            "skip backup of the pool")
                continue

            # just log pool stat first
            pool_stat = ceph.get_pool_stat()
            log.info("Pool stat:", pool_stat)

            for rbd_name in rbd_list:
                log.info("Check RBD '%s'." % rbd_name)

                if rbd_name not in pool_rbd_name_list:
                    log.warning("RBD '%s' is not exist." % rbd_name)
                    continue
                rbd_size = ceph.get_rbd_size(rbd_name)
                rbd_snap = ceph.get_snap_info_list(rbd_name)    # return list of (snap id, snap size, snap name)
                if rbd_size == False or rbd_snap == False:
                    log.warning("Unable to get size or snapshot list of the RBD, "
                                "skip backup of the RBD.")
                    continue

                # build rbd backup list
                rbd_info = pack_rbd_info(pool_name, rbd_name, rbd_size, rbd_snap)
                backup_rbd_info_list.append(rbd_info)
                total_rbd_size += rbd_size

                valid_rbd_count += 1
                print("  %s/%s - %s bytes." % (pool_name, rbd_name, rbd_size))

                # compare rbd stat
                rbd_stat = ceph.get_rbd_stat(rbd_name)
                meta_rbd_stat = backup_meta.get_rbd_info(pool_name, rbd_name)
                # BUG FIX: cmp() returns 0 when its arguments are equal, so the
                # old 'if not cmp(...)' logged "changed" for UNCHANGED stats;
                # log only when the stats really differ.
                if cmp(rbd_stat, meta_rbd_stat) != 0:
                    log.info("RBD stat has been changed.",
                             "Old: %s" % meta_rbd_stat,
                             "New: %s" % rbd_stat)

                backup_meta.set_rbd_info(pool_name, rbd_name, rbd_stat)

            ceph.close_ioctx()

        print("  %s RBD(s) can be backuped." % valid_rbd_count)
        log.info("Total %s bytes of RBD images size to backup." % total_rbd_size)
        print("  total RBDs has %s Mbytes." % int(total_rbd_size/1024/1024))

        if valid_rbd_count == 0:
            sys.exit(0)

        # Keep 1% of the destination filesystem free as head-room and abort
        # rather than filling it to 100%.
        # NOTE(review): total_rbd_size is the provisioned RBD size; the actual
        # export may use less space -- confirm this is the intended bound.
        reserve_space = backup_dir_avai_bytes * 0.01
        usable_space_size = backup_dir_avai_bytes - reserve_space
        if total_rbd_size > usable_space_size:
            log.error("No enough space size for backup, stop backup work.",
                      " - %s bytes of RBD images to backup." % total_rbd_size,
                      " - %s bytes of usable space size (99 percents of available bytes)." % usable_space_size,
                      " - %s bytes more required." % (total_rbd_size-usable_space_size))

            print("Error, No enough space size to backup.")
            sys.exit(2)

        # ==================================================================
        # Verify backup types.
        # Set backup type of RBD, change from incremental to full backup if
        #   a. backup_type is configured as 'full' in backup list file
        #   b. no last snapshot record found in metafile or metafile not exist
        #   c. last snapshot is not found in ceph cluster
        #   d. reached max incremental backup count
        # ==================================================================
        log.info("________ Check RBD backup type ________")
        print("- check rbd backup type.")

        # store backup options
        rbd_backup_type = {}    # RBD backup type { rbd_id: 'full' or 'incr' }

        # Configured as comma-separated weekday numbers, e.g. "6,7".
        full_weekday = cfg.weekly_full_backup.replace(" ","")
        incr_weekday = cfg.weekly_incr_backup.replace(" ","")
        full_weekdays = full_weekday.split(',')
        incr_weekdays = incr_weekday.split(',')

        # NOTE(review): ''.split(',') yields [''], so this warning can never
        # fire; consider testing the raw string instead.
        if len(full_weekdays) == 0:
            log.warning("There is no full backup weekday configured.")

        log.info("Check default backup type for today.")
        # Python weekday() is Monday=0; shift to 1..7 to match the config.
        weekday = str(int(datetime.datetime.today().weekday()) + 1)
        if weekday in full_weekdays:
            weekday_backup_type = const.EXPORT_TYPE[0]
        # BUG FIX: compare against the split list (incr_weekdays), not the raw
        # string -- 'in' on the string performed substring matching.
        elif weekday in incr_weekdays:
            weekday_backup_type = const.EXPORT_TYPE[1]
        else:
            log.info("No bacakup triggered on today (weekday=%s)." % weekday)
            print("Info, No bacakup triggered on today.")
            sys.exit(0)

        log.info("Backup type for today is '%s'." % weekday_backup_type)

        for rbd_info in backup_rbd_info_list:
            # you may do further manipulation of each rbd backup.
            # control attributed can be defined in rbd backup list file
            # and write your control logic in this block to overwrite configured setting

            pool_name, rbd_name, rbd_size, rbd_snap = unpack_rbd_info(rbd_info)
            rbd_id = convert_rbd_id(pool_name, rbd_name)

            # verify backup type
            # --------------------------------------
            log.info("Check backup type of '%s'." % rbd_id)

            # Start from the per-RBD configured type when option reading is
            # enabled; otherwise fall back to today's weekday-based default.
            if cfg.backup_read_options == 'True':
                log.info("Check backup type form backup option.")
                option_backup_type = backup_option.get_backup_type(pool_name,
                                                                   rbd_name)
                if option_backup_type == False:
                    rbd_backup_type[rbd_id] = weekday_backup_type
                else:
                    rbd_backup_type[rbd_id] = option_backup_type
            else:
                # BUG FIX: without this else, rbd_backup_type[rbd_id] was never
                # initialized when option reading is disabled, and every lookup
                # below raised KeyError.
                rbd_backup_type[rbd_id] = weekday_backup_type

            # Each check below downgrades an incremental backup to a full
            # backup when one of its prerequisites is missing.

            if rbd_backup_type[rbd_id] == const.EXPORT_TYPE[1]:
                # rbd snapshot check: the snapshot recorded by the last backup
                # must still exist in the cluster to diff against.
                log.info("Check last backup snapshot.")
                meta_snap_info_list = backup_meta.get_backup_snapshot_list(pool_name,
                                                                           rbd_name)
                if len(meta_snap_info_list) == 0:
                    log.warning("No snapshot list metadata found.")
                    rbd_backup_type[rbd_id] = const.EXPORT_TYPE[0]
                else:
                    meta_last_snap_info = meta_snap_info_list[-1]
                    meta_last_snap_name = meta_last_snap_info['name']
                    ceph_snap_name_list = [i['name'] for i in rbd_snap]   # get snap name list
                    if meta_last_snap_name not in ceph_snap_name_list:
                        log.warning("Snapshot name '%s' is not found in ceph cluster." % meta_last_snap_name)
                        rbd_backup_type[rbd_id] = const.EXPORT_TYPE[0]

            if rbd_backup_type[rbd_id] == const.EXPORT_TYPE[1]:
                # backup circle directory check: the directory holding the
                # last full export (and its diffs) must still be on disk.
                log.info("Check last backup circle.")
                meta_circle_info_list = backup_meta.get_backup_circle_list(pool_name,
                                                                           rbd_name)
                if len(meta_circle_info_list) == 0:
                    log.warning("No circle list metadata found.")
                    rbd_backup_type[rbd_id] = const.EXPORT_TYPE[0]
                else:
                    meta_last_circle_info = meta_circle_info_list[-1]
                    meta_last_circle_name = meta_last_circle_info['name']
                    if not directory.exist(cluster_backup_path,
                                           pool_name,
                                           rbd_name,
                                           meta_last_circle_name):
                        log.warning("Last backup circle directory is not exist.")
                        rbd_backup_type[rbd_id] = const.EXPORT_TYPE[0]

            if rbd_backup_type[rbd_id] == const.EXPORT_TYPE[1]:
                # max incremental backup count check
                log.info("Check max incremental backup.")
                max_incr_count = backup_option.get_backup_max_incr_count(pool_name,
                                                                         rbd_name)
                if max_incr_count == False:
                    max_incr_count = cfg.backup_max_incremental

                if max_incr_count in [0, '0', False, 'False', None]:
                    log.info("No max incremental backup limited.")
                else:
                    # NOTE: meta_last_circle_name is bound here because the
                    # backup type is still incremental, which means the circle
                    # check above must have taken its else branch.
                    file_list = directory.get_file_list(cluster_backup_path,
                                                        pool_name,
                                                        rbd_name,
                                                        meta_last_circle_name)
                    if len(file_list) > int(max_incr_count):
                        log.info("Max incremental backup reached (%s/%s)." % (len(file_list),
                                                                              max_incr_count))
                        rbd_backup_type[rbd_id] = const.EXPORT_TYPE[0]

            log.info("Set backup type of '%s/%s' to '%s'." % (pool_name,
                                                              rbd_name,
                                                              rbd_backup_type[rbd_id]))
            print("  %s %s - %s backup." % (pool_name,
                                            rbd_name,
                                            rbd_backup_type[rbd_id]))

        # ==================================================================
        # sort rbd backup list by provisioned size of RBD, from large to small.
        # may implement other sorting method,
        # by type(full or incr first), priority, or others
        #(rbd loading or exist object count or created snapshot size)
        # ==================================================================
        log.info("________ Sort RBD backup list ________")
        print("- sort rbd backup list order.")

        list_sort = RBD_Backup_List_Sort(log)
        sorted_rbd_backup_list = list_sort.sort_by_rbd_size(backup_rbd_info_list)
        # BUG FIX: the sorted result was computed but never used; adopt it so
        # the printed priority and the subsequent task submission actually
        # follow the large-to-small order described above.
        backup_rbd_info_list = sorted_rbd_backup_list

        backup_priority = []
        count = 0
        for rbd_info in backup_rbd_info_list:
            pool_name, rbd_name, rbd_size, rbd_snap = unpack_rbd_info(rbd_info)
            count += 1
            backup_pos = "%s : %s/%s : %s" % (count, pool_name, rbd_name, rbd_size)
            backup_priority.append(backup_pos)
            print("  %s %s %s" % (pool_name, rbd_name, rbd_size))
        log.info("RBD backup priority:", backup_priority)

        # ==================================================================
        # initial worker manager, task maker
        # ==================================================================
        log.info("________ Initial task worker manager and task maker ________")
        print("- start task workers.")

        # Worker pool that executes backup tasks concurrently; workers and
        # the monitor write to the main log file under dedicated logger names.
        manager = Manager(log, tmp_dir=const.TMP_DIR)
        manager.set_worker_logger(cfg.log_file_path, name=const.LOG_WORKER_LOGGER_NAME)
        manager.set_monitor_logger(cfg.log_file_path, name=const.LOG_MONITOR_LOGGER_NAME)
        manager.run_worker(count=cfg.backup_concurrent_worker_count)

        print("  %s worker(s) started." % manager.worker_count)

        # initial task maker
        # Builds snapshot/export tasks bound to this cluster's conf/keyring.
        task_maker = RBD_Backup_Task_Maker(log,
                                           cfg.ceph_cluster_name,
                                           cfg.ceph_conf_file,
                                           cfg.ceph_keyring_file)
        task_maker.set_export_full_type(const.EXPORT_TYPE[0])
        task_maker.set_export_diff_type(const.EXPORT_TYPE[1])

        # for storing task result and write to file
        task_result = RBD_Backup_Task_Result(log)

        # ==================================================================
        # start RBD backup procedure
        # ==================================================================

        # create and submit rbd snapshot create tasks
        # ----------------------------------------------------------
        log.info("________ Create and submit RBD snapshot create task ________")
        print("- start RBD snapshot procedure.")

        # One snapshot-create task per RBD; only successfully submitted tasks
        # are counted, so the verification loop below waits for exactly that
        # many results.
        for rbd_info in backup_rbd_info_list:
            pool_name, rbd_name, rbd_size, rbd_snap = unpack_rbd_info(rbd_info)
            log.info("Creating RBD snapshot create task of '%s/%s'." % (pool_name,
                                                                        rbd_name))
            snap_create_task = task_maker.get_rbd_snapshot_create_task(pool_name, rbd_name)

            if snap_create_task != False:
                log.info("Submit SnapshotCreateTask '%s'." % snap_create_task)
                if manager.add_task(snap_create_task):
                    submitted_snap_create_task_count += 1
                    print("  take snapshot of %s/%s" % (pool_name, rbd_name))
            else:
                log.error("Unable to get RBD snapshot create task.")
        log.info("Submitted %s snapshot create tasks." % submitted_snap_create_task_count)

        # verify finished snapshot tasks
        # ----------------------------------------------------------
        # Collect exactly one finished task per submitted task; an RBD whose
        # snapshot failed is dropped from the backup list so it is not
        # exported later.
        log.info("________ Verify finished RBD snapshot create task ________")

        for i in xrange(0, submitted_snap_create_task_count):
            try:
                finished_task = manager.get_finished_task()
                log.info("Received finished task %s." % finished_task)

                pool_name = finished_task.pool_name
                rbd_name = finished_task.rbd_name
                created_snapshot_name = finished_task.snap_name
                created_datetime = finished_task.create_datetime

                result = finished_task.get_result()
                task_result.add_snapshort_create_result(result)
                log.info("Task Result: ", result)

                # remove rbd backup item from backup list because of failure of snapshot create.
                if result['return_code'] != 0:
                    log.warning("%s is not completed. " % result['task_name'],
                                "remove the RBD from backup list.")
                    print("  snapshot of %s/%s failed." % (pool_name, rbd_name))
                    # BUG FIX: keep an entry unless BOTH its pool and rbd match
                    # the failed task; the old "!= and !=" test dropped every
                    # RBD that merely shared the pool name or the rbd name.
                    temp_rbd_backup_list = []
                    for rbd_info in backup_rbd_info_list:
                        if not (rbd_info['pool_name'] == pool_name and rbd_info['rbd_name'] == rbd_name):
                            temp_rbd_backup_list.append(rbd_info)
                    backup_rbd_info_list = temp_rbd_backup_list
                else:
                    log.info("%s is completed, " % result['task_name'])
                    print("  snapshot of %s/%s completed." % (pool_name, rbd_name))

                    # just set default snapshot info
                    created_snapshot_info = {'id': None,
                                             'size': None,
                                             'name': created_snapshot_name,
                                             'datetime': created_datetime}
                    if ceph.open_ioctx(pool_name):
                        snapshot_info = ceph.get_snap_info(rbd_name, created_snapshot_name)
                        if snapshot_info == False:
                            log.warning("Unable to get snapshot info")
                        else:
                            # replace with detail info
                            created_snapshot_info = snapshot_info
                    else:
                        log.error("Unable to open ioctx of pool '%s'." % pool_name)

                    log.info("Update backup snapshot info metadata.")
                    backup_meta.add_backup_snapshot_info(pool_name, rbd_name, created_snapshot_info)

            except Exception as e:
                log.error("Unable to verify snapshot create task. %s" % e)
                continue

        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        # todo: after create new snapshot, we may sort again the rbd_backup list
        # base on the size of the new created snapshot. (if supported to get used size)
        #
        #list_sort = RBD_Backup_List_Sort(log)
        #sorted_rbd_backup_list = list_sort.sort_by_snap_size(backup_rbd_info_list)


        # Create and submit export tasks
        # ----------------------------------------------------------
        # Full backups export into a new "circle" directory named after the
        # new snapshot; incremental backups export a diff between the previous
        # snapshot and the new one into the existing circle directory.
        log.info("________ Create and submit RBD export task ________")
        print("- start RBD export procedure.")

        for rbd_info in backup_rbd_info_list:
            pool_name, rbd_name, rbd_size, rbd_snap = unpack_rbd_info(rbd_info)
            log.info("Creating RBD export task of '%s/%s'." % (pool_name,
                                                               rbd_name))
            rbd_id = convert_rbd_id(pool_name, rbd_name)
            backup_type = rbd_backup_type[rbd_id]

            # get snapshot name form metadata
            meta_snap_info_list = backup_meta.get_backup_snapshot_list(pool_name, rbd_name)
            if len(meta_snap_info_list) == 0:
                log.warning("No last snapshot found.")
                continue
            new_created_snap_info = meta_snap_info_list[-1]  # from this backup
            new_created_snap_name = new_created_snap_info['name']

            log.info("Backup type is %s" % backup_type)
            if backup_type == const.EXPORT_TYPE[0]:  # full
                # create the circle dir and get the path
                new_circle_name = new_created_snap_name
                backup_circle_path = directory.create(cluster_backup_path,
                                                      pool_name,
                                                      rbd_name,
                                                      new_circle_name)
                if backup_circle_path == False:
                    log.error("Unable to create RBD backup destination path, " \
                              "skip this RBD export.")
                    continue

                export_task = task_maker.get_rbd_export_full_task(pool_name,
                                                                  rbd_name,
                                                                  new_created_snap_name,
                                                                  backup_circle_path)
            elif backup_type == const.EXPORT_TYPE[1]:  # incr
                # get snapshot created from last backup
                # NOTE(review): assumes at least two snapshot records exist;
                # the earlier backup-type checks force a full backup when no
                # previous snapshot is recorded, which should guarantee this
                # -- confirm.
                last_created_snap_info = meta_snap_info_list[-2]  # from last backup
                last_created_snap_name = last_created_snap_info['name']

                # get belonging circle dir name
                meta_circle_info_list = backup_meta.get_backup_circle_list(pool_name, rbd_name)
                meta_last_circle_info = meta_circle_info_list[-1]
                meta_last_circle_path = meta_last_circle_info['path']

                # get the circle dir path
                backup_circle_path = os.path.join(cluster_backup_path,
                                                  pool_name,
                                                  rbd_name,
                                                  meta_last_circle_path)

                export_task = task_maker.get_rbd_export_diff_task(pool_name,
                                                                  rbd_name,
                                                                  new_created_snap_name,
                                                                  last_created_snap_name,
                                                                  backup_circle_path)
            else:
                log.warning("Unknown backup type '%s'. skip." % rbd_backup_type[rbd_id])
                continue

            if export_task != False:
                log.info("Submit RBD export task '%s'" % export_task)
                if manager.add_task(export_task):
                    submitted_rbd_export_task_count += 1
                    print("  export %s/%s" % (pool_name, rbd_name))
            else:
                log.error("Unable to get RBD export task.")

        log.info("Submitted %s RBD export tasks." % submitted_rbd_export_task_count)

        # verify finished export tasks
        # ----------------------------------------------------------
        # Collect one result per submitted export; on failure remove the
        # partial export file/directory, on success record circle or
        # incremental metadata.
        log.info("________ Verify finished RBD export task ________")
        backup_list_info = []
        for i in xrange(0, submitted_rbd_export_task_count):
            try:
                finished_task = manager.get_finished_task()
                log.info("Received finished task %s." % finished_task)

                pool_name = finished_task.pool_name
                rbd_name = finished_task.rbd_name
                snap_name = finished_task.snap_name
                created_datetime = finished_task.create_datetime
                circle_dir_name = directory.get_basename(finished_task.dest_path)

                result = finished_task.get_result()
                task_result.add_export_task_result(result)
                log.info("Task Result: ", result)

                if result['return_code'] != 0:
                    log.warning("%s is not completed, " % result['task_name'])
                    print("  export %s/%s failed." % (pool_name, rbd_name))
                    # remove export incompleted file if exist
                    # Full export: delete the whole circle dir; incremental:
                    # delete only the diff file.
                    if finished_task.export_type == const.EXPORT_TYPE[0]:
                        directory.delete(finished_task.dest_path)
                    elif finished_task.export_type == const.EXPORT_TYPE[1]:
                        directory.delete(finished_task.dest_filepath)
                else:
                    log.info("%s is completed, " % result['task_name'])
                    print("  export of %s/%s completed." % (pool_name, rbd_name))
                    if finished_task.export_type == const.EXPORT_TYPE[0]:
                        log.info("Update backup circle info metadata.")
                        circle_info =  {'backup_name': backup_name,
                                        'name': finished_task.dest_file,
                                        'path': circle_dir_name,
                                        'datetime': created_datetime}
                        backup_meta.add_backup_circle_info(pool_name, rbd_name, circle_info)
                    elif finished_task.export_type == const.EXPORT_TYPE[1]:
                        log.info("Update incremental backup info metadata.")
                        incr_info = {'backup_name': backup_name,
                                     'name': finished_task.dest_file,
                                     'from': finished_task.from_snap,
                                     'to': finished_task.snap_name,
                                     'datetime': created_datetime}
                        backup_meta.add_backup_incremental_info(pool_name,
                                                                rbd_name,
                                                                circle_dir_name,
                                                                incr_info)
                backup_list_info.append((result['return_code'],
                                         pool_name,
                                         rbd_name,
                                         circle_dir_name,
                                         snap_name))
            except Exception as e:
                log.error("Unable to verify export task. %s" % e)
                continue

            # NOTE(review): this write sits INSIDE the loop, so the growing
            # backup_list_info is written once per finished task; if
            # add_backup_info appends rather than overwrites, entries get
            # duplicated -- confirm and consider moving it after the loop.
            log.info("Update backup export info metadata.")
            backup_meta.add_backup_info(backup_name, backup_list_info)

        # remove exceed snapshot
        # ----------------------------------------------------------
        # Keep only the configured number of backup snapshots per RBD;
        # delete the oldest ones and drop them from the metadata.
        log.info("________ Delete exceed RBD snapshots ________")
        print("- check exceed RBD snapshot.")

        # reduce number of worker to 1 only for sequence exec of snapshot delete task
        stop_worker_count = int(cfg.backup_concurrent_worker_count) - 1
        if stop_worker_count != 0:
            manager.stop_worker(count=stop_worker_count)

        for rbd_info in backup_rbd_info_list:
            pool_name, rbd_name, rbd_size, rbd_snap = unpack_rbd_info(rbd_info)
            log.info("Check snapshots of RBD '%s/%s'." % (pool_name,
                                                          rbd_name))

            # Per-RBD retain count overrides the global default.
            max_snap_count = backup_option.get_snapshot_retain_count(pool_name,
                                                                     rbd_name)
            if max_snap_count == False:
                snap_retain_count = cfg.backup_snapshot_retain_count
            else:
                snap_retain_count = max_snap_count

            # retrieve snapshot name to a list
            ceph_snap_name_list = [i['name'] for i in rbd_snap]

            # Snapshots recorded in metadata that still exist in the cluster,
            # oldest first (metadata order).
            matched_snapshot_name_list = []
            meta_snap_info_list = backup_meta.get_backup_snapshot_list(pool_name, rbd_name)

            for meta_snap_info in meta_snap_info_list:
                meta_snap_name = meta_snap_info['name']
                if meta_snap_name in ceph_snap_name_list:
                    matched_snapshot_name_list.append(meta_snap_name)

            # do a trick to correct count of matched snapshot name list
            # add one more count for the new created snapshot before.
            matched_snapshot_count = (len(matched_snapshot_name_list)+1)

            # create snapshot delete task.
            diff_count = (matched_snapshot_count - int(snap_retain_count))
            if diff_count > 0:
                log.info("%s exceed snapshot to be deleted." % diff_count)
                for i in range(0, diff_count):

                    snap_name = matched_snapshot_name_list[i]    # get snap name for matched snapshot name
                    snap_delete_task = task_maker.get_rbd_snapshot_delete_task(pool_name,
                                                                               rbd_name,
                                                                               snap_name)
                    # BUG FIX: the check below used to test snap_create_task, a
                    # stale variable left over from the snapshot-create phase.
                    if snap_delete_task != False:
                        log.info("Submit SnapshotDeleteTask '%s'" % snap_delete_task)

                        if manager.add_task(snap_delete_task):

                            # check result after submit the task
                            finished_task = manager.get_finished_task()
                            log.info("%s is completed." % (finished_task))
                            pool_name = finished_task.pool_name
                            rbd_name = finished_task.rbd_name
                            deleted_snap_name = finished_task.snap_name

                            result = finished_task.get_result()
                            task_result.add_snapshort_delete_result(result)
                            log.info("Task Result: ", result)

                            # mark deleted snapshot,
                            if result['return_code'] != 0:
                                log.error("%s is not completed." % result['task_name'])
                                continue
                            else:
                                log.info("Update backup snapshot info metadata.")
                                meta_snap_info_list = backup_meta.get_backup_snapshot_list(pool_name,
                                                                                           rbd_name)
                                for meta_snap_info in meta_snap_info_list:
                                    if meta_snap_info['name'] == deleted_snap_name:
                                        backup_meta.del_backup_snapshot_info(pool_name,
                                                                             rbd_name,
                                                                             meta_snap_info,
                                                                             key='name')
                                        break

                                print("  delete snapshot %s of %s/%s" % (snap_name,
                                                                         pool_name,
                                                                         rbd_name))
                                submitted_snap_delete_task_count += 1
                    else:
                        log.error("Unable to get RBD snapshot delete task.")
            else:
                log.info("No snapshot to be deleted.")

        log.info("Total deleted %s RBD snapshots." % submitted_snap_delete_task_count)

        # remove exceed backup circle
        # ----------------------------------------------------------
        log.info("________ Delete exceed backup circle ________")
        print("- check execeed RBD backup circle.")

        for rbd_info in backup_rbd_info_list:
            try:
                pool_name, rbd_name, rbd_size, rbd_snap = unpack_rbd_info(rbd_info)
                log.info("Check backup circle of RBD '%s/%s'." % (pool_name,
                                                                  rbd_name))

                max_circ_count = backup_option.get_backup_circle_retain_count(pool_name,
                                                                              rbd_name)
                if max_circ_count == False:
                    circle_retain_count = cfg.backup_circle_retain_count
                else:
                    circle_retain_count = max_circ_count

                backup_circle_dir_list = directory.get_dir_list(cluster_backup_path,
                                                                pool_name,
                                                                rbd_name)
                meta_circle_info_list = backup_meta.get_backup_circle_list(pool_name,
                                                                           rbd_name)

                circle_counter = 0
                matched_circle_dir_list = []
                for meta_circle_info in meta_circle_info_list:
                    if meta_circle_info['path'] in backup_circle_dir_list:
                        matched_circle_dir_list.append(meta_circle_info)
                    else:
                        log.warning("Missing circle directory '%s'." % meta_circle_info['path'])

                matched_circle_dir_count = len(matched_circle_dir_list)

                log.info("%s matched backup circle, " \
                         "%s backup circle to retain." %
                         (matched_circle_dir_count, circle_retain_count))

                diff_count = (matched_circle_dir_count - int(circle_retain_count))
                if diff_count <= 0:
                    log.info("No backup circle to be deleted.")
                    continue

                log.info("%s exceed backup circle to be deleted." % diff_count)
                for i in range(0, diff_count):
                    delete_backup_circle_info = matched_circle_dir_list[i]
                    circle_path = delete_backup_circle_info['path']
                    circle_name = delete_backup_circle_info['name']
                    log.info("Delete backup circle dir '%s'." % circle_path)
                    delete_circle_path = directory.delete(cluster_backup_path,
                                                          pool_name,
                                                          rbd_name,
                                                          circle_path)
                    if delete_circle_path == False:
                        log.warning("Unable to delete the backup circle dir.")
                        continue

                    log.info("Update backup circle info metadata.")
                    backup_meta.del_backup_circle_info(pool_name,
                                                       rbd_name,
                                                       delete_backup_circle_info,
                                                       key='name')
                    log.info("Update incremental backup info metadata.")
                    backup_meta.del_backup_incremental_info(pool_name,
                                                            rbd_name,
                                                            circle_name)

                    print("  delete backup circle %s of %s/%s" % (circle_name,
                                                                  pool_name,
                                                                  rbd_name))
                    task_result.add_backup_circle_delete_result(delete_circle_path)
                    backup_circle_delete_count += 1
            except Exception as e:
                log.error("Unable to complete delete of exceed backup circle. %s" % e)
                continue

        log.info("Total deleted %s backup circle directory." % backup_circle_delete_count)


        # finalize RBD backup
        # ----------------------------------------------------------
        log.info("________ Finalize RBD backup ________")

        task_result.write_to_file(backup_name)

        manager.stop()
        ceph.disconnect_cluster()

        begin_backup_timestamp = get_timestamp(begin_backup_datetime)
        log.info("******** Ceph RBD backup complete ********",
                 "use %s seconds " % get_elapsed_time(begin_backup_timestamp))


    except Exception as e:

        exc_type,exc_value,exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)

        if manager != None:
            manager.stop()
        if ceph  != None:
            ceph.disconnect_cluster()

        sys.exit(2)
Пример #25
0
def stats(ctx):
    """ Show some directory stats """
    # Resolve the directory data file from the CLI context's config,
    # then delegate to Directory for the actual reporting.
    config = ctx.obj['config']
    json_path = config.path('data:path', 'directory.json')
    Directory(json_path).stats()