def main():
    LogHelper.start_logging("logdownloader.log")
    parser = argparse.ArgumentParser(
        description="AWS bootstrapper log downloader: " +
        "downloads instance logs from AWS S3")
    parser.add_argument(
        "--manifestPath",
        help=
        "path to a manifest file describing the jobs and data requirements for the application",
        required=True)
    parser.add_argument("--outputPath",
                        help="directory to where instance logs will be copied",
                        required=True)

    try:
        args = vars(parser.parse_args())
        manifestPath = os.path.abspath(args["manifestPath"])
        outputdir = os.path.abspath(args["outputPath"])
        s3 = boto3.resource('s3')
        app = Application(s3, manifestPath, outputdir)
        app.downloadLogs(outputdir)

    except Exception as ex:
        logging.exception("error in log downloader")
        sys.exit(1)
Example #2
def uploadInstanceLog(self, instanceId):
    """
    Uploads the log file to the S3 key for the specified instance.
    """
    s3DocumentName = LogHelper.instanceLogFileName(instanceId)
    instanceLogPath = LogHelper.instanceLogPath(
        self.s3Interface.localTempDir, instanceId)
    self.uploadInstanceData(instanceId, s3DocumentName, instanceLogPath,
                            False)
Example #3
def removefrombuffer(self, hash, backup_group):
    path = self.buffer_path_from_hash(hash, backup_group)
    try:
        os.remove(path)
    except Exception as e:
        log_helper = LogHelper()
        log = log_helper.getLogger()
        log.warn(
            {
                'action': 'Could not remove File from Buffer',
                'hash': hash,
                'exception_type': type(e),
                'exception': e
            },
            exc_info=True)
Example #4
def request(self):
    """
    Sends the SOAP request to the SMS web service.
    :return: the send result, or a failure message
    """
    log = LogHelper()
    try:
        # legacy Python 2 httplib API: build the POST request by hand
        webservice = httplib.HTTP(self.Url)
        webservice.putrequest("POST", self.MethodName)
        webservice.putheader("Host", self.Host)
        webservice.putheader("Content-type", self.ContentType)
        webservice.putheader("Content-length",
                             "%d" % len(self.SoapMessage))
        webservice.putheader("SOAPAction", self.SOAPAction)
        webservice.endheaders()
        webservice.send(self.SoapMessage)

        print(self.SoapMessage)
        log.info(self.SoapMessage)
        statuscode, statusmessage, header = webservice.getreply()

        print("Response: ", statuscode, statusmessage)
        # print "headers: "
        # print header
        res = webservice.getfile().read()
        log.info(res)
        print(res.decode('utf-8'))
        try:
            dom = minidom.parseString(res)
            nodes = dom.documentElement.getElementsByTagName(
                'SendSmsSaveLogResult')
            # getElementsByTagName returns an empty list when the tag is missing
            if not nodes or nodes[0].firstChild is None:
                return "Send failed".decode('utf-8')
            else:
                return nodes[0].firstChild.data
        except BaseException as ex:
            log.error("Error parsing the response [%s]" % ex)
            print(ex)
            return "Send failed".decode('utf-8')
    except BaseException as ex:
        log.error("Error calling the SMS send API [%s]" % ex)
        print(ex)
        return "Send failed".decode('utf-8')
Example #5
def sendsms(self, mobile, content):
    if mobile is None or mobile == '':
        msg = "Mobile number is empty!!!".decode('utf-8')
        LogHelper().info(msg)
        print(msg)
        return
    if content is None or content == '':
        msg = "SMS content is empty!!!".decode('utf-8')
        LogHelper().info(msg)
        print(msg)
        return
    # print("Mobile: %s\nSMS content: %s\n" % (mobile, content))
    SOAPMessage = self.RequestXml % (mobile, content, self.SmsSource,
                                     self.SupplierID, self.IsVoiceSms)
    # print(SOAPMessage)
    # print("\n")
    ws = WsRequest(SOAPMessage)
    result = ws.request()
    print(result)
Example #6
def main():

    LogHelper.start_logging("uploader.log")
    parser = argparse.ArgumentParser(
        description="AWS bootstrapper uploader: " +
                    "uploads the manifest and the LocalToAWS documents described in the manifest to S3")

    parser.add_argument("--manifestPath", help = "path to a manifest file describing the jobs and data requirements for the application", required=True)
    parser.add_argument("--localWorkingDir", help = "path to dir with read/write for processing data for upload and download to AWS S3", required=True)
    try:
        args = vars(parser.parse_args())
        manifestPath = os.path.abspath(args["manifestPath"])
        localWorkingDir = os.path.abspath(args["localWorkingDir"])

        s3 = boto3.resource('s3')

        app = Application(s3, manifestPath, localWorkingDir)
        app.uploadS3Documents()
    except Exception as ex:
        logging.exception("error in launcher")
        sys.exit(1)
Example #7
def main():

    LogHelper.start_logging("launcher.log")
    parser = argparse.ArgumentParser(
        description="AWS bootstrapper launcher: " +
        "uploads a manifest which contains data and commands to run on specified instances, "
        + "then initializes and launches instances with jobs specified in the manifest")

    parser.add_argument(
        "--manifestPath",
        help=
        "path to a manifest file describing the jobs and data requirements for the application",
        required=True)
    parser.add_argument(
        "--instanceConfigPath",
        help="file path to a json file with EC2 instance configuration",
        required=True)
    parser.add_argument(
        "--localWorkingDir",
        help=
        "path to dir with read/write for processing data for upload and download to AWS S3",
        required=True)
    try:
        args = vars(parser.parse_args())
        manifestPath = os.path.abspath(args["manifestPath"])
        instanceConfigPath = os.path.abspath(args["instanceConfigPath"])
        localWorkingDir = os.path.abspath(args["localWorkingDir"])
        instanceConfig = {}
        with open(instanceConfigPath) as f:
            instanceConfig = json.load(f)

        s3 = boto3.resource('s3')
        ec2 = boto3.resource('ec2',
                             region_name=instanceConfig["EC2Config"]["Region"])

        app = Application(s3, manifestPath, localWorkingDir)
        app.runInstances(ec2, instanceConfig)
    except Exception as ex:
        logging.exception("error in launcher")
        sys.exit(1)
Example #8
def main():

    LogHelper.start_logging("downloader.log")
    parser = argparse.ArgumentParser(
        description="AWS bootstrapper downloader: " +
        "Downloads AWSToLocal documents as specified in manifest, " +
        "or optionally the specified AWSToLocal documentName")

    parser.add_argument(
        "--manifestPath",
        help=
        "path to a manifest file describing the jobs and data requirements for the application",
        required=True)
    parser.add_argument(
        "--localWorkingDir",
        help=
        "path to dir with read/write for processing data for upload and download to AWS S3",
        required=True)
    parser.add_argument(
        "--documentName",
        help=
        "optional name of document to download (if unspecified all 'AWSToLocal' documents are downloaded)",
        required=False)
    try:
        args = vars(parser.parse_args())
        manifestPath = os.path.abspath(args["manifestPath"])
        localWorkingDir = os.path.abspath(args["localWorkingDir"])

        s3 = boto3.resource('s3')

        app = Application(s3, manifestPath, localWorkingDir)

        if "documentName" in args and not args["documentName"] is None:
            app.downloadS3Document(args["documentName"])
        else:
            app.downloadS3Documents()

    except Exception as ex:
        logging.exception("error in downloader")
        sys.exit(1)
Example #9
from loghelper import LogHelper
from tracehelper.tracehelper import Trace
from matplotlib import pyplot as plt


if __name__ == '__main__':
    # Initialize logging
    LOG = LogHelper.AppLog().LOGGER
    LOG.debug("init successful")
    trace = Trace([r'./test_data/a.log', r'./test_data/b.log', r'./test_data/c.log'])
    # trace = Trace([r'./test_data/c.log'])

    ###############################
    # develop
    _select = 'actimas/phs'

    my_list = trace.normalize_controller_log_dict(_select, suggestion=True)
    plt.plot(my_list)
    plt.show()
Example #10
class ScanFiles:
    backup_group = 1
    run_id = -1
    log_helper = LogHelper()
    log = log_helper.getLogger()
    db_helper = DBHelper()
    db_data = db_helper.getDictCursor()
    cursor = db_data["cursor"]
    file_helper = FileHelper()
    file_filter = []
    dir_filter = []

    def __init__(self, backup_group_id):
        self.backup_group = backup_group_id
        self.create_run()
        self.load_filters()

    def load_filters(self):
        cursor = self.cursor
        sql_loadfilefilter = 'Select expression from FILTERS ' \
                             'where (BACKUPGROUP_ID = %s OR BACKUPGROUP_ID is null) ' \
                             'and file = 1'
        sql_loaddirfilter = 'Select expression from FILTERS ' \
                            'where (BACKUPGROUP_ID = %s OR BACKUPGROUP_ID is null) ' \
                            'and dir = 1'
        try:
            cursor.execute(sql_loaddirfilter, (self.backup_group,))
            result = cursor.fetchall()
            self.dir_filter = self.compile_filters(result)

            cursor.execute(sql_loadfilefilter, (self.backup_group,))
            result = cursor.fetchall()
            self.file_filter = self.compile_filters(result)

        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)

    def compile_filters(self, result_set):
        result = []
        for data in result_set:
            raw_filter = '^(?=.*' + data["expression"].replace('*',
                                                               '(.*)') + ').*'
            print(raw_filter)
            filter = re.compile(raw_filter)
            result.append(filter)
        return result

    def check_filter(self, filters, path):
        for filter in filters:
            match = filter.match(path)
            if match:
                return True
        return False

    def create_run(self):
        cursor = self.cursor

        sql = "INSERT INTO RUNS (BACKUPGROUP_ID, TIME_STARTED) VALUES (%s, CURRENT_TIMESTAMP)"
        try:
            cursor.execute(sql, (self.backup_group,))
            self.run_id = cursor.lastrowid

            self.log.info({
                'action': 'Create Run_ID',
                'run_id': self.run_id,
                'backup_group': self.backup_group
            })
        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)

    def scan_for_files(self):
        cursor = self.cursor

        sql_insert_file = 'INSERT IGNORE INTO FILES (backupgroup_id, path, path_hash) ' \
                          'VALUES (%s, %s, md5(concat(%s, "-", %s)))'
        sql_insert_bu = """
        INSERT INTO BACKUPITEMS (RUN_ID, FILE_ID, FILESIZE, LASTMODIFIED, BACKUPGROUP_ID)
        Select %s, id, %s, %s, %s
        from FILES where path_hash = md5(concat(%s, '-', %s))
        """

        dirs = self.get_basedirs(cursor)

        # ---------------- Scan Dirs
        totalfiles = 0
        for dir in dirs:
            filesperdir = 0
            filteredfiles = 0
            started = int(round(time.time() * 1000))
            self.log.info({
                'action': 'Start scanning Dir',
                'run_id': self.run_id,
                'backup_group': self.backup_group,
                'dir': dir['PATH']
            })
            for root, dirs, files in os.walk(dir['PATH']):
                for file in files:
                    filesperdir += 1
                    file_hash = ""

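                    # periodically recycle the DB connection during long scans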
                    if filesperdir % 1000 == 0:
                        cursor = self.new_connection()

                    try:
                        filedata = {}
                        filedata['filepath'] = os.path.join(root, file)
                        filedata['mtime'] = int(
                            round(
                                os.path.getmtime(filedata['filepath']) * 1000))
                        filedata['size'] = os.stat(
                            filedata['filepath']).st_size

                        # file filter
                        filename = self.file_helper.get_filename(
                            filedata['filepath'])
                        if self.check_filter(self.file_filter, filename):
                            print("Filtered (file) out " +
                                  filedata['filepath'] + ' (' + filename + ')')
                            filteredfiles += 1
                            continue

                        # dir filter
                        parent = self.file_helper.get_parent(
                            filedata['filepath'])
                        if self.check_filter(self.dir_filter, parent):
                            print("Filtered (dir) out " +
                                  filedata['filepath'] + ' (' + parent + ')')
                            filteredfiles += 1
                            continue

                        totalfiles += 1
                        with warnings.catch_warnings():
                            warnings.simplefilter("ignore")
                            cursor.execute(
                                sql_insert_file,
                                (self.backup_group, filedata['filepath'],
                                 self.backup_group, filedata['filepath']))
                        cursor.execute(
                            sql_insert_bu,
                            (self.run_id, filedata['size'], filedata['mtime'],
                             self.backup_group, self.backup_group,
                             filedata['filepath']))

                        new_id = cursor.lastrowid

                        affected_rows, file_hash = self.map_unchanged(
                            cursor, filedata, new_id)

                        if affected_rows > 0:
                            self.log.debug({
                                'action': 'Unchanged File',
                                'path': filedata['filepath'],
                                'run_id': self.run_id,
                                'backup_group': self.backup_group,
                                'count': affected_rows
                            })
                        else:
                            file_hash = self.hash_match_or_create_item(
                                cursor, filedata, new_id)

                        if file_hash is not None:

                            buffer_status = self.check_buffer_status(
                                cursor, file_hash)

                            if buffer_status <= 0:

                                self.buffer_file(cursor, filedata, file_hash,
                                                 new_id)
                            else:
                                self.log.debug({
                                    'action': 'File already Buffered',
                                    'path': filedata['filepath'],
                                    'run_id': self.run_id,
                                    'backup_group': self.backup_group,
                                    'hash': file_hash,
                                    'backup item': new_id
                                })

                    except Exception as e:
                        cursor = self.new_connection()
                        print("Exception")  # sql error
                        print(e)
                        tb = e.__traceback__
                        traceback.print_tb(tb)

                    if totalfiles % 10000 == 0:
                        print("%s Files Scanned. Last Scanned: %s" %
                              (totalfiles, filedata))

                    # print(filedata)
            finished = int(round(time.time() * 1000))
            duration = finished - started
            divider = 1
            if filesperdir > 0:
                divider = filesperdir
            per_file = duration / divider
            self.log.info({
                'action': 'End scanning Dir',
                'run_id': self.run_id,
                'backup_group': self.backup_group,
                'dir': dir['PATH'],
                'count': filesperdir,
                'duration': duration,
                'per_file': per_file,
                'filtered': filteredfiles
            })
            cursor = self.new_connection()

        self.log.info({
            'action': 'End scanning Dirs',
            'run_id': self.run_id,
            'backup_group': self.backup_group,
            'count': totalfiles
        })

        # ------------------ SET Hashing Complete
        cursor = self.new_connection()
        sql_sethashingsuccess = 'UPDATE RUNS SET SUCESSFUL = 1 WHERE ID = %s'

        try:
            cursor.execute(sql_sethashingsuccess, (self.run_id,))
            self.log.info({
                'action': 'Scanning and Hashing successful',
                'run_id': self.run_id,
                'backup_group': self.backup_group
            })

        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)

    def buffer_file(self, cursor, filedata, new_hash, new_id):
        sql_update_buffer_status = "Update ITEMS Set BUFFER_STATUS=%s where hash = %s and backupgroup_id = %s"
        sql_check_hash_exists = "select count(*) as count, max(id) as item_id from ITEMS where hash = %s and backupgroup_id = %s"
        sql_updatebuitem = 'update BACKUPITEMS  set item_id  = %s, hash = %s where id = %s '
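        # BUFFER_STATUS codes as used below: 1 = buffered OK, -1 = source
        # changed while copying (fast changing), -2 = superseded by a new
        # hash, -3/-4 = changed again while moving the re-hashed copy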
        # Build Target Path
        bufferpath = self.file_helper.buffer_path_from_hash(
            new_hash, self.backup_group)
        self.file_helper.create_parent_if_not_exist(bufferpath)
        # Copy File
        self.file_helper.copy_file(filedata['filepath'], bufferpath)
        # Validate Hash
        tgt_hash = self.file_helper.hash_file(bufferpath)
        if tgt_hash == new_hash:
            # Set Bufferstatus to 1
            cursor.execute(sql_update_buffer_status,
                           (1, new_hash, self.backup_group))
            self.log.info({
                'action': 'File Buffered Successfully',
                'path': filedata['filepath'],
                'run_id': self.run_id,
                'backup_group': self.backup_group,
                'hash': new_hash,
                'backup item': new_id
            })

        else:
            # hash original again
            src_hash = self.file_helper.hash_file(filedata['filepath'])

            if src_hash != tgt_hash:
                # delete target and  set buffer code to -1
                self.file_helper.delete_file(bufferpath)
                cursor.execute(sql_update_buffer_status,
                               (-1, new_hash, self.backup_group))
                self.log.info({
                    'action': 'Could not Buffer: Fast Changing',
                    'path': filedata['filepath'],
                    'run_id': self.run_id,
                    'backup_group': self.backup_group,
                    'hash': new_hash,
                    'backup item': new_id
                })
            else:
                # Check if entry for new Hash exists
                cursor.execute(sql_check_hash_exists,
                               (tgt_hash, self.backup_group))
                rs2 = cursor.fetchone()
                if rs2["count"] == 0:
                    # set orig Item Entry to -2
                    cursor.execute(sql_update_buffer_status,
                                   (-2, new_hash, self.backup_group))
                    # create items entry
                    sql_insertitems = "Insert into ITEMS(backupgroup_id, hash, filesize) VALUES (%s, %s, %s)"
                    cursor.execute(sql_insertitems,
                                   (self.backup_group, tgt_hash,
                                    os.stat(bufferpath).st_size))
                    new_item_id = cursor.lastrowid
                    # move file
                    tgtpath2 = self.file_helper.buffer_path_from_hash(
                        tgt_hash, self.backup_group)
                    self.file_helper.create_parent_if_not_exist(tgtpath2)
                    self.file_helper.move_file(bufferpath, tgtpath2)
                    moved_hash = self.file_helper.hash_file(tgtpath2)
                    if tgt_hash == moved_hash:
                        # update BUI with new item and set buffer_status = 1
                        # link the backup item to the ITEMS row inserted above
                        cursor.execute(sql_updatebuitem,
                                       (new_item_id, tgt_hash, new_id))
                        cursor.execute(sql_update_buffer_status,
                                       (1, tgt_hash, self.backup_group))
                        self.log.info({
                            'action':
                            'File Buffered Successfully but in Changed Version',
                            'path': filedata['filepath'],
                            'run_id': self.run_id,
                            'backup_group': self.backup_group,
                            'hash': tgt_hash,
                            'old hash': new_hash,
                            'backup item': new_id
                        })
                    else:
                        # Delete file and update  item bufferstatus -4
                        self.file_helper.delete_file(tgtpath2)
                        cursor.execute(sql_update_buffer_status,
                                       (-4, new_hash, self.backup_group))
                        self.log.info({
                            'action':
                            'Could not Buffer: Changed and Fast Changing',
                            'path': filedata['filepath'],
                            'run_id': self.run_id,
                            'backup_group': self.backup_group,
                            'hash': new_hash,
                            'backup item': new_id
                        })
                else:
                    buffer_status = self.check_buffer_status(cursor, tgt_hash)
                    if buffer_status > 0:
                        # delete target and change bui entry
                        self.file_helper.delete_file(bufferpath)
                        cursor.execute(sql_updatebuitem,
                                       (rs2["item_id"], tgt_hash, new_id))
                        cursor.execute(sql_update_buffer_status,
                                       (1, tgt_hash, self.backup_group))
                        self.log.info({
                            'action':
                            'File Buffered Successfully Changed Version already in Buffer',
                            'path': filedata['filepath'],
                            'run_id': self.run_id,
                            'backup_group': self.backup_group,
                            'hash': tgt_hash,
                            'old hash': new_hash,
                            'backup item': new_id
                        })
                    else:
                        # move target
                        tgtpath2 = self.file_helper.buffer_path_from_hash(
                            tgt_hash, self.backup_group)
                        self.file_helper.create_parent_if_not_exist(tgtpath2)
                        self.file_helper.move_file(bufferpath, tgtpath2)
                        moved_hash = self.file_helper.hash_file(tgtpath2)
                        # validate new target
                        if tgt_hash == moved_hash:
                            cursor.execute(sql_updatebuitem,
                                           (rs2["item_id"], tgt_hash, new_id))
                            self.log.info({
                                'action':
                                'File Buffered Successfully Changed Version in existing Item',
                                'path': filedata['filepath'],
                                'run_id': self.run_id,
                                'backup_group': self.backup_group,
                                'hash': tgt_hash,
                                'old hash': new_hash,
                                'backup item': new_id
                            })
                        else:
                            # Delete target and set buffer status -3
                            self.file_helper.delete_file(tgtpath2)
                            cursor.execute(sql_update_buffer_status,
                                           (-3, new_hash, self.backup_group))
                            self.log.info({
                                'action':
                                'Could not Buffer: Fast Changing in existing item',
                                'path': filedata['filepath'],
                                'run_id': self.run_id,
                                'backup_group': self.backup_group,
                                'hash': new_hash,
                                'backup item': new_id
                            })

    def check_buffer_status(self, cursor, new_hash):
        sql_check_buffer_status = "SELECT BUFFER_STATUS FROM ITEMS I where hash = %s and backupgroup_id = %s"
        # print('[%s | %s]' % (new_hash, self.backup_group))
        cursor.execute(sql_check_buffer_status, (new_hash, self.backup_group))
        rs = cursor.fetchone()
        buffer_status = rs["BUFFER_STATUS"]
        return buffer_status

    def hash_match_or_create_item(self, cursor, filedata, new_id):
        sql_insertitems = "Insert into ITEMS(backupgroup_id, hash, filesize) VALUES (%s, %s, %s)"
        # set the hash and create an item where necessary
        sql_sethash = 'UPDATE BACKUPITEMS SET HASH = %s WHERE id = %s'
        new_hash = self.file_helper.hash_file(filedata['filepath'])
        if new_hash is None:
            self.log.warn({
                'action': 'Could not hash',
                'path': filedata['filepath'],
                'run_id': self.run_id,
                'backup_group': self.backup_group,
            })
            return new_hash
        cursor.execute(sql_sethash, (new_hash, new_id))
        sql_matchwithitems = """
                                     UPDATE BACKUPITEMS t
                                     inner join BACKUPITEMS b
                                     on t.id = b.id
                                     inner join ITEMS i
                                     on i.hash = b.hash
                                     SET b.ITEM_ID = i.id
                                     where b.id = %s and i.backupgroup_id = %s
                                 """
        matched = cursor.execute(sql_matchwithitems,
                                 (new_id, self.backup_group))
        if matched == 0:

            inserted = cursor.execute(
                sql_insertitems,
                (self.backup_group, new_hash, filedata['size']))
            matched = cursor.execute(sql_matchwithitems,
                                     (new_id, self.backup_group))
        else:
            self.log.info({
                'action': 'File Unchanged',
                'path': filedata['filepath'],
                'run_id': self.run_id,
                'backup_group': self.backup_group,
                'count': matched,
                'hash': new_hash
            })
        return new_hash

    def map_unchanged(self, cursor, filedata, new_id):
        # check whether the file is unchanged since its last hashed backup item
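        # sql_updateunchanged joins the new BACKUPITEMS row against the most
        # recent already-hashed entry for the same file (matching file_id,
        # FILESIZE and lastmodified) and copies that entry's item_id and hash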
        sql_updateunchanged = """
                                           Update BACKUPITEMS t
                                           inner join
                                           BACKUPITEMS as n
                                           on  t.id = n.id
                                           inner join BACKUPITEMS as c
                                           on c.file_id = n.file_id and c.FILESIZE = n.FILESIZE
                                           and c.lastmodified = n.lastmodified
                                           inner join (select max(id) as id from BACKUPITEMS
                                           where file_id =
                                              (Select id from FILES where path_hash = md5(concat(%s, '-', %s)))
                                           and hash is not null) x
                                           on c.id = x.id
                                           SET t.item_id = c.item_id, t.hash=c.hash
                                           where n.id = %s
                                       """
        sql_gethash = "select hash from BACKUPITEMS as b where b.id = %s"
        affected_rows = cursor.execute(
            sql_updateunchanged,
            (self.backup_group, filedata['filepath'], new_id))
        mapped_hash = None
        if affected_rows > 0:
            cursor.execute(sql_gethash, (new_id,))
            rs = cursor.fetchone()
            mapped_hash = rs["hash"]
        return affected_rows, mapped_hash

    def get_basedirs(self, cursor):
        sql_dirs = 'Select PATH from DIRECTORY where BACKUPGROUP_ID = %s'
        # ---------------- Get Relevant Base Dirs
        dirs = []
        try:
            cursor.execute(sql_dirs, (self.backup_group,))
            dirs = cursor.fetchall()
        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)
        return dirs

    def new_connection(self):
        self.db_helper.close(self.db_data)
        self.db_data = self.db_helper.getDictCursor()
        self.cursor = self.db_data["cursor"]
        return self.cursor
Example #11
def main():
    # get db cursor
    db_helper = DBHelper()
    file_helper = FileHelper()
    log_helper = LogHelper()
    log = log_helper.getLogger()
    db_data = db_helper.getDictCursor()
    cursor = db_data["cursor"]

    log.info({
        'action': 'Restore started',
        'BMU_PATH_SEARCH': os.getenv('BMU_PATH_SEARCH'),
        'BMU_PATH_REPLACE': os.getenv('BMU_PATH_REPLACE'),
        'BMU_PATH_RUNID': os.getenv('BMU_PATH_RUNID'),
        'BMU_PATH_DELIM': os.getenv('BMU_PATH_DELIM'),
        'BMU_PATH_DEPTH': os.getenv('BMU_PATH_DEPTH'),
        'BMU_PATH_SELECT': os.getenv('BMU_PATH_SELECT')
    })

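    # the query below is driven entirely by BMU_* environment variables:
    # which run to restore, how to rewrite paths (search/replace), and which
    # subtree (path split on BMU_PATH_DELIM at BMU_PATH_DEPTH) to select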
    sql = """
        select REPLACE(PATH, '%s', '%s') AS PATH, d.NAME as DRIVE, FILESIZE, i.HASH from BACKUPITEMS b
        inner join ITEMS i
        on b.item_id = i.id
        inner join DRIVES d
        on COALESCE(DRIVE1_ID, DRIVE2_ID) = d.ID
        where b.run_id = %s
        and SUBSTRING_INDEX(path,'%s',%s) = '%s'
        order by COALESCE(DRIVE1_ID, DRIVE2_ID) asc, filesize desc
    """ % (os.getenv('BMU_PATH_SEARCH'), os.getenv('BMU_PATH_REPLACE'),
           os.getenv('BMU_PATH_RUNID'), os.getenv('BMU_PATH_DELIM'),
           os.getenv('BMU_PATH_DEPTH'), os.getenv('BMU_PATH_SELECT'))
    print(sql)
    cursor.execute(sql)
    files_to_restore = cursor.fetchall()

    count = 0
    errors = ""
    error_list = []
    for file_to_restore in files_to_restore:
        # print(file_to_restore)
        unescaped_path = file_to_restore['PATH'].replace('\\\\', '\\')
        # dirty hack: adds second backslash if path starts with backslash
        if str.startswith(unescaped_path, '\\'):
            unescaped_path = '\\' + unescaped_path
        file_to_restore['PATH'] = unescaped_path
        tgt = file_to_restore['PATH']
        src = file_helper.path_from_hash(os.getenv('BMU_INT_ROOT'),
                                         file_to_restore['DRIVE'],
                                         file_to_restore['HASH'])
        if not file_helper.file_exists(tgt):
            while not file_helper.file_exists(src):
                print("Missing: " + src)
                input("Press Enter to continue...")
            if file_helper.file_exists(src):
                try:
                    file_helper.create_parent_if_not_exist(tgt)
                    file_helper.copy_file(src, tgt)
                except Exception as e:
                    print("Exception")  # sql error
                    print(e)
                    tb = e.__traceback__
                    traceback.print_tb(tb)
                    errors += "Could not Copy " + src + " to " + tgt + ": " + str(
                        e)
                    error_list.append({
                        "source": src,
                        "target": tgt,
                        "exception": str(e)
                    })
                count += 1
                print(tgt + " sucessfully restored [" + str(count) + "]")
        else:
            print(tgt + "allready exists, skipping")
        if count % 1000 == 0:
            log.info({
                'action': 'Restore progress',
                'BMU_PATH_SELECT': os.getenv('BMU_PATH_SELECT'),
                'BMU_PATH_RUNID': os.getenv('BMU_PATH_RUNID'),
                'count': count,
                'total': len(files_to_restore)
            })

    log.info({
        'action': 'Restore finished',
        'BMU_PATH_SEARCH': os.getenv('BMU_PATH_SEARCH'),
        'BMU_PATH_REPLACE': os.getenv('BMU_PATH_REPLACE'),
        'BMU_PATH_RUNID': os.getenv('BMU_PATH_RUNID'),
        'BMU_PATH_DELIM': os.getenv('BMU_PATH_DELIM'),
        'BMU_PATH_DEPTH': os.getenv('BMU_PATH_DEPTH'),
        'BMU_PATH_SELECT': os.getenv('BMU_PATH_SELECT'),
        'count': count,
        'errors': error_list
    })
Example #12
def downloadInstanceLog(self, instanceId, localDir):
    s3DocumentName = LogHelper.instanceLogFileName(instanceId)
    self.downloadInstanceData(instanceId, s3DocumentName,
                              os.path.join(localDir, s3DocumentName))
Example #13
        # print("手机号:%s\n短信内容:%s\n" % (mobile,content))
        SOAPMessage = self.RequestXml % (mobile, content, self.SmsSource,
                                         self.SupplierID, self.IsVoiceSms)
        # print(SOAPMessage)
        # print("\n")
        ws = WsRequest(SOAPMessage)
        result = ws.request()
        print(result)


if __name__ == "__main__":
    argvLen = len(sys.argv)
    if argvLen < 2:
        msg = "No arguments were given".decode('utf-8')
        # print(msg)
        LogHelper().info(msg)
        sys.exit()
    elif argvLen < 3:
        msg = "Arguments are incomplete".decode('utf-8')
        print(msg)
        LogHelper().info(msg)
        sys.exit()
    else:
        print(sys.argv[1])
        print(sys.argv[2])
    mobiles = sys.argv[1].split(',')  # ['18888888888',]
    if platform.system() == 'Windows':
        # on Windows the argument arrives GBK-encoded; convert it to UTF-8
        content = sys.argv[2].decode('gbk').encode('utf-8')
    elif platform.system() == 'Linux':
        content = sys.argv[2]
    else:
Example #14
def main():
    """to be run on by each instance as a startup command"""
    import argparse, sys
    import boto3
    #from powershell_s3 import powershell_s3
    from s3interface import S3Interface
    from manifest import Manifest
    from instancemanager import InstanceManager
    from instancemetadatafactory import InstanceMetadataFactory
    from loghelper import LogHelper
    parser = argparse.ArgumentParser(
        description="AWS Instance bootstrapper: " +
        "loads the manifest which contains data and commands to run on this instance, "
        + "downloads data from S3, runs commands, and uploads results to S3")

    parser.add_argument("--bucketName",
                        help="the name of the S3 bucket to work with",
                        required=True)
    parser.add_argument(
        "--manifestKey",
        help="the key pointing to the manifest file in the s3 bucket",
        required=True)
    parser.add_argument(
        "--instanceId",
        help="the id of this instance as defined in the manifest file",
        required=True)
    parser.add_argument(
        "--localWorkingDir",
        help=
        "a directory to store working files, it will be created if it does not exist on the instance",
        required=True)

    try:
        #boto3.set_stream_logger(name='botocore')
        args = vars(parser.parse_args())
        bootstrapper = None
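        # bootstrapper stays None until fully constructed, so the except
        # block below can tell whether uploading a status report is possible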

        bucketName = args["bucketName"]
        manifestKey = args["manifestKey"]
        instanceId = int(args["instanceId"])
        localWorkingDir = args["localWorkingDir"]

        if not os.path.exists(localWorkingDir):
            os.makedirs(localWorkingDir)
        logPath = LogHelper.instanceLogPath(localWorkingDir, instanceId)
        LogHelper.start_logging(logPath)
        logging.info("startup")
        logging.info("creating boto3 s3 resource")
        s3 = boto3.resource('s3')

        logging.info("creating S3Interface")
        s3interface = S3Interface(s3, bucketName, localWorkingDir)

        localManifestPath = os.path.join(localWorkingDir, "manifest.json")
        logging.info("downloading manifest from S3")
        s3interface.downloadFile(manifestKey, localManifestPath)
        manifest = Manifest(localManifestPath)
        metafac = InstanceMetadataFactory(manifest)
        instancemanager = InstanceManager(s3interface, manifest, metafac)
        metadata = instancemanager.downloadMetaData(instanceId)
        bootstrapper = AWSInstanceBootStrapper(instanceId, manifest,
                                               s3interface, instancemanager,
                                               metadata)
        bootstrapper.DownloadS3Documents()
        bootstrapper.RunCommands()
        bootstrapper.UploadS3Documents()
    except Exception as ex:
        logging.exception("error in bootstrapper")
        if bootstrapper is not None:
            bootstrapper.UploadStatus()
        sys.exit(1)
Example #15
class BackupFiles:
    drivepathinternal = os.getenv('BMU_INT_ROOT')
    drivepathexternal = os.getenv('BMU_EXT_ROOT')
    log_helper = LogHelper()
    log = log_helper.getLogger()
    db_helper = DBHelper()
    db_data = db_helper.getDictCursor()
    cursor = db_data["cursor"]
    file_helper = FileHelper()

    def __init__(self):
        pass

    def backup_files(self, backupgroup_id, external):
        logger = self.log
        filehelper = self.file_helper
        if external:
            drivepath = self.drivepathexternal
        else:
            drivepath = self.drivepathinternal

        drive_info = self.get_drive(backupgroup_id, external)

        logger.info({
            'action': 'Starting File Backup',
            'backup_group': backupgroup_id,
            'external': external,
            'Drive Info': drive_info
        })

        free_disk_space, free_quota = self.get_free_space(
            drive_info, drivepath)
        logger.info({
            'action': 'Free Space',
            'backup_group': backupgroup_id,
            'external': external,
            'Drive Info': drive_info,
            'free_quota': free_quota,
            'free_space': free_disk_space
        })

        if free_disk_space <= 0 or free_quota <= 0:
            logger.warn({
                'action': 'Disk Full, Aborting',
                'backup_group': backupgroup_id,
                'external': external,
                'Drive Info': drive_info,
                'free_quota': free_quota,
                'free_space': free_disk_space
            })
            return drive_info["id"]
        files_to_save = self.get_filestosave(backupgroup_id, external)
        total_files = len(files_to_save)
        files_saved = 0
        logger.info({
            'action': 'Files To backup',
            'backup_group': backupgroup_id,
            'external': external,
            'files_to_backup': total_files
        })
        skip_big = 0
        for file_to_save in files_to_save:
            # # temporary code for testing
            #
            # if file_to_save["filesize"] > 5000000000:
            #    logger.info("Skipping File to big because of temporary file Size limit 5GB : %s" % file_to_save)
            #    continue
            # # End of Temporary Code
            if free_disk_space < file_to_save[
                    "filesize"] or free_quota < file_to_save["filesize"]:
                logger.info({
                    'action': 'Skipping file: too big for remaining space',
                    'backup_group': backupgroup_id,
                    'external': external,
                    'file_to_backup': file_to_save
                })
                skip_big += 1
                continue
            target = filehelper.path_from_hash(drivepath, drive_info["name"],
                                               file_to_save["hash"])
            source = filehelper.buffer_path_from_hash(file_to_save["hash"],
                                                      backupgroup_id)

            logger.info({
                'action': 'Copying File',
                'backup_group': backupgroup_id,
                'external': external,
                'file_to_backup': file_to_save
            })
            if not filehelper.copy_file(source, target):
                logger.error({
                    'action': 'Copying File',
                    'backup_group': backupgroup_id,
                    'external': external,
                    'file_to_backup': file_to_save,
                    'source': source,
                    'target': target
                })
                self.mark_item(backupgroup_id, file_to_save["hash"], external,
                               -9)
                continue
            hash_tgt = filehelper.hash_file(target)
            if hash_tgt != file_to_save["hash"]:
                logger.error({
                    'action': 'Hash not Matching',
                    'backup_group': backupgroup_id,
                    'external': external,
                    'file_to_backup': file_to_save,
                    'hash_target': hash_tgt,
                    'target': target
                })
                hash_src_new = filehelper.hash_file(source)
                if file_to_save["hash"] == hash_src_new:
                    filehelper.delete_file(target)
                    self.mark_item(backupgroup_id, file_to_save["hash"],
                                   external, -1)
                    logger.error(
                        "File changed during copying from buffer %s : %s != %s"
                        % (target, hash_tgt, hash_src_new))
                    logger.error({
                        'action': 'File changed during copying from buffer',
                        'backup_group': backupgroup_id,
                        'external': external,
                        'file_to_backup': file_to_save,
                        'hash_target': hash_tgt,
                        'target': target,
                        'hash_src_new': hash_src_new
                    })
                    continue
                else:
                    filehelper.delete_file(target)
                    self.mark_item(backupgroup_id, file_to_save["hash"],
                                   external, -2)
                    logger.error({
                        'action':
                        'Buffered File does not produce correct hash',
                        'backup_group': backupgroup_id,
                        'external': external,
                        'file_to_backup': file_to_save,
                        'hash_target': hash_tgt,
                        'target': target,
                        'hash_src_new': hash_src_new
                    })
                    continue
            else:
                self.mark_item(backupgroup_id, file_to_save["hash"], external,
                               drive_info["id"])
                logger.info({
                    'action': 'Backup File Successful',
                    'backup_group': backupgroup_id,
                    'external': external,
                    'file_to_backup': file_to_save,
                    'hash_target': hash_tgt,
                    'target': target
                })
                files_saved += 1

            free_quota = free_quota - file_to_save["filesize"]
            free_disk_space = filehelper.freespace(drivepath)
            logger.info({
                'action': 'Remaining Free Space',
                'backup_group': backupgroup_id,
                'external': external,
                'Drive Info': drive_info,
                'free_quota': free_quota,
                'free_space': free_disk_space
            })
        logger.info({
            'action': 'Finished Backup',
            'backup_group': backupgroup_id,
            'external': external,
            'Drive Info': drive_info,
            'free_quota': free_quota,
            'free_space': free_disk_space,
            'Files_To_Save': total_files,
            'Files_Saved': files_saved
        })
        if skip_big > 0:
            return drive_info["id"]
        else:
            return 0

    def get_filestosave(self, backupgroup_id: int, external: bool):
        cursor = self.cursor
        tracking_field = 'DRIVE1_ID'
        if external:
            tracking_field = 'DRIVE2_ID'
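        # DRIVE1_ID tracks the internal copy of an item, DRIVE2_ID the
        # external copy (cf. drivepathinternal / drivepathexternal above)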
        sql_getfilesforrun = """
        Select i.id as item_id, i.hash as hash,
            i.filesize as filesize,
            i.drive1_id as drive1_id, i.drive2_id as drive2_id, i.buffer_status
            from ITEMS i
            where (i.%s is null or i.%s = 0)
            and i.buffer_status = 1
            and i.backupgroup_id = %s
            order by filesize desc
        """ % (tracking_field, tracking_field, backupgroup_id)

        # print(sql_getfilesforrun)

        try:
            cursor.execute(sql_getfilesforrun)
            files = cursor.fetchall()
            return files

        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)

    def get_drive(self, backupgroup_id, external):
        cursor = self.cursor
        sql_getdrive = """SELECT id, name, drivefull, extern, maxsize, drive_id, group_id FROM DRIVES d
            inner join DRIVES_GROUPS dg
            on d.id = dg.drive_id
            where group_id = %s and drivefull = false and extern = %s limit 1
        """ % (backupgroup_id, external)

        try:
            cursor.execute(sql_getdrive)
            result = cursor.fetchone()

            return result

        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)
            return {}

    def get_free_space(self, drive_info: dict, drivepath: str):
        filehelper = self.file_helper
        cursor = self.cursor
        disk = filehelper.freespace(drivepath)
        sql_getusedspace = """
        select sum(size) size from (
        select max(filesize) as size, i.hash  from ITEMS i
        where
        i.backupgroup_id = %s and (i.DRIVE1_ID = %s or i.DRIVE2_ID = %s)
        group by i.hash) x
        """ % (drive_info["group_id"], drive_info["id"], drive_info["id"])
        # print(sql_getusedspace)

        try:
            cursor.execute(sql_getusedspace)
            result = cursor.fetchone()
            # print(result)
            if result["size"] is None:
                logical = int(drive_info["maxsize"])
            else:
                logical = int(drive_info["maxsize"]) - int(result["size"])
            return disk, logical

        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)
            return disk, 0

    def mark_item(self, bg_id, hash, external, status):
        tracking_field = 'DRIVE1_ID'
        if external:
            tracking_field = 'DRIVE2_ID'
        cursor = self.cursor
        sql_updateitem = 'update ITEMS i set %s = %s where backupgroup_id= %s and hash = "%s" ' % \
                             (tracking_field, status, bg_id, hash)

        try:
            cursor.execute(sql_updateitem)

        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)

    def is_hash_known(self, hash, backup_group):
        cursor = self.cursor
        sql_updateitem = 'select id from ITEMS where backupgroup_id = %s and hash = \'%s\'' % \
                         (backup_group, hash)

        try:
            cursor.execute(sql_updateitem)
            data = cursor.fetchall()
            if len(data) == 0:
                return 0
            else:
                return data[0]["id"]

        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)
            return 0

    def change_item_in_bui(self, bui_id, item_id, hash):
        cursor = self.cursor
        sql_updatebuitem = 'update BACKUPITEMS  set item_id  = %s, hash = \'%s\' where id = %s ' % \
                           (item_id, hash, bui_id)
        print(sql_updatebuitem)

        try:
            cursor.execute(sql_updatebuitem)

        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)

    def create_item(self, bg_id, hash, external, status, size):
        tracking_field = 'DRIVE1_ID'
        sql_insertitem = 'insert into ITEMS (backupgroup_id, hash, %s, filesize) values (%s, \'%s\', %s, %s)' % \
                         (tracking_field, bg_id, hash, status, size)
        if external:
            tracking_field = 'DRIVE2_ID'
            sql_insertitem = 'insert into ITEMS (backupgroup_id, hash, DRIVE1_ID, DRIVE2_ID, filesize) values (%s, \'%s\',  -12, %s, %s)' % \
                             (bg_id, hash, status, size)
        cursor = self.cursor

        try:
            cursor.execute(sql_insertitem)

        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)

    def close_finished_runs(self):
        sql_get_finished = """
        Select id, coalesce(x.count, 0) as count from RUNS r
        LEFT OUTER JOIN (
            Select run_id, count(*) as count
            from BACKUPITEMS b
            inner join ITEMS i
            on (b.item_id = i.id)
            where i.DRIVE1_ID < 0 or i.DRIVE2_ID < 0
            group by run_id
        ) x
        on r.id = x.run_id
        where
        (ALL_SAVED IS NULL or ALL_SAVED = 0)
        and
        id not in (
            Select distinct b.run_id as run_id
            from BACKUPITEMS b
            inner join ITEMS i
            on (b.item_id = i.id)
            where ((i.DRIVE1_ID is null or i.DRIVE1_ID = 0) or (i.DRIVE2_ID is null or i.DRIVE2_ID = 0)) )
        """
        sql_update_run = "UPDATE RUNS SET ALL_SAVED = 1, ERRORS_SAVING = %s where ID = %s"

        cursor = self.cursor

        try:
            cursor.execute(sql_get_finished)
            runs = cursor.fetchall()
            logger = self.log
            for run in runs:
                cursor.execute(sql_update_run, (run["count"], run["id"]))
                logger.info("Saved Run %s with %s Errors" %
                            (run["id"], run["count"]))
                logger.info({
                    'action': 'Saved Runs',
                    'run_id': run["id"],
                    'Errors': run["count"]
                })

        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)

    def cleanupBuffer(self):
        fh = FileHelper()
        dbh = DBHelper()
        logger = self.log

        sql_savedbuffer = "select * from ITEMS where (DRIVE1_ID > 0  and DRIVE2_ID > 0) and buffer_status = 1 order by id "
        sql_updatebufferstatus = "UPDATE ITEMS SET BUFFER_STATUS = 2 WHERE ID = %s"
        usage = fh.bufferusage()
        print(usage)

        try:
            db = dbh.getDictCursor()
            cursor = db["cursor"]
            cursor.execute(sql_savedbuffer)
            result = cursor.fetchall()

            for file in result:
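                # evict buffered items until usage drops to 80% or below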
                if usage <= 0.8:
                    break
                fh.removefrombuffer(file["HASH"], file["BACKUPGROUP_ID"])
                usage = fh.bufferusage()
                cursor.execute(sql_updatebufferstatus, (file["ID"],))
                print("removed %s from buffer for BG %s " %
                      (file["HASH"], file["BACKUPGROUP_ID"]))
                print(usage)
                logger.info({
                    'action': 'Removed from Buffer',
                    'hash': file["HASH"],
                    'backup_group': file["BACKUPGROUP_ID"],
                    "size": file["FILESIZE"]
                })

        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)

    def set_drive_full(self, id):

        cursor = self.cursor
        sql_updateitem = 'update DRIVES set drivefull = 1 where id=%s ' % id

        try:
            cursor.execute(sql_updateitem)

        except Exception as e:
            print("Exception")  # sql error
            print(e)
            tb = e.__traceback__
            traceback.print_tb(tb)