Example #1
    def __daemon(self):
        times = 0
        path = os.path.join(config.DATAFILES_DIR, config.DUMP_FILE)

        while self.loop:
            times += 1
            print u"starting regular backup (run %d)" % times

            backup = Backup()
            backup.main()

            print "complete.\n%d files were backed up." % backup.numberOfFiles
            backup.numberOfFiles = 0

            # Re-read the loop flag before and after sleeping so a stop
            # request written to the dump file takes effect promptly.
            self.loop = self.__read_loop_flag(path)

            time.sleep(self.interval)

            self.loop = self.__read_loop_flag(path)

    def __read_loop_flag(self, path):
        if not os.path.exists(path):
            return False
        f = open(path, 'rb')
        try:
            return pickle.load(f)
        finally:
            f.close()
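
The loop flag above comes from a pickled file on disk, so another process can stop the daemon by writing to that file. A minimal sketch, assuming the same config.DATAFILES_DIR / config.DUMP_FILE settings as in the example:

    import os
    import pickle

    import config  # assumed to expose DATAFILES_DIR and DUMP_FILE, as above

    def request_stop():
        # Write a pickled False so the daemon ends its loop at the next flag check.
        path = os.path.join(config.DATAFILES_DIR, config.DUMP_FILE)
        with open(path, 'wb') as f:
            pickle.dump(False, f)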
Example #2
	def test_CheckBackupToParent(self):
		""" 
		UnitTest: Check a backup of the test dir to its parent dir.
		"""

		config={'destination':'..',
			'directories':['.'],
			'exclude_regexps':[]}

		b=Backup(config)
		b.Backup()
Example #3
def start_backups():
    for website in website_list:
        # website_id = website[0]
        vhost = website[1]
        db_name = website[2]
        log.info("Starting backup of: %s to %s..." % (vhost, storage_provider.value))
        job = Backup(vhost, storage_provider, db_name)
        job.start()
        log.info("Completed Backup of: %s to %s..." % (vhost, storage_provider.value))
        print("~~~~~~~~~~")
    print()
    print("########### BACKUPS COMPLETE")
    print()
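
start_backups indexes positionally into each website record; a minimal sketch of the shape website_list is assumed to have (hypothetical values):

    # Hypothetical records: (website_id, vhost, db_name)
    website_list = [
        (1, "blog.example.com", "blog_db"),
        (2, "shop.example.com", "shop_db"),
    ]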
Example #4
    def backup(self):
        """
        function takes gui inputs, plugs into backup class
        runs backup
        displays relevant outputs

        flow:
         - get inputs from input boxes
         - check inputs
         - disable buttons
         - create backup object
         - start timer
         - run backup function
         - end timer
         - display time taken

        """
        target_folder = self.target_folder.get()
        backup_folder = self.backup_path.get()
        password = self.pass_box.get()

        if not self.check_inputs(target_folder, backup_folder, password):
            return

        self.disable_buttons()

        backup_obj = Backup(target_folder, backup_folder, password)

        start = time.time()
        try:
            dest = backup_obj.backup(ui_caller=self)
        except OSError as e:
            messagebox.showerror(
                "OS Error", f"Following Error Occurred During Backup: {e}")
            self("Error Occurred, Backup Aborted")
            self.enable_buttons()
            return

        end = time.time()

        t = datetime(1, 1, 1) + timedelta(seconds=(end - start))
        self(
            f"Time Taken: {t.hour} Hours: {t.minute} Minutes: {t.second} Seconds"
        )
        self.enable_buttons()

        messagebox.showinfo(
            "Backup Complete",
            f"Backup Successfully completed\n Output File: {dest}")
Example #5
def run():
    parser = OptionParser()
    parser.add_option("--version",
                      dest="print_version",
                      help="Display version number and exit",
                      action="store_true",
                      default=False)
    parser.add_option("-v",
                      "--verbose",
                      dest="verbose",
                      help="Increase verbosity",
                      action="store_true",
                      default=False)
    parser.add_option("-c",
                      "--config",
                      dest="config",
                      help="Use YAML config file as defaults")
    parser.add_option("-l",
                      "--location",
                      dest="backup_location",
                      help="Directory to save the backup(s) to (required)")
    parser.add_option("-n",
                      "--name",
                      dest="backup_name",
                      help="Backup name for this cluster/replset (required)")
    parser.add_option("-H",
                      "--host",
                      dest="host",
                      help="MongoDB host to connect to (default: localhost)",
                      default="localhost")
    parser.add_option("-P",
                      "--port",
                      dest="port",
                      type="int",
                      help="MongoDB port to connect to (default: 27017)",
                      default=27017)
    parser.add_option(
        "-u",
        "--user",
        dest="user",
        help="MongoDB user name to authenticate with (on all connections)")
    parser.add_option(
        "-p",
        "--password",
        dest="password",
        help="MongoDB password to authenticate with (on all connections)")
    parser.add_option(
        "-a",
        "--authdb",
        dest="authdb",
        help="MongoDB database name to authenticate against (on all connections)",
        default='admin')
    parser.add_option(
        "-B",
        "--backup_binary",
        dest="backup_binary",
        help="Location of mongodump binary (default: /usr/bin/mongodump)",
        default="/usr/bin/mongodump")
    parser.add_option(
        "-m",
        "--maxlag",
        dest="max_repl_lag_secs",
        type="int",
        help="Maximum replication lag of the MongoDB secondary in seconds (default: 5)",
        default=5)
    parser.add_option(
        "-b",
        "--balancer_wait",
        dest="balancer_wait_secs",
        type="int",
        help="Maximum time in seconds to wait for MongoDB balancer to stop (default: 300)",
        default=300)
    parser.add_option(
        "-R",
        "--resolver-threads",
        dest="resolver_threads",
        type="int",
        help="The number of threads to use for resolving oplogs (default: 2 per CPU)",
        default=None)
    parser.add_option(
        "-A",
        "--archiver-threads",
        dest="archiver_threads",
        type="int",
        help="The number of threads to use for archiving/compressing (default: 1 per CPU)",
        default=None)
    parser.add_option(
        "--nsca-server",
        dest="nsca_server",
        help="The host/port of the Nagios NSCA server (enables NSCA notifications)")
    parser.add_option("--nsca-password",
                      dest="nsca_password",
                      help="The password to use with the Nagios NSCA server")
    parser.add_option("--nsca-check-name",
                      dest="nsca_check_name",
                      help="The Nagios NSCA check name to report to")
    parser.add_option("--nsca-check-host",
                      dest="nsca_check_host",
                      help="The Nagios NSCA check hostname to report to")
    parser.add_option(
        "--s3-bucket-name",
        dest="upload_s3_bucket_name",
        help="The AWS S3 Bucket name to upload backups to (enables S3 backups)")
    parser.add_option(
        "--s3-bucket-prefix",
        dest="upload_s3_bucket_prefix",
        help="The AWS S3 Bucket prefix to upload backups to (default: /)",
        default="/")
    parser.add_option("--s3-access-key",
                      dest="upload_s3_access_key",
                      help="The AWS S3 Access Key to use for upload")
    parser.add_option("--s3-secret-key",
                      dest="upload_s3_secret_key",
                      help="The AWS S3 Secret Key to use for upload")
    parser.add_option(
        "--s3-url",
        dest="upload_s3_url",
        help="The AWS S3 host/url to use for upload (default: s3.amazonaws.com)",
        default="s3.amazonaws.com")
    parser.add_option(
        "--s3-threads",
        dest="upload_s3_threads",
        help="The number of threads to use for AWS S3 uploads (default: 4)",
        type="int",
        default=4)
    parser.add_option(
        "--s3-chunk-mb",
        dest="upload_s3_chunk_size_mb",
        help="The size of multipart chunks for AWS S3 uploads (default: 50)",
        type="int",
        default=50)
    parser.add_option(
        "--s3-remove-uploaded",
        dest="upload_s3_remove_uploaded",
        help="Remove local files after successful upload (default: false)",
        action="store_true",
        default=False)
    parser.add_option(
        "--no-archive",
        dest="no_archiver",
        help="Disable archiving of backups directories post-resolving",
        action="store_true",
        default=False)
    parser.add_option("--no-archive-gzip",
                      dest="no_archiver_gzip",
                      help="Disable gzip compression of archive files",
                      action="store_true",
                      default=False)
    parser.add_option(
        "--lazy",
        dest="no_oplog_tailer",
        help="Disable tailing/resolving of clusterwide oplogs. This makes a shard-consistent, not cluster-consistent backup",
        action="store_true",
        default=False)
    parser.add_option("--lock-file",
                      dest="lock_file",
                      help="Location of lock file (default: /tmp/%s.lock)" %
                      prog_name,
                      default="/tmp/%s.lock" % prog_name)
    parser.set_defaults()

    options = handle_options(parser)

    options.program_name = prog_name
    options.version = __version__
    options.git_commit = git_commit

    if options.print_version:
        print "%s version: %s, git commit hash: %s" % (prog_name, __version__,
                                                       git_commit)
        if options.verbose:
            print_python_versions()
        sys.exit(0)
    if not options.backup_name:
        parser.error('-n/--name flag is required!')
    if not options.backup_location:
        parser.error('-l/--location flag is required!')

    if options.nsca_server and not options.config:
        req_attrs = ['nsca_check_name', 'nsca_check_host']
        for attr in req_attrs:
            if not getattr(options, attr):
                parser.error(
                    '--%s is a required field when using --nsca-server!' %
                    attr)

    try:
        v = Backup(options)
        v.run()
    except Exception, e:
        # noinspection PyUnusedLocal
        print "Backup '%s' failed for mongodb instance %s:%s : %s" % (
            options.backup_name, options.host, options.port, e)
        sys.exit(1)
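
handle_options is not shown in this example; a minimal sketch of what it might do with the -c/--config YAML defaults file, assuming PyYAML and that the YAML keys match the option dest names (hypothetical helper):

    import yaml

    def handle_options(parser):
        # Hypothetical: a YAML file supplies defaults; explicit CLI flags win.
        options, _args = parser.parse_args()
        if options.config:
            with open(options.config) as f:
                defaults = yaml.safe_load(f) or {}
            for key, value in defaults.items():
                if getattr(options, key, None) is None:
                    setattr(options, key, value)
        return options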
Example #6
def main(config, host, port, user, ssh, no_relatives, verbose):

    now = datetime.now()
    logfile = '/var/log/dbt/backup_{}.log'.format(host)
    if not os.path.exists(os.path.dirname(logfile)):
        os.makedirs(os.path.dirname(logfile))
    rotate_logs = os.path.exists(logfile)

    loglevel = logging.DEBUG if verbose else logging.INFO
    logger = logging.getLogger()
    logger.setLevel(loglevel)
    handler = RotatingFileHandler(logfile, maxBytes=50000, backupCount=10)
    handler.setFormatter(
        logging.Formatter(
            '[%(asctime)s] %(levelname)-8s %(filename)-10s %(lineno)-4d %(message)s'
        ))
    logger.addHandler(handler)
    if rotate_logs:
        logger.handlers[0].doRollover()

    logger.info('==============================================')

    # Parse configuration yaml file
    if config is None or not os.path.isfile(config):
        logger.error('Error: invalid config file: {}'.format(config))
        raise FileNotFoundError(config)

    lockfile = config + '.lock'
    if os.path.exists(lockfile):
        logger.error('{} exists in the filesystem'.format(lockfile))
        raise FileExistsError(lockfile)

    open(lockfile, 'a').close()

    with open(config) as f:
        yml_config = yaml.safe_load(f)

    yml_config['target_dir'] = yml_config['target_dir'].rstrip('/')

    yml_config['user'] = user
    yml_config['port'] = port
    yml_config['host'] = host
    yml_config['no_rels'] = no_relatives
    yml_config['ssh'] = ssh
    yml_config['lockfile'] = lockfile

    logger.debug('Backup invoked with the following options:')
    logger.debug('  Configuration file: {}'.format(config))
    logger.debug("  Don't use relative paths: {}".format(no_relatives))
    if ssh:
        logger.debug('  ssh: {}'.format(ssh))
        logger.debug('  host: {}'.format(host))
        logger.debug('  user: {}'.format(user))
        logger.debug('  port: {}'.format(port))

        cmd = ['nc', '-z', '-v', host, str(port)]
        ret = subprocess.run(cmd, stderr=subprocess.PIPE)
        if ret.returncode != 0:
            logger.error('Port {} is not open on {}'.format(port, host))
            logger.error(ret.stderr)
            exit(ret.returncode)

    # Check for duplicate entries in the prio field
    prios = []
    for i in yml_config['intervals']:
        prios.append(i['prio'])
    if len(prios) != len(set(prios)):
        logger.error('Duplicate priorities defined in {}'.format(config))
        raise KeyError('prio')
    # Setup base folders and if needed create a new full backup
    init = Init(now, yml_config)
    backup = Backup(yml_config, init.get_backup_target(), now)

    os.remove(lockfile)

    end = datetime.now()
    seconds = (end - now).total_seconds()
    hours, remainder = divmod(seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    logger.info('Execution time: {} hrs {} mins {} secs'.format(
        hours, minutes, seconds))
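
Note that os.remove(lockfile) only runs on success: if Init or Backup raises, the stale lock file blocks the next invocation. A hedged sketch of the same sequence with guaranteed cleanup:

    # Sketch: guarantee lock cleanup even when the backup fails.
    open(lockfile, 'a').close()
    try:
        init = Init(now, yml_config)
        backup = Backup(yml_config, init.get_backup_target(), now)
    finally:
        os.remove(lockfile)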
Example #8
    def __init__(self, datadir):
        self.frontier = UsersFrontier(datadir + '/frontier.txt')
        self.requester = ProxiedRequester(datadir + '/proxies.txt')
        self.notif = ServicesNotifier()
        self.backup = Backup(datadir)
Example #9
    def _check_status(self):
        self._set_main_window_status("Connecting to server...", False)
        try:
            result = self._api.status()
        except (ApiError, ApiTransientError) as e:
            self._logger.error("Got error from status API: {}".format(str(e)))
            return

        self.set_main_window_title.emit(
            "TradeSkillMaster Application r{} - {}".format(
                Config.CURRENT_VERSION, self._api.get_username()))
        app_info = result['appInfo']
        if app_info['news'] != self._last_news:
            # show news
            if self._settings.news_notification:
                # remove any HTML from the news
                news = re.compile(r'<[^>]+>').sub('', app_info['news'])
                self.show_desktop_notification.emit(news, False)
            self._last_news = app_info['news']

        # update addon status
        self._addon_versions = result['addons']
        self._update_addon_status()

        # download addon updates
        self._set_main_window_status(
            "One moment. Checking for addon updates...", False)
        installed_addons = []
        install_all = False
        download_notifications = []
        for addon in self._addon_versions:
            latest_version = addon['version']
            version_type, version_int, version_str = self._wow_helper.get_installed_version(addon['name'])
            if version_type == WoWHelper.INVALID_VERSION and install_all:
                # install all addons when upgrading / downgrading
                version_int = 0
                version_str = ""
            if version_type == WoWHelper.RELEASE_VERSION or install_all:
                if latest_version == 0:
                    # remove this addon since it no longer exists
                    self._wow_helper.delete_addon(addon['name'])
                elif version_int < latest_version and self._api.get_is_premium():
                    # update this addon
                    self._download_addon(addon['name'])
                    if self._settings.addon_notification:
                        download_notifications.append(
                            "Downloaded {} {}".format(
                                addon['name'],
                                self._wow_helper.get_installed_version(
                                    addon['name'])[2]))
                    installed_addons.append(addon['name'])
                else:
                    installed_addons.append(addon['name'])
            else:
                # this is a Dev version
                installed_addons.append(addon['name'])
        if len(download_notifications) > 2:
            self.show_desktop_notification.emit("Downloading addon updates!",
                                                False)
        else:
            for text in download_notifications:
                self.show_desktop_notification.emit(text, False)
        self._set_main_window_status(
            "One moment. Backing up addon settings...", False)
        new_backups = self._wow_helper.set_addons_and_do_backups(
            installed_addons)
        if self._settings.backup_notification:
            for backup in new_backups:
                self.show_desktop_notification.emit(
                    "Created backup for {}".format(backup.account), False)
        if self._api.get_is_premium():
            # send the new backups to the TSM servers
            for backup in new_backups:
                zip_path = os.path.abspath(
                    os.path.join(Config.BACKUP_DIR_PATH,
                                 backup.get_local_zip_name()))
                with open(zip_path, "rb") as f:
                    self._logger.info("Uploading backup: {}".format(
                        backup.get_remote_zip_name()))
                    try:
                        self._api.backup(backup.get_remote_zip_name(),
                                         f.read())
                    except (ApiError, ApiTransientError) as e:
                        self._logger.error(
                            "Got error from backup API: {}".format(str(e)))

        # set the list of backups to just the local ones first
        self._backups = self._wow_helper.get_backups()
        if self._api.get_is_premium():
            self._set_main_window_status(
                "One moment. Getting backup status...", False)
            # get remote backups
            try:
                remote_backup_info = self._api.backup()
            except (ApiError, ApiTransientError) as e:
                remote_backup_info = None
                self._logger.error("Got error from backup API: {}".format(
                    str(e)))
            if remote_backup_info:
                # add the remote backups to the list
                for key, backups in remote_backup_info.items():
                    system_id, account = key.split(
                        Config.BACKUP_NAME_SEPARATOR)
                    for backup_info in backups:
                        backup = Backup(system_id=system_id,
                                        account=account,
                                        timestamp=datetime.fromtimestamp(
                                            backup_info['timestamp']),
                                        keep=backup_info['keep'],
                                        is_local=False,
                                        is_remote=True)
                        tracked = False
                        if system_id == Config.SYSTEM_ID:
                            # check if we also have this backup locally
                            for i, local_backup in enumerate(self._backups):
                                if backup == local_backup:
                                    local_backup.keep = backup.keep
                                    local_backup.is_remote = True
                                    tracked = True
                                    break
                        if not tracked:
                            self._backups.append(backup)
        self._update_backup_status()

        # update addon status again in case we changed something (i.e. downloaded updates or deleted an old addon)
        self._update_addon_status()

        # check realm data (AuctionDB / Shopping) status
        app_data = self._wow_helper.get_app_data()
        if not app_data:
            # TSM_AppHelper is not installed
            self.show_desktop_notification.emit(
                "You need to install the TradeSkillMaster_AppHelper addon!",
                True)
            self._set_main_window_status(
                "<font color='red'>You need to install <a href=\"http://www.curse.com/addons/wow/tradeskillmaster_apphelper\" style=\"color: #EC7800\">TradeSkillMaster_AppHelper</a>!</font>"
            )
            return
        auctiondb_updates = {}
        shopping_updates = {}
        self._data_sync_status = {}
        if len(result['realms']) == 0:
            # No realms setup so no point in going further
            self.show_desktop_notification.emit("You have no realms setup!",
                                                True)
            self._set_main_window_status(
                "<font color='red'>You have no <a href=\"https://tradeskillmaster.com/realms\" style=\"color: #EC7800\">realms setup</a>!</font>"
            )
            return
        for info in result['realms']:
            self._data_sync_status[info['name']] = {
                'type': "realm",
                'id': info['id'],
                'region': info['region'],
                'masterId': info['masterId'],
                'auctiondb': info['lastModified'],
                'shopping': info['lastModified'] if self._api.get_is_premium() else -1,
            }
        for info in result['regions']:
            self._data_sync_status[info['name']] = {
                'type': "region",
                'id': info['id'],
                'auctiondb': info['lastModified']
            }
        self._update_data_sync_status()
        for realm_name, info in self._data_sync_status.items():
            if info['type'] == "realm":
                if info['auctiondb'] > app_data.last_update(
                        "AUCTIONDB_MARKET_DATA", realm_name):
                    key = (info['type'], info['masterId'])
                    if key not in auctiondb_updates:
                        auctiondb_updates[key] = []
                    auctiondb_updates[key].append(info['id'])
                if info['shopping'] > app_data.last_update(
                        "SHOPPING_SEARCHES", realm_name):
                    if info['masterId'] not in shopping_updates:
                        shopping_updates[info['masterId']] = []
                    shopping_updates[info['masterId']].append(info['id'])
            elif info['type'] == "region":
                if info['auctiondb'] > app_data.last_update(
                        "AUCTIONDB_MARKET_DATA", realm_name):
                    key = (info['type'], info['id'])
                    auctiondb_updates[key] = []
            else:
                raise Exception("Invalid type {}".format(info['type']))

        hit_error = False

        # get auctiondb updates
        self._set_main_window_status(
            "One moment. Downloading AuctionDB data...", False)
        updated_realms = []
        for key, realms in auctiondb_updates.items():
            type, id = key
            try:
                data = self._api.auctiondb(type, str(id))['data']
                if type == "realm":
                    for realm_id in realms:
                        realm_name, last_modified = next(
                            (x['name'], x['lastModified'])
                            for x in result['realms'] if x['id'] == realm_id)
                        app_data.update("AUCTIONDB_MARKET_DATA", realm_name,
                                        data, last_modified)
                        updated_realms.append(realm_name)
                elif type == "region":
                    region_name, last_modified = next(
                        (x['name'], x['lastModified'])
                        for x in result['regions'] if x['id'] == id)
                    app_data.update("AUCTIONDB_MARKET_DATA", region_name, data,
                                    last_modified)
                    updated_realms.append(region_name)
                else:
                    raise Exception("Invalid type {}".format(type))
            except (ApiError, ApiTransientError) as e:
                # log an error and keep going
                self._logger.error("Got error from AuctionDB API: {}".format(
                    str(e)))
                hit_error = True
        if not hit_error and self._settings.realm_data_notification and updated_realms:
            self.show_desktop_notification.emit(
                "Updated AuctionDB data for {}".format(
                    " / ".join(updated_realms)), False)

        # get shopping updates
        self._set_main_window_status(
            "One moment. Downloading great deals data...", False)
        updated_realms = []
        for id, realms in shopping_updates.items():
            try:
                shopping_data = self._api.shopping(str(id))['data']
                for realm_id in realms:
                    realm_name, last_modified = next(
                        (x['name'], x['lastModified'])
                        for x in result['realms'] if x['id'] == realm_id)
                    app_data.update("SHOPPING_SEARCHES", realm_name,
                                    shopping_data, last_modified)
                    updated_realms.append(realm_name)
            except (ApiError, ApiTransientError) as e:
                # log an error and keep going
                self._logger.error("Got error from Shopping API: {}".format(
                    str(e)))
                hit_error = True
        if not hit_error and self._settings.realm_data_notification and updated_realms:
            self.show_desktop_notification.emit(
                "Updated Great Deals for {}".format(
                    " / ".join(updated_realms)), False)

        tsm_version_type, tsm_version_int, _ = self._wow_helper.get_installed_version("TradeSkillMaster")
        if tsm_version_type == WoWHelper.RELEASE_VERSION and tsm_version_int >= app_info['minTSMUpdateNotificationVersion']:
            lua_versions = "{" + ",".join(
                "{}={}".format(x['name'], x['version'])
                for x in self._addon_versions) + "}"
        else:
            lua_versions = "{}"
        addon_message = "{{id={id},msg=\"{msg}\"}}".format(
            **result['addonMessage'])
        app_data.update("APP_INFO", "Global", "{{version={},lastSync={},addonVersions={},message={}}}" \
                        .format(Config.CURRENT_VERSION, int(time()), lua_versions, addon_message), \
                        int(time()))
        app_data.save()
        self._update_data_sync_status()
        if hit_error:
            self._set_main_window_status("", False)
        else:
            self._set_main_window_status(
                "{}<br>Everything is up to date as of {}.".format(
                    app_info['news'],
                    QDateTime.currentDateTime().toString(
                        Qt.SystemLocaleShortDate)))
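
The local/remote merge above relies on backup == local_backup, so Backup must define equality. A minimal sketch of what that comparison might look like, assuming identity is (system_id, account, timestamp) as the merge logic suggests (hypothetical):

    def __eq__(self, other):
        # Hypothetical: two backups refer to the same archive if these match;
        # keep/is_local/is_remote are status flags, not identity.
        return (self.system_id == other.system_id
                and self.account == other.account
                and self.timestamp == other.timestamp)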
Example #10
def backup():
    print "start backup"
    backup = Backup()
    backup.main()
    print "%d files were backed up" % backup.numberOfFiles
Example #11
    feature_path = root + 'feature_data\\'

    # path to current indexing base
    indexing_path = root + 'indexing_data\\'

    # path to the backup of the indexing base before including new products
    backup_path = root + 'last_time_backup\\'
#--------------------------------------------------------------------------------------------------


#----------------------------------Check and backup before indexing----------------------------------------------------------------

    #check whether there is already an indexing base
    if os.listdir(indexing_path):
        present_of_indexing_base = 1
        Backup(indexing_path, backup_path)
    else:
        present_of_indexing_base = 0

#----------------------------------extract features of new products----------------------------------------------------------------

    #cleaning the feature folder
    RemoveFiles(feature_path)

    VF = VFExtractor(present_of_indexing_base, new_product_image_path, feature_path, indexing_path, database_image_path)
    TF = TFExtractor(present_of_indexing_base, new_product_caption_path, feature_path, indexing_path, database_keyword_path)




def run_checks():
    log.info("Running Checks...")
    for website in website_list:
        vhost = website[1]
        job = Backup(vhost, storage_provider, skip_init=True)
        job.check()
def run_policies():
    log.info("Running Policies...")
    for website in website_list:
        vhost = website[1]
        job = Backup(vhost, storage_provider, skip_init=True)
        job.policies()
def run_unlock():
    log.info("Running Unlock...")
    for website in website_list:
        vhost = website[1]
        job = Backup(vhost, storage_provider, skip_init=True)
        job.unlock()
def run_cache_cleanup():
    log.info("Running Cache Cleanup...")
    for website in website_list:
        vhost = website[1]
        job = Backup(vhost, storage_provider, skip_init=True)
        job.cache_cleanup()
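
run_checks, run_policies, run_unlock and run_cache_cleanup differ only in the Backup method they invoke; a hedged refactor sketch that keeps the same behavior:

    def run_for_all_websites(method_name):
        # Dispatch the named Backup method for every configured vhost.
        log.info("Running %s..." % method_name)
        for website in website_list:
            vhost = website[1]
            job = Backup(vhost, storage_provider, skip_init=True)
            getattr(job, method_name)()

    # e.g. run_for_all_websites("check") replaces run_checks()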
Example #16
def Cherry():
    Log.info('######################Start Cherrypy!########################')
    cherrypy.config.update({'environment'                  : 'production',
                            'engine.autoreload_on'         : False,
                            'checker.on'                   : False,
                            'server.socket_host'           : '0.0.0.0',
                            'server.socket_port'           : UtilFunc.getWebServerPort(),
                            'server.thread_pool'           : 6,
                            'server.thread_pool_max'       : 10,
                            'server.max_request_body_size' : sys.maxint,
                            'log.screen'                   : True,
                           })
    
    services = {'/storages':Storages(), '/system':System(), '/version':Version(), 
                '/files':Files(), '/share':Share(), '/search':Search(), '/logs':ESLog(),
                '/batch':Batch(), '/photos':Photos(), '/music':Music(), 
                '/video':Video(), '/backup':Backup()}
    
    if not UtilFunc.isPCMode(): 
        from Apps.AppCtrl import AppCtrl
        services['/app'] = AppCtrl()
    
    for path, service in services.items():
        mountService(service, "/api" + path)

    Log.info('Mount APIServices Complete!')
    
    cherrypy.tree.mount(portal(), '/', 
                            config={'/': {'tools.auth.on'         : False,
                                          'tools.staticdir.on'    : True,
                                          'tools.staticdir.dir'   : UtilFunc.module_path(),
                                         },
                                    })
    Log.info('Mount Portal Service Complete!')
    
    cherrypy.tree.mount(GuestShare(), '/share', config={'/': _getMountConfig(False)}) 
    Log.info('Mount GuestShare Service Complete!')
    
    try:
        server2 = _cpwsgi_server.CPWSGIServer()
        server2.bind_addr = ('0.0.0.0', 1984)
        adapter2 = _cpserver.ServerAdapter(cherrypy.engine, server2, server2.bind_addr)
        adapter2.subscribe()
        cherrypy.tree.graft(WebServer().my_crazy_app, "/web")
    
        syncdir = os.path.join(os.getcwd(),"sync")
        if not os.path.exists(syncdir):
            os.mkdir(syncdir)
        config = {"mount_path"      :"/syncservice",
                  "provider_mapping":{"webdav":syncdir},
                  "user_mapping"    :{},
                  "verbose"         :2,
                  "dir_browser"     :{
                                      "enable"          : True,
                                      "response_trailer": "",
                                      "davmount"        : False,
                                      "msmount"         : False
                                      }
                }
        cherrypy.tree.graft(WsgiDAVApp(config),"/syncservice")
        Log.info('Start WsgiDAV Complete!')
    except Exception, e:
        Log.info('WsgiDAV Start Failed! Reason[%s]'%e)
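
mountService and _getMountConfig are not shown in this example; a minimal sketch of what mountService might do, assuming it simply wraps cherrypy.tree.mount with the shared per-mount config (hypothetical):

    def mountService(service, path):
        # Hypothetical: mount an API handler under its URL prefix,
        # reusing the auth-enabled mount config.
        cherrypy.tree.mount(service, path, config={'/': _getMountConfig(True)})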