Code example #1
    def tableau_status_loop(self):
        while True:
            logger.debug("status-check: About to timeout or " + \
                         "wait for a new primary to connect")
            try:
                system_key = SystemKeys.STATUS_REQUEST_INTERVAL
                request_interval = self.system[system_key]
            except ValueError:
                request_interval = self.STATUS_REQUEST_INTERVAL_DEFAULT

            new_primary = self.manager.check_status_event.wait(request_interval)

            logger.debug("status-check: new_primary: %s", new_primary)
            if new_primary:
                self.manager.clear_check_status_event()

            session = meta.Session()
            acquired = False
            try:
                # Don't do a 'tabadmin status -v' if upgrading
                acquired = self.rwlock.read_acquire(blocking=False)
                if not acquired:
                    logger.debug("status-check: Upgrading.  Won't run.")
                    continue
                self.check_status()
            finally:
                if acquired:
                    self.rwlock.read_release()
                session.rollback()
                meta.Session.remove()
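
The loop relies on a reader-writer lock with non-blocking acquisition, released in a finally clause so the session cleanup always runs. A minimal, self-contained sketch of that acquire/release discipline, using a plain threading.Lock as a stand-in for the project's rwlock (all names here are illustrative):

import threading

lock = threading.Lock()

def poll_once():
    # Initialize before the try block so the finally clause can
    # always reference 'acquired' safely.
    acquired = lock.acquire(False)  # non-blocking, like read_acquire(blocking=False)
    try:
        if not acquired:
            print("busy: skipping this cycle")
            return
        print("running the status check")
    finally:
        if acquired:
            lock.release()

poll_once()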
Code example #2
File: licensing.py  Project: xyzlat/palette
    def get(cls, agentid, **kwargs):
        session = meta.Session()
        entry = cls.get_by_agentid(agentid)
        if not entry:
            entry = LicenseEntry(agentid=agentid)
            session.add(entry)

        if 'interactors' in kwargs and kwargs['interactors'].isdigit():
            entry.license_type = LicenseEntry.LICENSE_TYPE_NAMED_USER
            entry.interactors = int(kwargs['interactors'])
            if 'viewers' in kwargs and kwargs['viewers'].isdigit():
                entry.viewers = int(kwargs['viewers'])
            entry.cores = 0
            entry.core_licenses = 0
            return entry
        elif 'cores' in kwargs:
            entry.license_type = LicenseEntry.LICENSE_TYPE_CORE
            entry.cores = kwargs['cores']
            entry.core_licenses = kwargs['core-licenses']
            entry.interactors = 0
            entry.viewers = 0
            return entry

        # Default setting to let us know the license info has been
        # received. It could be 0 interactors and 0 viewers, which
        # means "no license".
        entry.license_type = LicenseEntry.LICENSE_TYPE_NAMED_USER
        entry.cores = 0
        entry.core_licenses = 0

        return entry
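
A minimal, self-contained sketch of the same get-or-create flow against an in-memory SQLite database; the Entry model and its fields are illustrative stand-ins, not the project's actual schema:

from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Entry(Base):
    __tablename__ = 'entries'
    agentid = Column(Integer, primary_key=True)
    interactors = Column(Integer, default=0)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

def get(agentid, **kwargs):
    # Get-or-create: reuse the existing row, or add a pending one.
    entry = session.query(Entry).get(agentid)
    if not entry:
        entry = Entry(agentid=agentid)
        session.add(entry)
    if 'interactors' in kwargs and kwargs['interactors'].isdigit():
        entry.interactors = int(kwargs['interactors'])
    return entry

entry = get(1, interactors='10')
session.commit()
print(entry.agentid, entry.interactors)  # 1 10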
Code example #3
    def check_status(self):
        logger.setLevel(self.system[SystemKeys.DEBUG_LEVEL])
        # FIXME: Tie agent to domain.
        agent = self.manager.agent_by_type(AgentManager.AGENT_TYPE_PRIMARY)
        if not agent:
            logger.debug("status-check: The primary agent is either " + \
                         "not connected or not enabled.")
            return

        aconn = agent.connection
        if not aconn:
            session = meta.Session()
            logger.debug("status-check: No primary agent currently connected.")
            self.remove_all_status()
            session.commit()
            return

        # Don't do a 'tabadmin status -v' if the user is doing an action.
        acquired = aconn.user_action_lock(blocking=False)
        if not acquired:
            logger.debug("status-check: Primary agent locked for user "
                         "action. Skipping status check.")
            return

        # We don't force the user to delay starting their request
        # until the 'tabadmin status -v' is finished.
        aconn.user_action_unlock()

        self.check_status_with_connection(agent)
Code example #4
File: licensing.py  Project: xyzlat/palette
    def check(self, agent):
        server = self.server

        body = server.cli_cmd('tabadmin license', agent, timeout=60 * 10)

        if 'exit-status' not in body or body['exit-status'] != 0:
            return body
        if 'stdout' not in body:
            return body

        session = meta.Session()
        output = body['stdout']
        license_data = LicenseEntry.parse(output)
        entry = LicenseEntry.get(agentid=agent.agentid, **license_data)
        session.commit()

        msg = "License type: %s, " % str(entry.license_type)
        if entry.license_type == LicenseEntry.LICENSE_TYPE_NAMED_USER:
            msg += "interactors: %s, viewers: %s" % \
                        (str(entry.interactors), str(entry.viewers))
        elif entry.license_type == LicenseEntry.LICENSE_TYPE_CORE:
            msg += "cores used: %s of %s" % \
                    (str(entry.cores), str(entry.core_licenses))
        else:
            msg += "interactors: %s, viewers: %s, cores used: %s of %s" \
                        % (str(entry.interactors), str(entry.viewers),
                           str(entry.cores), str(entry.core_licenses))

        notification = self.server.notifications.get("tlicense")

        if entry.valid():
            if notification.color == 'red':
                data = agent.todict()
                data['stdout'] = msg
                self.server.event_control.gen(EventControl.LICENSE_VALID, data)
                # Remember when we sent the notification
                notification.modification_time = func.now()
                notification.color = 'green'
                notification.notified_color = 'green'
                notification.description = None
                session.commit()
            return license_data
        else:
            # license is invalid
            if notification.color != 'red':
                # Generate an event
                data = agent.todict()
                data['error'] = msg
                self.server.event_control.gen(EventControl.LICENSE_INVALID,
                                              data)

                notification.color = 'red'
                notification.notified_color = 'red'
                # Remember when we sent the notification
                notification.modification_time = func.now()

                session.commit()
            return server.error("License invalid on '%s': %s" % \
                                (agent.displayname, msg))
Code example #5
    def sync(cls, agent):
        stmt = 'SELECT * FROM data_connections'

        data = agent.odbc.execute(stmt)
        if 'error' in data:
            return data
        if '' not in data:
            data['error'] = "Missing '' key in query response."
            return data

        ids = []

        envid = agent.server.environment.envid

        session = meta.Session()
        for odbcdata in ODBC.load(data):
            tid = odbcdata.data['id']
            entry = DataConnection.get(envid, tid, default=None)
            if not entry:
                entry = DataConnection(envid=envid)
                session.add(entry)
            entry.envid = envid
            odbcdata.copyto(entry)
            ids.append(entry.dcid)

        session.query(DataConnection).\
            filter(not_(DataConnection.dcid.in_(ids))).\
            delete(synchronize_session='fetch')

        session.commit()

        d = {u'status': 'OK', u'count': len(data[''])}
        return d
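
The sync pattern above — upsert every row the query reports, then delete whatever is no longer present — sketched standalone; synchronize_session='fetch' keeps the session's in-memory objects consistent with the bulk delete. The Conn model is an illustrative stand-in:

from sqlalchemy import Column, Integer, String, create_engine, not_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Conn(Base):
    __tablename__ = 'conns'
    dcid = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Conn(dcid=1, name='old'), Conn(dcid=2, name='stale')])
session.commit()

def sync(reported):  # reported: {dcid: name}
    ids = []
    for dcid, name in reported.items():
        entry = session.query(Conn).get(dcid)  # get-or-create
        if not entry:
            entry = Conn(dcid=dcid)
            session.add(entry)
        entry.name = name
        ids.append(dcid)
    # Remove rows the source no longer reports.
    session.query(Conn).\
        filter(not_(Conn.dcid.in_(ids))).\
        delete(synchronize_session='fetch')
    session.commit()

sync({1: 'new', 3: 'added'})
print(sorted(c.dcid for c in session.query(Conn)))  # [1, 3]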
Code example #6
    def init_firewall_ports(self, agent):
        """Make sure the agent's firewall ports have been initialized."""

        session = meta.Session()
        rows = session.query(FirewallEntry).\
            filter(FirewallEntry.agentid == agent.agentid).\
            all()

        # Already populated
        if rows:
            return

        # It was empty so add the initial default set.

        # First the listen_port
        entry = FirewallEntry(agentid=agent.agentid,
                              name="Palette Agent",
                              port=agent.listen_port,
                              color="green")
        session.add(entry)

        # Add the others
        if agent.agent_type == AgentManager.AGENT_TYPE_PRIMARY:
            ports = FirewallManager.DEFAULT_PRIMARY_PORTS
        else:
            ports = FirewallManager.DEFAULT_NON_PRIMARY_PORTS

        for port in ports:
            entry = FirewallEntry(agentid=agent.agentid,
                                  name=port['name'],
                                  port=port['port'],
                                  color=port['color'])
            session.add(entry)
        session.commit()
Code example #7
    def move_twb_to_db(self):
        """Copy the twb file contents on the controller to the database
           and remove the twb files.
        """

        path = self.server.config.get('palette', 'workbook_archive_dir')
        controller_path = os.path.abspath(path)

        session = meta.Session()
        rows = session.query(WorkbookUpdateEntry).\
                            filter(WorkbookUpdateEntry.url != '').\
                            filter(WorkbookUpdateEntry.url != None).\
                            filter(WorkbookUpdateEntry.twb == None).\
                            all()

        for row in rows:
            twb_path = os.path.join(controller_path, row.url)
            try:
                with open(twb_path, "r") as fd_twb:
                    contents = fd_twb.read()
            except IOError as err:
                logger.error("move_twb_to_db open failed: %s", str(err))
                continue

            row.twb = contents
            session.commit()

            os.unlink(twb_path)
Code example #8
    def remove(self, fileid):
        # envid is technically not required
        session = meta.Session()
        session.query(FileEntry).\
            filter(FileEntry.envid == self.envid).\
            filter(FileEntry.fileid == fileid).\
            delete()
        session.commit()
Code example #9
    def add(self, agent, process_name, cpu, memory):
        session = meta.Session()

        entry = MetricEntry(agentid=agent.agentid,
                            process_name=process_name,
                            cpu=cpu,
                            memory=memory)
        session.add(entry)
        session.commit()
Code example #10
File: __init__.py  Project: xyzlat/palette
    def __setitem__(self, key, value):
        """ Updates the database row for key with 'value'.
        NOTE: does *not* commit the database.
        """
        # This cast implicitly checks that value is correctly typed.
        value = cast(key, value)
        session = meta.Session()
        entry = SystemEntry(envid=self.envid, key=key, value=str(value))
        session.merge(entry)
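
session.merge acts as an upsert here: a transient SystemEntry carrying the primary key is reconciled with the existing row, updating it if the key exists and inserting otherwise. A minimal sketch of that behavior (illustrative Setting model, in-memory SQLite):

from sqlalchemy import Column, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Setting(Base):
    __tablename__ = 'settings'
    key = Column(String, primary_key=True)
    value = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.merge(Setting(key='debug', value='on'))   # no row yet: inserts
session.merge(Setting(key='debug', value='off'))  # same key: updates
session.commit()
print(session.query(Setting).get('debug').value)  # off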
Code example #11
File: agent.py  Project: xyzlat/palette
    def build(cls, envid, aconn):
        """Create an agent from a new connection."""
        body = aconn.auth
        session = meta.Session()

        uuid = body['uuid']

        entry = Agent.get_by_uuid(envid, uuid)
        if entry:
            # Make a copy of the object
            entry = session.merge(entry)
            # but points at the same aconn...
        else:
            entry = Agent(envid=envid, uuid=uuid)
            session.add(entry)

        entry.conn_id = aconn.conn_id
        entry.version = body['version']
        entry.os_version = body['os-version']
        entry.processor_type = body['processor-type']
        entry.processor_count = body['processor-count']
        entry.installed_memory = body['installed-memory']
        entry.hostname = body['hostname']
        entry.fqdn = body['fqdn']
        entry.ip_address = body['ip-address']
        entry.peername = aconn.peername
        entry.listen_port = body['listen-port']

        if 'static-hostname' in body:
            entry.static_hostname = bool(body['static-hostname'])
        else:
            entry.static_hostname = False

        # Note: Do not set agent_type here since 1) We need to know
        # what the agent_type was in the case where the row existed, and
        # 2) the agent_type isn't known yet at the time we are called anyway.
        entry.username = u'palette'  # fixme
        entry.password = u'tableau2014'

        entry.install_dir = body['install-dir']

        # FIXME: make required when all agents are updated.
        if 'os-bitness' in body:
            entry.bitness = body['os-bitness']

        entry.last_connection_time = func.now()
        session.commit()

        if entry.iswin:
            entry.path = ntpath
            parts = body['data-dir'].split(':')
            entry.data_dir = ntpath.join(parts[0].upper() + ':', parts[1])
        else:
            entry.path = posixpath
            entry.data_dir = body['data-dir']
        return entry
Code example #12
    def get_tableau_status(self):
        try:
            return meta.Session().query(TableauProcess).\
                join(Agent).\
                filter(Agent.envid == self.envid).\
                filter(Agent.agent_type == 'primary').\
                filter(TableauProcess.name == 'Status').\
                one().status
        except NoResultFound:
            return TableauProcess.STATUS_UNKNOWN
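
.one() raises NoResultFound when no row matches (and MultipleResultsFound when several do), which is what makes the STATUS_UNKNOWN fallback work. A self-contained sketch with an illustrative Proc model:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound

Base = declarative_base()

class Proc(Base):
    __tablename__ = 'procs'
    pid = Column(Integer, primary_key=True)
    name = Column(String)
    status = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

def get_status():
    try:
        return session.query(Proc).\
            filter(Proc.name == 'Status').\
            one().status
    except NoResultFound:
        return 'unknown'

print(get_status())  # unknown
session.add(Proc(pid=1, name='Status', status='RUNNING'))
session.commit()
print(get_status())  # RUNNING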
Code example #13
File: mixin.py  Project: xyzlat/palette
    def max(cls, column, filters=None, default=None):
        query = meta.Session().query(func.max(getattr(cls, column)))

        if filters:
            query = cls.apply_filters(query, filters)

        # pylint: disable=maybe-no-member
        value = query.scalar()
        if value is None:
            return default
        return value
Code example #14
    def __setitem__(self, key, value):
        """ Update the system table but don't do a database commit """

        value = cast(key, value)
        if key in self:
            if value == self[key]: # always string comparison
                return

        session = meta.Session()
        entry = SystemEntry(envid=self.req.envid, key=key, value=value)
        session.merge(entry)
        dict.__setitem__(self, key, value)
Code example #15
    def _retain_some(self):
        """Retain only the configured number of workbook versions.
            Returns:
                    The number of archive versions removed.
        """

        retain_count = self.system[SystemKeys.WORKBOOK_RETAIN_COUNT]
        if not retain_count or retain_count == -1:
            return 0

        removed_count = 0

        session = meta.Session()
        # List of workbooks that have excess archived versions:
        #   [(workbookid, total-count), ...]
        # Note we select only successfully archived workbook versions
        # (url != '').  We don't want to count unsuccessfully
        # archived versions in the count of how many we have.
        results = session.query(WorkbookUpdateEntry.workbookid, func.count()).\
                  filter(WorkbookUpdateEntry.url != '').\
                  group_by(WorkbookUpdateEntry.workbookid).\
                  having(func.count() > retain_count).\
                  all()

#        logger.debug("workbooks _retain_some len: %d, results: %s",
#                                                len(results), str(results))

        for result in results:
            # Get list of old workbook archive entries to delete
            rows = session.query(WorkbookUpdateEntry).\
                    filter(WorkbookUpdateEntry.workbookid == result[0]).\
                    filter(WorkbookUpdateEntry.url != '').\
                    order_by(WorkbookUpdateEntry.timestamp.asc()).\
                    limit(result[1] - retain_count).\
                    all()

            for row in rows:
                # We have to remove the WorkbookUpdateEntry first
                # due to the foreign key constraint in files pointing to it.
                session.query(WorkbookUpdateEntry).\
                            filter(WorkbookUpdateEntry.wuid == row.wuid).\
                            delete()
                session.commit()

                self.server.files.remove_file_by_id(row.fileid)
                if row.fileid_twbx:
                    self.server.files.remove_file_by_id(row.fileid_twbx)

                # Fixme: We could increment only if the delete actually
                # succeeded.
                removed_count += 1

        return removed_count
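
The retention query — find parents with more than retain_count archived versions, then delete the oldest surplus rows — sketched standalone with an illustrative Version model; vid stands in for the timestamp ordering:

from sqlalchemy import Column, Integer, String, create_engine, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Version(Base):
    __tablename__ = 'versions'
    vid = Column(Integer, primary_key=True)
    workbookid = Column(Integer)
    url = Column(String, default='')

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all(Version(workbookid=1, url='v%d' % i) for i in range(5))
session.commit()

retain_count = 2
# Parents holding more than retain_count archived versions.
over = session.query(Version.workbookid, func.count()).\
       filter(Version.url != '').\
       group_by(Version.workbookid).\
       having(func.count() > retain_count).\
       all()
for workbookid, total in over:
    # Oldest first; delete only the surplus.
    rows = session.query(Version).\
           filter(Version.workbookid == workbookid).\
           order_by(Version.vid.asc()).\
           limit(total - retain_count).\
           all()
    for row in rows:
        session.delete(row)
session.commit()
print(session.query(Version).count())  # 2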
Code example #16
File: mixin.py  Project: xyzlat/palette
    def populate(cls):
        session = meta.Session()
        entry = session.query(cls).first()
        if entry:
            return
        if cls.defaults_filename is not None:
            rows = cls.populate_from_file(cls.defaults_filename)
        else:
            rows = cls.defaults
        for d in rows:
            obj = cls(**d)
            session.add(obj)
        session.commit()
Code example #17
    def _add(self, agentid, name, pid, status):
        """Note a session is passed.  When updating the status table, we
        do remove_all_status, then slowly add in the new status before
        doing the commit, so the table is not every empty/building if
        somebody checks it.
        """

        session = meta.Session()
        entry = TableauProcess(agentid=agentid, name=name,
                               pid=pid, status=status)
        # We merge instead of add since 'tabadmin status -v' sometimes
        # returns duplicate lines.
        session.merge(entry)
Code example #18
    def sync(cls, agent):
        envid = agent.server.environment.envid
        stmt = \
            'SELECT id, name, status, created_at, updated_at, ' +\
            'user_quota, content_admin_mode, storage_quota, metrics_level, '+\
            'status_reason, subscriptions_enabled, ' +\
            'custom_subscription_footer, custom_subscription_email, '+\
            'luid, query_limit, url_namespace ' +\
            'FROM sites'

        data = agent.odbc.execute(stmt)
        if 'error' in data:
            return data
        if '' not in data:
            data['error'] = "Missing '' key in query response."
            return data

        ids = []

        session = meta.Session()
        for row in data['']:
            entry = Site.get(envid, row[0], default=None)
            if not entry:
                entry = Site(envid=envid, id=row[0])
                session.add(entry)
            entry.name = row[1]
            entry.status = row[2]
            entry.created_at = row[3]
            entry.updated_at = row[4]
            entry.user_quota = row[5]
            entry.content_admin_mode = row[6]
            entry.storage_quota = row[7]
            entry.metrics_level = row[8]
            entry.status_reason = row[9]
            entry.subscriptions_enabled = row[10]
            entry.custom_subscription_footer = row[11]
            entry.custom_subscription_email = row[12]
            entry.luid = row[13]
            entry.query_limit = row[14]
            entry.url_namespace = row[15]
            ids.append(entry.siteid)

        # FIXME: don't delete
        session.query(Site).\
            filter(not_(Site.siteid.in_(ids))).\
            delete(synchronize_session='fetch')

        session.commit()

        d = {u'status': 'OK', u'count': len(data[''])}
        return d
Code example #19
    def _retain_some_obj(self, retain_count, obj_class):
        """Do the removal of excess WorkbookExtractEntry or
           DataSourceExtractEntry rows.

           Called with:
                - How many to retain
                - The object class (WorkbookExtractEntry or
                                    DataSourceExtractEntry)
        """

        session = meta.Session()
        # List of parentids that have excess archived versions:
        #   [(parentid, total-count), ...]
        # Note we select only successfully archived extracts
        # (fileid != None).  We don't want to count unsuccessfully
        # archived extracts in the count of how many we have.
        results = session.query(obj_class.parentid, func.count()).\
                  filter(obj_class.fileid != None).\
                  group_by(obj_class.parentid).\
                  having(func.count() > retain_count).\
                  all()

        logger.debug("refresh _retain_some_obj %s len: %d, results: %s",
                     obj_class.__tablename__, len(results), str(results))

        removed_count = 0
        for result in results:
            # Get the list of rows to delete
            rows = session.query(obj_class).\
                    filter(obj_class.parentid == result[0]).\
                    filter(obj_class.fileid != None).\
                    order_by(obj_class.sid.asc()).\
                    limit(result[1] - retain_count).\
                    all()

            for row in rows:
                # We have to remove the extract row first
                # due to the foreign key constraint in the files table
                # pointing to it.
                session.query(obj_class).\
                            filter(obj_class.sid == row.sid).\
                            delete()
                session.commit()

                self.server.files.remove_file_by_id(row.fileid)

                # Fixme: We could increment only if the delete actually
                # succeeded.
                removed_count += 1

        return removed_count
Code example #20
    def _archive_updates(self, agent, updates):
        """Attempt to archive datasources from DataSourceUpdate rows."""

        session = meta.Session()

        count = 0

        logger.debug("Datasource Archive update count: %d", len(updates))

        for update in updates:
            if not self.system[SystemKeys.DATASOURCE_RETAIN_COUNT]:
                logger.info(
                          "Datasource Archive disabled during fixup." + \
                          "  Exiting for now.")
                break

            if not self.server.odbc_ok():
                logger.info("Datasource Archive Fixup: Archive build " + \
                          "stopping due to current state")
                break

            logger.debug("Datasource Archive update refresh dsid %d",
                         update.dsid)

            session.refresh(update)
            try:
                self._archive_ds(agent, update)
            except ArchiveException as ex:
                if ex.value == ArchiveError.BAD_CREDENTIALS:
                    msg = "datasource _archive_updates: tabcmd failed due " + \
                          "to bad credentials. " + \
                          "Skipping any remaining datasource updates now."
                    logger.info(msg)
                    break
                else:
                    raise  # should never happen
            count += 1

        # Retain only configured number of versions
        if count:
            retain_removed_count = self._retain_some()
        else:
            retain_removed_count = 0

        return {
            u'status': 'OK',
            u'updates-archived': count,
            u'retain-removed-count': retain_removed_count
        }
Code example #21
    def fixup(self, agent):
        if not self.system[SystemKeys.WORKBOOK_RETAIN_COUNT]:
            logger.debug("Workbook archives are not enabled. Fixup not done.")
            return {u'disabled':
                    'Workbook Archives are not enabled.  Fixup not done.'}

        session = meta.Session()

        # potentially several thousand?
        updates = session.query(WorkbookUpdateEntry).\
                  filter(or_(WorkbookUpdateEntry.url == "",
                             WorkbookUpdateEntry.url == None)).\
                  all()

        return self._archive_updates(agent, updates)
Code example #22
    def get_by_envid_type(cls, envid, cloud_type):
        session = meta.Session()
        filters = {'envid': envid, 'cloud_type': cloud_type}

        # pylint: disable=maybe-no-member
        subquery = session.query(func.max(cls.modification_time))
        subquery = cls.apply_filters(subquery, filters).subquery()

        query = session.query(cls).filter(cls.modification_time.in_(subquery))
        # There is a *theoretical* possibility that multiple records
        # could have the same timestamp, so just take the first;
        # first() returns None when no row matches (it never raises
        # NoResultFound).
        return query.first()
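
Selecting the newest row through a max() subquery, as above, sketched standalone (illustrative Cred model; an integer stands in for the timestamp column):

from sqlalchemy import Column, Integer, create_engine, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Cred(Base):
    __tablename__ = 'creds'
    cid = Column(Integer, primary_key=True)
    modification_time = Column(Integer)  # epoch seconds, for simplicity

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Cred(cid=1, modification_time=100),
                 Cred(cid=2, modification_time=200)])
session.commit()

subquery = session.query(func.max(Cred.modification_time)).subquery()
entry = session.query(Cred).\
        filter(Cred.modification_time.in_(subquery)).\
        first()  # first() because ties are theoretically possible
print(entry.cid)  # 2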
Code example #23
    def _remove_dsu(self, update):
        """Remove an update from the datasource_updates table.
           We do this if a datasource update entry listed a new datasource
           but the datasource was deleted before we ran 'tabcmd'.
        """
        session = meta.Session()
        try:
            session.query(DataSourceUpdateEntry).\
                filter(DataSourceUpdateEntry.dsuid == update.dsuid).\
                delete()
        except NoResultFound:
            logger.error("_remove_dsu: datasource already deleted: %d",
                         update.dsuid)
            return
        session.commit()
Code example #24
    def _remove_wbu(self, update):
        """Remove an update from the workbook_updates table.
           We do this if a workbook update entry listed a new workbook
           but the workbook was deleted before we ran 'tabcmd'.
        """
        session = meta.Session()
        try:
            session.query(WorkbookUpdateEntry).\
                filter(WorkbookUpdateEntry.wuid == update.wuid).\
                delete()
        except NoResultFound:
            logger.error("_remove_wbu: workbook already deleted: %d",
                         update.wuid)
            return
        session.commit()
Code example #25
File: extracts.py  Project: xyzlat/palette
    def _check_existing_unfinished(self, agent, extract_thresholds):
        """Go through our unfinished extracts db to see if any have exceeded
           their start delay or duration thresholds.
        """

        session = meta.Session()
        rows = session.query(ExtractEntry).\
            filter(ExtractEntry.progress != 100).\
            all()

        db_now_utc = agent.odbc.get_db_now_utc()

        for row in rows:
            data = dict(agent.todict().items() + extract_thresholds.items())
            self._process(data, db_now_utc, row)
            session.commit()  # can move this after proven reliable
Code example #26
    def load(self, agent):

        envid = self.server.environment.envid
        self._prune(agent, envid)

        controldata = HttpControlData(self.server)
        userdata = self.load_users(agent)

        maxid = HttpRequestEntry.maxid(envid)
        datadict = agent.odbc.execute(self.get_maxid_statement(maxid))
        if 'error' in datadict:
            return datadict
        if '' not in datadict:
            datadict['error'] = "Missing '' key in query response."
            return datadict

        rows = []
        schema = ODBC.schema(datadict)
        counter = 0
        session = None
        for row in datadict['']:
            if counter % 1000 == 0:
                if session is not None:
                    session.commit()
                    # If maxid is None our table was empty, so don't test
                    # for alerts on the placeholder rows we brought in.
                    if maxid is not None:
                        for entry in rows:
                            self._test_for_alerts(rows, entry, agent,
                                                  controldata)
                session = meta.Session()
                rows = []
            counter += 1
            odbcdata = ODBCData(schema, row)
            entry = HttpRequestEntry()
            entry.envid = envid
            odbcdata.copyto(entry)
            system_user_id = userdata.get(entry.site_id, entry.user_id)
            entry.system_user_id = system_user_id
            session.add(entry)
            rows.append(entry)

        if session is not None:
            session.commit()
        return {u'status': 'OK', u'count': len(datadict[''])}
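
Committing every 1,000 rows, as the loader above does, bounds transaction size on large imports. A standalone sketch of the chunked-commit pattern (illustrative Request model):

from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Request(Base):
    __tablename__ = 'requests'
    rid = Column(Integer, primary_key=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

for counter in range(5000):
    if counter and counter % 1000 == 0:
        session.commit()  # flush a full batch
    session.add(Request(rid=counter))
session.commit()  # final partial batch
print(session.query(Request).count())  # 5000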
Code example #27
    def _archive_updates(self, agent, updates):
        """Attempt to archive workbooks from WorkbookUpdate rows."""

        session = meta.Session()

        count = 0
        data = {}
        logger.debug("Workbook archive update count: %d\n", len(updates))
        for update in updates:
            if not self.system[SystemKeys.WORKBOOK_RETAIN_COUNT]:
                logger.info(
                          "Workbook Archive disabled during fixup." + \
                          "  Exiting for now.")
                break

            if not self.server.odbc_ok():
                logger.info("Workbook Archive Fixup: Archive build " + \
                          "stopping due to current state")
                break

            logger.debug("Workbook archive update refresh wid %d",
                           update.workbookid)
            session.refresh(update)
            try:
                self._archive_wb(agent, update)
            except ArchiveException as ex:
                if ex.value == ArchiveError.BAD_CREDENTIALS:
                    msg = "workbook _archive_updates: tabcmd failed due to " + \
                          "bad credentials. " + \
                          "Skipping any remaining workbook updates now."
                    logger.info(msg)
                    data[u'error'] = msg
                    break
                else:
                    raise # should never happen
            count += 1

        # Retain only configured number of versions
        if count:
            retain_removed_count = self._retain_some()
        else:
            retain_removed_count = 0

        data[u'updates-archived'] = count
        data[u'retain-removed-count'] = retain_removed_count
        return data
Code example #28
    def fixup(self, agent):
        if not self.system[SystemKeys.DATASOURCE_RETAIN_COUNT]:
            logger.debug("Datasource archives are disabled. Fixup not done.")
            return {
                u'disabled':
                'Datasource Archives are not enabled.  Fixup not done.'
            }

        session = meta.Session()

        # potentially several thousand?
        updates = session.query(DataSourceUpdateEntry).\
                  filter(or_(DataSourceUpdateEntry.url == "",
                             DataSourceUpdateEntry.url == None)).\
                  all()

        return self._archive_updates(agent, updates)
Code example #29
    def _archive(self, agent, obj_class):
        """Archive extracts for the object type."""

        session = meta.Session()

        updates = session.query(obj_class).\
            filter(obj_class.fileid == None).\
            all()

        count = 0
        for update in updates:
            # fixme: When a system table entry is added for extract
            # refresh data source, check which one is being archived and
            # if it is still enabled, etc.
            if not self.system[SystemKeys.EXTRACT_REFRESH_WB_RETAIN_COUNT]:
                logger.info("Extract Archiving disabled during archiving."
                            "  Exiting for now.")
                break
            name = update.parent.name  # cache in case it fails and is removed
            try:
                filename = self._build_extract(agent, update)
            except ArchiveException as ex:
                if ex.value == ArchiveError.BAD_CREDENTIALS:
                    msg = "extract archive: tabcmd failed due to " + \
                          "bad credentials. " + \
                          "Skipping any remaining extract archives now."
                    logger.info(msg)
                    break
                else:
                    raise  # should never happen

            if not filename:
                logger.error(
                    "Failed to retrieve extract refresh: from %s: %d - %s",
                    update.__tablename__, update.parentid, name)
                continue

            # retrieval is a long process, so commit after each.
            meta.Session.commit()
            count += 1

        if count:
            self._retain_some()

        return count
Code example #30
    def _remove_refresh_entry(self, update):
        """Remove a refresh extract entry from the workbook_extracts or
           datasource_extracts table."""

        session = meta.Session()
        try:
            session.query(update.__class__).\
                filter(update.__class__.sid == update.sid).\
                delete()
        except NoResultFound:
            logger.error(
                "_remove_refresh_entry: %s extract already deleted: %d",
                update.__tablename__, update.sid)
            return

        session.commit()

        return