Example #1
    def delete_vol_file(self, agent, source_fullpathname):
        """Delete a file, check the error, and return the body result.
           Note: Does not remove the entry from the files table.
           If that is needed, that must be done by the caller."""
        logger.debug("Removing file '%s'", source_fullpathname)

        # Verify file exists.
        try:
            exists_body = agent.filemanager.filesize(source_fullpathname)
        except IOError as ex:
            logger.info("filemanager.filesize('%s') failed: %s",
                        source_fullpathname, str(ex))
            return {'error': str(ex)}

        if failed(exists_body):
            logger.info("filemanager.filesize('%s') error: %s",
                        source_fullpathname, str(exists_body))
            return exists_body

        # Remove file.
        try:
            remove_body = agent.filemanager.delete(source_fullpathname)
        except IOError as ex:
            logger.info("filemanager.delete('%s') failed: %s",
                        source_fullpathname, str(ex))
            return {'error': str(ex)}

        return remove_body
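
Every example here gates its return values on a `failed()` helper that the listing never shows. Judging from the call sites, which build error bodies like `{'error': str(ex)}`, a minimal sketch could look like the following (an assumption; the real helper lives elsewhere in the palette codebase):

    def failed(body):
        # Assumed behavior: an agent/ODBC response dict signals failure
        # by carrying an 'error' key.
        return isinstance(body, dict) and 'error' in body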
Example #2
    def _extract_tds_from_tdsx(self, agent, update, dst_tdsx):
        """
            A tdsx file is just a zipped tds + maybe tde files.
            Extract the tds and return the path.
        """
        # Make sure the *.tds file doesn't exist (it shouldn't but
        # we want to be sure).
        dst_tds = dst_tdsx[0:-1]  # drop the trailing 'x' from the file ext
        try:
            agent.filemanager.delete(dst_tds)
        except IOError as ex:
            logger.debug(
                "extract_tds_from_tdsx: Expected error deleting "
                "datasource dst_tds '%s': %s", dst_tds, str(ex))

        cmd = self.PCMD + ' ' + '"' + dst_tdsx + '"'
        body = self.server.cli_cmd(cmd, agent, timeout=60 * 30)

        if not self.system[SystemKeys.DATASOURCE_SAVE_TDSX]:
            # Delete the 'tdsx' since we don't archive it.
            try:
                agent.filemanager.delete(dst_tdsx)
            except IOError as ex:
                logger.debug("Error deleting datasource dst_tdsx '%s': %s",
                             dst_tdsx, str(ex))
        if failed(body):
            self._eventgen(update, data=body)
            return None
        return dst_tds
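
As the docstring notes, a .tdsx is just a zip archive wrapping a .tds (plus optional .tde extracts), which is why dropping the trailing 'x' from the path names the extracted file. A quick illustration of the packaging, using a hypothetical filename:

    import zipfile
    # Hypothetical file; a .tdsx opens as an ordinary zip archive.
    with zipfile.ZipFile('sales.tdsx') as archive:
        tds_members = [n for n in archive.namelist() if n.endswith('.tds')]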
Example #3
    def remove_file_by_id(self, fileid):
        """Removes the file from disk or cloud.
           When done, removes the row from the files table.
        """
        file_entry = self.server.files.find_by_id(fileid)
        if not file_entry:
            logger.info("remove_file fileid %d disappeared, or wasn't added.",
                        fileid)
            return

        body = self.delfile_by_entry(file_entry)
        if failed(body):
            logger.info("remove_file failed to delete fileid %d", fileid)
        else:
            logger.debug("remove_file deleted fileid %d", fileid)
Example #4
    def _tabcmd_get(self, agent, update, tmpdir):
        """
            Run 'tabcmd get' on the agent to retrieve the twb/twbx file
            then return its path or None in the case of an error.
        """
        try:
            wb_entry = meta.Session.query(WorkbookEntry).\
                filter(WorkbookEntry.workbookid == update.workbookid).\
                one()
        except NoResultFound:
            logger.error("Missing workbook id: %d", update.workbookdid)
            return None

        url = '/workbooks/%s.twbx' % update.workbook.repository_url

        dst = agent.path.join(tmpdir,
                              self.clean_filename(wb_entry, update.revision) +
                              '.twbx')

        body = self.tabcmd_run(agent, url, dst, wb_entry.site_id)

        if failed(body):
            self._eventgen(update, data=body)
            if 'stderr' in body:
                if 'Not authorized' in body['stderr']:
                    self.system[SystemKeys.WORKBOOK_RETAIN_COUNT] = 0
                    self._eventgen(update, data=body,
                            key=EventControl.\
                                    TABLEAU_ADMIN_CREDENTIALS_FAILED_WORKBOOKS)
                    raise ArchiveException(ArchiveError.BAD_CREDENTIALS)
                elif '404' in body['stderr'] and "Not Found" in body['stderr']:
                    # The update was deleted before we
                    # got to it.  Subsequent attempts will also fail,
                    # so delete the update row to stop
                    # attempting to retrieve it again.
                    # Note: We didn't remove the update row until after
                    # _eventgen used it.
                    self._remove_wbu(update)
                return None
        return dst
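
`tabcmd_run` is not shown here, but given the URL and destination it is handed, it presumably wraps Tableau's `tabcmd get` on the agent, along the lines of this hypothetical command construction:

    # Hypothetical sketch only; the real wrapper (and its exact flags) is
    # defined elsewhere in the project.  tabcmd's documented form is:
    #   tabcmd get <url> -f <filename>
    url = '/workbooks/%s.twbx' % 'Superstore'   # made-up repository_url
    dst = r'C:\tmp\Superstore-rev3.twbx'        # made-up agent path
    cmd = 'tabcmd get "%s" -f "%s"' % (url, dst)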
Example #5
    def _extract_twb_from_twbx(self, agent, update, dst):
        """A twbx file is just a zipped twb + associated tde files.
           Extract the twb and return the path.
           Returns:
                Success:    The twb filename on the agent.
                Fail:       None
        """
        cmd = 'ptwbx ' + '"' + dst + '"'
        body = self.server.cli_cmd(cmd, agent, timeout=60*30)

        if not self.system[SystemKeys.ARCHIVE_SAVE_TWBX]:
            # Delete the 'twbx' since we don't archive it.
            try:
                agent.filemanager.delete(dst)
            except IOError as ex:
                logger.debug("Error deleting workbook dst '%s': %s",
                                dst, str(ex))
        if failed(body):
            self._eventgen(update, data=body)
            return None
        dst = dst[0:-1] # drop the trailing 'x' from the file extension.
        return dst
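
The final step relies only on the extension spelling: '.twbx' minus its trailing 'x' is '.twb'. With a hypothetical path:

    dst = r'C:\tmp\Superstore.twbx'  # hypothetical agent path
    dst = dst[0:-1]                  # now r'C:\tmp\Superstore.twb'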
Example #6
File: auth.py Project: ptzool/palette
    def load(self, agent, check_odbc_state=True):
        # pylint: disable=too-many-locals
        # pylint: disable=too-many-statements
        envid = self.server.environment.envid

        if check_odbc_state and not self.server.odbc_ok():
            return {"error": "Cannot run command while in state: %s" % \
                        self.server.state_manager.get_state()}

        stmt = \
            'SELECT system_users.name, system_users.email, ' +\
            ' system_users.hashed_password, system_users.salt, ' +\
            ' system_users.friendly_name, system_users.admin_level, ' +\
            ' system_users.created_at, system_users.id ' +\
            'FROM system_users'

        excludes = ['guest', '_system']

        data = agent.odbc.execute(stmt)

        # Send tableau readonly password-related events if appropriate.
        self._eventit(agent, data)

        if failed(data):
            return data

        session = meta.Session()

        names = ['palette']
        cache = self.load_users(agent)

        system_key = SystemKeys.ALERTS_NEW_USER_ENABLED
        alerts_new_user_enabled = self.system[system_key]
        if alerts_new_user_enabled == 'yes':
            default_email_level = 1
        else:
            default_email_level = 0

        user_count = UserProfile.user_count(envid)
        if user_count <= 1:
            first_load = True
        else:
            first_load = False

        for row in data['']:
            name = row[0]
            if name.lower() in excludes:
                continue

            sysid = row[7]
            names.append(name)

            entry = UserProfile.get_by_name(envid, name)
            if not entry:
                entry = UserProfile(envid=envid, name=name)
                entry.email_level = default_email_level
                session.add(entry)

            entry.email = row[1]
            entry.hashed_password = row[2]
            entry.salt = row[3]
            entry.friendly_name = row[4]
            entry.system_admin_level = row[5]
            entry.system_created_at = row[6]
            entry.system_user_id = sysid

            if sysid in cache:
                obj = cache[sysid]
                entry.login_at = obj.login_at
                entry.user_admin_level = obj.admin_level
                entry.licensing_role_id = obj.licensing_role_id
                entry.publisher = obj.publisher

            # On first user table import, Tableau Server Administrators
            # are set to Palette Super Admins.
            if first_load and entry.system_admin_level == 10:
                entry.roleid = Role.SUPER_ADMIN

        session.commit()

        # Entries no longer found in Tableau (deleted users) are marked
        # inactive.
        session.query(UserProfile).\
            filter(not_(UserProfile.name.in_(names))).\
            update({'active': False}, synchronize_session='fetch')

        timestamp = datetime.now().strftime(DATEFMT)
        self.system.save(SystemKeys.AUTH_TIMESTAMP, timestamp)

        d = {u'status': 'OK', u'count': len(data[''])}
        logger.debug("auth load returning: %s", str(d))
        return d
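
The ODBC result body keys its row list under an empty string, which is why the loop reads `data['']`. Based on the SELECT column order above, a row unpacks like this (illustrative values only):

    # Illustrative shape of data[''] -- every value here is invented.
    data = {'': [
        ['jdoe', 'jdoe@example.com', '<hashed-password>', '<salt>',
         'Jane Doe', 10, '2015-06-01 12:00:00', 42],
    ]}
    row = data[''][0]
    name, email, sysid = row[0], row[1], row[7]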
Example #7
    def check_port(self, entry):
        # pylint: disable=too-many-branches
        # pylint: disable=too-many-statements
        """Tests connectivity from an agent to a host/port.
           Returns "success", "fail", or "unknown" (if agent
           isn't connected)."""

        details = {
            'service_name': entry.service_name,
            'dest_port': entry.dest_port,
            'dest_hostname': entry.dest_host
        }

        if entry.max_time:
            details['max_time'] = entry.max_time

        agent = self.server.agentmanager.agent_by_agentid(entry.agentid)
        if not agent:
            logger.debug("check_port: agentid %d not connected.  Will not " + \
                         "check service_name %s dest_host '%s' dest_port '%d'",
                         entry.agentid, entry.service_name, entry.dest_host,
                         entry.dest_port)
            details['error'] = \
                "agent %d not connected.  Can't do port check." % entry.agentid
            return details

        command = "pok %s %d" % (entry.dest_host, entry.dest_port)

        body = self.server.cli_cmd(command, agent, timeout=60 * 5)
        data = agent.todict()

        if failed(body):
            logger.error(
                "check_port: agentid %d command '%s' for service '%s' " + \
                "failed: %s",
                entry.agentid, command, entry.service_name,
                body['error'])
            details['error'] = body['error']

        if 'exit-status' not in body:
            logger.error(
                "check_port: agentid %d command '%s' for service '%s' " + \
                "did not have 'exit-status' in returned body: %s",
                entry.agentid, command, entry.service_name,
                str(body))
            details['error'] = 'Missing exit-status from port check.'
            return dict(data.items() + details.items())

        if 'stdout' in body:
            try:
                stdout = json.loads(body['stdout'])
            except ValueError as ex:
                logger.error("check_port: Bad json in stdout: %s: %s\n",
                             str(ex), body['stdout'])
                stdout = {}

            if 'milliseconds' in stdout:
                try:
                    details['connect_time'] = stdout['milliseconds'] / 1000.
                except TypeError as ex:
                    logger.error(
                        "check_port: Bad milliseconds value: %s: %s\n",
                        str(ex), str(stdout))

            if 'ip' in stdout:
                details['ip'] = stdout['ip']

            if failed(stdout):
                details['error'] = stdout['error']

        if body['exit-status'] or failed(details):
            # Non-zero exit status means failure to connect or
            # resolve hostname.
            if 'error' not in details:
                details['error'] = \
                        "Connection to '%s' failed: host '%s', port %d" % \
                       (entry.service_name, entry.dest_host, entry.dest_port)
            logger.debug(details)
        elif entry.max_time and 'connect_time' in details and \
                                details['connect_time'] > entry.max_time:
            details['error'] = ("Connection time (%.1f) exceeded maximum " + \
                       "allowed (%d.0) to '%s': host '%s', port %d") % \
                       (details['connect_time'], entry.max_time,
                       entry.service_name, entry.dest_host, entry.dest_port)
            logger.debug(details)

        if failed(details):
            color = 'red'
        else:
            color = 'green'

        # Generate an event if appropriate
        if color == 'red' and entry.notified_color != 'red':
            self.server.event_control.gen(EventControl.PORT_CONNECTION_FAILED,
                                          dict(data.items() + details.items()))
        elif entry.notified_color == 'red' and color == 'green':
            data['info'] = \
                    "Connection to '%s' is now okay: host '%s', port %d" % \
                    (entry.service_name, entry.dest_host, entry.dest_port)
            self.server.event_control.gen(EventControl.PORT_CONNECTION_OKAY,
                                          dict(data.items() + details.items()))

        # Update the row
        update_dict = {'color': color, 'notified_color': color}
        if 'connect_time' in details:
            update_dict['connect_time'] = details['connect_time']
        if 'ip' in details:
            update_dict['ip_address'] = details['ip']

        meta.Session.query(PortEntry).\
            filter(PortEntry.portid == entry.portid).\
            update(update_dict,
                   synchronize_session=False)

        meta.Session.commit()

        return details
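
The agent's `pok` command evidently prints a small JSON object on stdout; judging from the fields parsed above, a successful probe might emit something like this (values invented):

    import json
    # Invented payload matching the fields check_port consumes.
    stdout = json.loads('{"milliseconds": 12, "ip": "10.0.0.5"}')
    connect_time = stdout['milliseconds'] / 1000.  # 0.012 seconds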
Example #8
class ExtractRefreshManager(Manager, ArchiveUpdateMixin):
    NAME = 'archive extract refresh'

    def add(self, item_entry, extract_entry):
        """Add a workbook or datasource entry row.
            Called with either a WorkbookEntry or DataSourceEntry row.
        """

        wb_retain_count = \
                    self.system[SystemKeys.EXTRACT_REFRESH_WB_RETAIN_COUNT]
        # future: Use separate wb and ds retain counts when UI is updated.
        ds_retain_count = wb_retain_count

        if isinstance(item_entry, WorkbookEntry):
            if wb_retain_count:
                WorkbookExtractEntry.add(item_entry, extract_entry)
        elif isinstance(item_entry, DataSourceEntry):
            if ds_retain_count:
                DataSourceExtractEntry.add(item_entry, extract_entry)
        else:
            logger.error("ExtractRefreshManager Add: Unexpected subtitle: %s",
                         extract_entry.subtitle)

    @synchronized('refresh')
    def refresh(self, agent, check_odbc_state=True):
        """Archive extract refreshes."""

        wb_retain_count = \
                    self.system[SystemKeys.EXTRACT_REFRESH_WB_RETAIN_COUNT]
        # future: Use separate wb and ds retain counts when UI is updated.
        #        ds_retain_count = wb_retain_count

        if not wb_retain_count:
            logger.debug("Extract refresh archiving is not enabled.")
            return {
                u'disabled':
                'Extract refresh archiving is not enabled.  Not done.'
            }

        # FIXME
        if check_odbc_state and not self.server.odbc_ok():
            return {u'error':
                     "Cannot run extract refresh archive while in state: %s" % \
                     self.server.state_manager.get_state()}

        self._prune_all_missed_extracts()
        return self._archive_all(agent)

    def _archive_all(self, agent):
        """Archive all extracts: Workbooks and Data Sources."""
        workbook_count = self._archive(agent, WorkbookExtractEntry)
        datasource_count = self._archive(agent, DataSourceExtractEntry)

        return {
            u'status': 'OK',
            u'workbook-extracts-archived': workbook_count,
            u'datasource-extracts-archived': datasource_count
        }

    def _archive(self, agent, obj_class):
        """Archive extracts for the object type."""

        session = meta.Session()

        updates = session.query(obj_class).\
            filter(obj_class.fileid == None).\
            all()

        count = 0
        for update in updates:
            # fixme: When a system table entry is added for extract
            # refresh data source, check which one is being archived and
            # if it is still enabled, etc.
            if not self.system[SystemKeys.EXTRACT_REFRESH_WB_RETAIN_COUNT]:
                logger.info("Extract Archiving disabled during archiving."
                            "  Exiting for now.")
                break
            name = update.parent.name  # cache in case it fails and is removed
            try:
                filename = self._build_extract(agent, update)
            except ArchiveException as ex:
                if ex.value == ArchiveError.BAD_CREDENTIALS:
                    msg = "extract archive: tabcmd failed due to " + \
                          "bad credentials. " + \
                          "Skipping any remaining extract archives now."
                    logger.info(msg)
                    break
                else:
                    raise  # should never happen

            if not filename:
                logger.error(
                    "Failed to retrieve extract refresh: from %s: %d - %s",
                    update.__tablename__, update.parentid, name)
                continue

            # retrieval is a long process, so commit after each.
            meta.Session.commit()
            count += 1

        if count:
            self._retain_some()

        return count

    def _build_extract(self, agent, update):
        """Retrieve the extract refresh.
           Returns:
                Success: The filename *on the agent*
                Failure: None
        """
        date_str = time.strftime(self.server.FILENAME_FMT)

        if isinstance(update, WorkbookExtractEntry):
            archive_dir = self.server.WORKBOOKS_REFRESH_DIR
            archive_type = FileManager.FILE_TYPE_WORKBOOK
            url = '/workbooks/%s.twbx' % update.parent.repository_url
            dst = agent.path.join(
                self.clean_filename(update.parent, date_str=date_str) +
                '.twbx')

        elif isinstance(update, DataSourceExtractEntry):
            archive_dir = self.server.DATASOURCES_REFRESH_DIR
            archive_type = FileManager.FILE_TYPE_DATASOURCE
            url = '/datasources/%s.tdsx' % update.parent.repository_url
            dst = agent.path.join(
                self.clean_filename(update.parent, date_str=date_str) +
                '.tdsx')
        else:
            raise RuntimeError("_build_extract: bad type")

        site_id = update.parent.site_id

        try:
            # fixme: Specify a minimum disk space required other than 0?
            dcheck = DiskCheck(self.server, agent, archive_dir, archive_type,
                               0)
        except DiskException as ex:
            self._eventgen(update, "refresh archive disk check: " + str(ex))
            return None

        dst = agent.path.join(dcheck.primary_dir, dst)

        body = self.tabcmd_run(agent, url, dst, site_id)

        if failed(body):
            self._eventgen(update, data=body)
            if 'stderr' in body:
                if 'Not authorized' in body['stderr']:
                    retain_key = SystemKeys.EXTRACT_REFRESH_WB_RETAIN_COUNT
                    self.system[retain_key] = 0
                    self._eventgen(update, data=body, key=EventControl.\
                                   TABLEAU_ADMIN_CREDENTIALS_FAILED_EXTRACTS)
                    raise ArchiveException(ArchiveError.BAD_CREDENTIALS)
                elif '404' in body['stderr'] and "Not Found" in body['stderr']:
                    # The extract refresh was deleted before we
                    # got to it.  Subsequent attempts will also fail,
                    # so delete the row to stop attempting to retrieve it again.
                    # Note: We didn't remove the row until after
                    # _eventgen used it.
                    self._remove_refresh_entry(update)
            return None

        if dst is None:
            # Defensive check; tabcmd_run failures are handled above.
            return None

        place = self.archive_file(agent, dcheck, dst)
        update.fileid = place.placed_file_entry.fileid

        return dst