Example #1
    def __init__(self):
        # Caution: super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; naming the class explicitly in super()
        # is the safe Python 2 idiom.
        super(self.__class__, self).__init__()

        self._mysql = MySQL(**config.mysqlhistory.db_params)

        # Name-to-ID lookup caches
        self._site_id_map = {}
        self._dataset_id_map = {}
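
Each of these constructors unpacks a db_params mapping into the MySQL wrapper. A minimal sketch of the assumed shape, using the same connection keywords that appear explicitly in Example #5 (the file, group, and schema values here are illustrative placeholders, not from the source):

# Hypothetical config entry; the keyword names match the explicit
# MySQL(...) calls in Example #5, the values are placeholders.
db_params = {
    'config_file': '/etc/my.cnf',    # client config file to read
    'config_group': 'mysql-dynamo',  # section within that file
    'db': 'dynamohistory',           # schema to connect to
}

mysql = MySQL(**db_params)           # equivalent to the calls above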
Example #2
    def __init__(self):
        # Initialize each base interface explicitly (Python 2 style
        # multiple inheritance: every parent sets up its own state)
        CopyInterface.__init__(self)
        DeletionInterface.__init__(self)
        SiteInfoSourceInterface.__init__(self)
        ReplicaInfoSourceInterface.__init__(self)
        DatasetInfoSourceInterface.__init__(self)
        self._mysql = MySQL(**config.mysqlregistry.db_params)
Example #3
    def __init__(self,
                 application,
                 service='dynamo',
                 asuser='',
                 db_params=config.registry.db_params):
        self._mysql = MySQL(**db_params)

        self.application = application
        self.service = service
        if asuser:
            self.user = asuser
        else:
            self.user = config.activitylock.default_user
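
A hypothetical instantiation of this constructor. The class name ActivityLock is invented here for illustration (only the signature above comes from the source), and the application name is a placeholder:

# Hypothetical class name and arguments; 'service' defaults to 'dynamo'
# and the user falls back to config.activitylock.default_user.
lock = ActivityLock('detox', asuser='jsmith')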
Example #4
    def __init__(self):
        self._last_update = 0  # unix time of last update
        self._mysqlreg = MySQL(**config.registry.db_params)
        self._mysqlhist = MySQL(**config.mysqlhistory.db_params)
Example #5
def main(site):
    """
    Gets the listing from the dynamo database, and remote XRootD listings of a given site.
    The differences are compared to deletion queues and other things.

    .. Note::
       If you add things, list them in the module docstring.

    The differences that should be acted on are copied to the summary webpage
    and entered into the dynamoregister database.

    :param str site: The site to run the check over
    :returns: missing files, size, orphan files, size
    :rtype: list, long, list, long
    """

    start = time.time()

    prev_missing = '%s_compare_missing.txt' % site
    prev_set = set()
    if os.path.exists(prev_missing):
        with open(prev_missing, 'r') as prev_file:
            for line in prev_file:
                prev_set.add(line.strip())

        if int(config.config_dict().get('SaveCache', 0)):
            prev_new_name = '%s.%s' % (
                prev_missing,
                datetime.datetime.fromtimestamp(
                    os.stat(prev_missing).st_mtime).strftime('%y%m%d'))
        else:
            prev_new_name = prev_missing

        shutil.move(
            prev_missing,
            os.path.join(config.config_dict()['CacheLocation'], prev_new_name))

    # All of the files and summary will be dumped here
    webdir = config.config_dict()['WebDir']

    # Open a connection temporarily to make sure we only list good sites
    status_check = MySQL(config_file='/etc/my.cnf',
                         db='dynamo',
                         config_group='mysql-dynamo')
    status = status_check.query('SELECT status FROM sites WHERE name = %s',
                                site)[0]

    if status != 'ready':
        LOG.error('Site %s status is %s', site, status)

        # Note the attempt to do listing
        conn = sqlite3.connect(os.path.join(webdir, 'stats.db'))
        curs = conn.cursor()
        curs.execute(
            """
            REPLACE INTO stats VALUES
            (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME(DATETIME(), "-4 hours"), ?, ?)
            """, (site, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))

        conn.commit()
        conn.close()

        sys.exit(0)

    # Close the connection while we are getting the trees together
    status_check.close()

    inv_tree = getinventorycontents.get_db_listing(site)

    # Reset the DirectoryList for the XRootDLister to run on
    config.DIRECTORYLIST = [
        directory.name for directory in inv_tree.directories
    ]

    site_tree = getsitecontents.get_site_tree(site)

    # Get whether or not the site is debugged
    conn = sqlite3.connect(os.path.join(webdir, 'stats.db'))
    curs = conn.cursor()
    curs.execute('SELECT isgood FROM sites WHERE site = ?', (site, ))
    is_debugged = curs.fetchone()[0]
    conn.close()

    # Create the function to check orphans and missing

    # First, datasets in the deletions queue can be missing
    acceptable_missing = checkphedex.set_of_deletions(site)

    # Orphan files cannot belong to any dataset that should be at the site
    inv_sql = MySQL(config_file='/etc/my.cnf',
                    db='dynamo',
                    config_group='mysql-dynamo')
    acceptable_orphans = set(
        inv_sql.query(
            """
            SELECT datasets.name FROM sites
            INNER JOIN dataset_replicas ON dataset_replicas.site_id=sites.id
            INNER JOIN datasets ON dataset_replicas.dataset_id=datasets.id
            WHERE sites.name=%s
            """, site))

    # Orphan files may be a result of deletion requests
    acceptable_orphans.update(acceptable_missing)

    # Ignored datasets will not give a full listing, so they can't be accused of having orphans
    acceptable_orphans.update(
        inv_sql.query('SELECT name FROM datasets WHERE status=%s', 'IGNORED'))

    # Do not delete anything that is protected by Unified
    protected_unmerged = get_json('cmst2.web.cern.ch',
                                  '/cmst2/unified/listProtectedLFN.txt')
    acceptable_orphans.update(
        '/%s/%s-%s/%s' % (split_name[4], split_name[3],
                          split_name[6], split_name[5])
        for split_name in (name.split('/')
                           for name in protected_unmerged['protected']))

    LOG.debug('Acceptable orphans: \n%s\n', '\n'.join(acceptable_orphans))

    ignore_list = config.config_dict().get('IgnoreDirectories', [])

    def double_check(file_name, acceptable):
        """
        Checks the file name against a list of datasets to not list files from.

        :param str file_name: LFN of the file
        :param set acceptable: Datasets to not list files from
                               (Acceptable orphans or missing)
        :returns: Whether the file belongs to a dataset in the list or not
        :rtype: bool
        """
        LOG.debug('Checking file_name: %s', file_name)

        # Skip over paths that include part of the list of ignored directories
        for pattern in ignore_list:
            if pattern in file_name:
                return True

        split_name = file_name.split('/')

        try:
            return '/%s/%s-%s/%s' % (split_name[4], split_name[3],
                                     split_name[6],
                                     split_name[5]) in acceptable
        except IndexError:
            LOG.warning('Strange file name: %s', file_name)
            return True

    check_orphans = lambda x: double_check(x, acceptable_orphans)
    check_missing = lambda x: double_check(x, acceptable_missing)

    # Do the comparison
    missing, m_size, orphan, o_size = datatypes.compare(
        inv_tree,
        site_tree,
        '%s_compare' % site,
        orphan_check=check_orphans,
        missing_check=check_missing)

    LOG.debug('Missing size: %i, Orphan size: %i', m_size, o_size)

    # Enter things for site in registry
    if os.environ['USER'] == 'dynamo':
        reg_sql = MySQL(config_file='/etc/my.cnf',
                        db='dynamoregister',
                        config_group='mysql-dynamo')
    else:
        reg_sql = MySQL(config_file=os.path.join(os.environ['HOME'], 'my.cnf'),
                        db='dynamoregister',
                        config_group='mysql-register-test')

    # Determine if files should be entered into the registry

    many_missing = len(missing) > int(config.config_dict()['MaxMissing'])
    many_orphans = len(orphan) > int(config.config_dict()['MaxOrphan'])

    if is_debugged and not many_missing and not many_orphans:

        def execute(query, *args):
            """
            Executes the query on the registry and outputs a log message depending on query

            :param str query: The SQL query to execute
            :param args: The arguments to the SQL query
            """

            reg_sql.query(query, *args)

            if 'transfer_queue' in query:
                LOG.info('Copying %s from %s', args[0], args[1])
            elif 'deletion_queue' in query:
                LOG.info('Deleting %s', args[0])

    else:
        if many_missing:
            LOG.error('Too many missing files: %i, you should investigate.',
                      len(missing))

        if many_orphans:
            LOG.error(
                'Too many orphan files: %i out of %i, you should investigate.',
                len(orphan), site_tree.get_num_files())

        execute = lambda *_: 0

    # Then do entries, if the site is in the debugged status

    def add_transfers(line, sites):
        """
        Add the file into the transfer queue for multiple sites.

        :param str line: The file LFN to transfer
        :param list sites: Sites to try to transfer from
        :returns: Whether or not the entry was a success
        :rtype: bool
        """

        # Only queue transfers for files that were also missing in the
        # previous run (or when there is no previous list to confirm against)
        if line in prev_set or not prev_set:
            for location in sites:
                execute(
                    """
                    INSERT IGNORE INTO `transfer_queue`
                    (`file`, `site_from`, `site_to`, `status`, `reqid`)
                    VALUES (%s, %s, %s, 'new', 0)
                    """, line, location, site)

        return bool(sites)

    # Setup a query for sites, with added condition at the end
    site_query = """
                 SELECT sites.name FROM sites
                 INNER JOIN block_replicas ON sites.id = block_replicas.site_id
                 INNER JOIN files ON block_replicas.block_id = files.block_id
                 WHERE files.name = %s AND sites.name != %s
                 AND sites.status = 'ready'
                 AND block_replicas.is_complete = 1
                 AND group_id != 0
                 {0}
                 """

    # Track files with no sources
    no_source_files = []

    for line in missing:

        # Get sites that are not tape
        sites = inv_sql.query(
            site_query.format('AND sites.storage_type != "mss"'), line, site)

        if not add_transfers(line, sites):
            # Track files without disk source
            no_source_files.append(line)

            # Get sites that are tape
            sites = inv_sql.query(
                site_query.format('AND sites.storage_type = "mss"'), line,
                site)

            add_transfers(line, sites)

    # Only get the empty nodes that are not in the inventory tree
    for line in orphan + [
            empty_node for empty_node in site_tree.empty_nodes_list()
            if not inv_tree.get_node('/'.join(empty_node.split('/')[2:]),
                                     make_new=False)]:
        execute(
            """
            INSERT IGNORE INTO `deletion_queue`
            (`file`, `site`, `status`) VALUES
            (%s, %s, 'new')
            """, line, site)

    reg_sql.close()

    with open('%s_missing_nosite.txt' % site, 'w') as nosite:
        for line in no_source_files:
            nosite.write(line + '\n')

    # We want to track which blocks missing files are coming from
    track_missing_blocks = defaultdict(
        lambda: {
            'errors': 0,
            'blocks': defaultdict(lambda: {
                'group': '',
                'errors': 0
            })
        })

    blocks_query = """
                   SELECT blocks.name, IFNULL(groups.name, 'Unsubscribed') FROM blocks
                   INNER JOIN files ON files.block_id = blocks.id
                   INNER JOIN block_replicas ON block_replicas.block_id = files.block_id
                   INNER JOIN sites ON block_replicas.site_id = sites.id
                   LEFT JOIN groups ON block_replicas.group_id = groups.id
                   WHERE files.name = %s AND sites.name = %s
                   """

    with open('%s_compare_missing.txt' % site, 'r') as input_file:
        for line in input_file:
            split_name = line.split('/')
            dataset = '/%s/%s-%s/%s' % (split_name[4], split_name[3],
                                        split_name[6], split_name[5])

            output = inv_sql.query(blocks_query, line.strip(), site)

            if not output:
                LOG.warning('The following SQL statement failed: %s',
                            blocks_query % (line.strip(), site))
                LOG.warning(
                    'Most likely cause is dynamo update between the listing and now'
                )
                from_phedex = get_json(
                    'cmsweb.cern.ch',
                    '/phedex/datasvc/json/prod/filereplicas',
                    params={
                        'node': site,
                        'LFN': line.strip()
                    },
                    use_cert=True)

                try:
                    phedex_block = from_phedex['phedex']['block'][0]
                    output = [(phedex_block['name'].split('#')[1],
                               phedex_block['replica'][0]['group'])]
                except IndexError:
                    LOG.error('File replica not in PhEDEx either!')
                    LOG.error('Skipping block level report for this file.')
                    continue

            block, group = output[0]

            track_missing_blocks[dataset]['errors'] += 1
            track_missing_blocks[dataset]['blocks'][block]['errors'] += 1
            track_missing_blocks[dataset]['blocks'][block]['group'] = group

    inv_sql.close()

    # Output file with the missing datasets
    with open('%s_missing_datasets.txt' % site, 'w') as output_file:
        for dataset, vals in sorted(track_missing_blocks.iteritems(),
                                    key=lambda x: x[1]['errors'],
                                    reverse=True):

            for block_name, block in sorted(vals['blocks'].iteritems()):
                output_file.write('%10i    %-17s  %s#%s\n' %
                                  (block['errors'], block['group'],
                                   dataset, block_name))

    # If there were permissions or connection issues, no files would be listed
    # Otherwise, copy the output files to the web directory
    shutil.copy('%s_missing_datasets.txt' % site, webdir)
    shutil.copy('%s_missing_nosite.txt' % site, webdir)
    shutil.copy('%s_compare_missing.txt' % site, webdir)
    shutil.copy('%s_compare_orphan.txt' % site, webdir)

    if (os.environ.get('ListAge') is None) and (os.environ.get('InventoryAge')
                                                is None):

        # Update the runtime stats on the stats page if the listing settings are not changed
        conn = sqlite3.connect(os.path.join(webdir, 'stats.db'))
        curs = conn.cursor()

        curs.execute(
            'INSERT INTO stats_history SELECT * FROM stats WHERE site=?',
            (site, ))
        curs.execute(
            """
            REPLACE INTO stats VALUES
            (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME(DATETIME(), "-4 hours"), ?, ?)
            """,
            (site,
             time.time() - start,
             site_tree.get_num_files(),
             site_tree.count_nodes(),
             len(site_tree.empty_nodes_list()),
             config.config_dict().get('NumThreads',
                                      config.config_dict().get('MinThreads', 0)),
             len(missing), m_size,
             len(orphan), o_size,
             len(no_source_files),
             site_tree.get_num_files(unlisted=True)))

        conn.commit()
        conn.close()

    return missing, m_size, orphan, o_size
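
A minimal sketch of driving this check for a single site, assuming the configuration files and databases referenced above are reachable (the site name is a placeholder):

# Illustrative invocation; 'T2_US_MIT' is a placeholder site name.
missing, m_size, orphan, o_size = main('T2_US_MIT')
LOG.info('%i missing (%i bytes), %i orphans (%i bytes)',
         len(missing), m_size, len(orphan), o_size)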
Example #6
                    help='Logging level.')

args = parser.parse_args()
sys.argv = []

# Need to setup logging before loading other modules
log_level = getattr(logging, args.log_level.upper())

logging.basicConfig(level=log_level)

logger = logging.getLogger(__name__)

from common.interface.mysql import MySQL

store = MySQL(config_file='/etc/my.cnf',
              config_group='mysql-dynamo',
              db='dynamoregister')

if args.command[0] == 'update':
    logger.info('Synchronizing the user list to SiteDB.')

    from common.interface.sitedb import SiteDB
    sitedb = SiteDB()

    domain_id = store.query(
        'SELECT `id` FROM `domains` WHERE `name` = \'cern.ch\'')[0]

    query = ('INSERT INTO `users` (`name`, `domain_id`, `email`, `dn`) '
             'VALUES (%s, ' + str(domain_id) + ', %s, %s) '
             'ON DUPLICATE KEY UPDATE `email` = `email`, `dn` = `dn`')
Example #7
def get_phedex_tree(site):
    """
    Get the file list tree from PhEDEx.
    Uses the InventoryAge configuration to determine when to refresh cache.

    :param str site: The site to get information from PhEDEx for.
    :returns: A tree containing file replicas that are supposed to be at the site
    :rtype: ConsistencyCheck.datatypes.DirectoryInfo
    """

    tree = datatypes.DirectoryInfo('/store')

    valid_list = config.config_dict().get('DirectoryList', [])

    sql = MySQL(config_file='/etc/my.cnf',
                db='dynamo',
                config_group='mysql-dynamo')
    datasets = sql.query(
        'SELECT datasets.name '
        'FROM sites INNER JOIN dataset_replicas INNER JOIN datasets '
        'WHERE dataset_replicas.dataset_id=datasets.id AND '
        'dataset_replicas.site_id=sites.id and sites.name=%s', site)

    def add_files(dataset, retries):
        """
        :param str dataset: Dataset to get from PhEDEx
        :param int retries: The number of times to retry PhEDEx call
        :returns: Whether or not the addition was successful
        :rtype: bool
        """

        LOG.info('Getting PhEDEx contents for %s', dataset)

        phedex_response = get_json('cmsweb.cern.ch',
                                   '/phedex/datasvc/json/prod/filereplicas', {
                                       'node': site,
                                       'dataset': dataset
                                   },
                                   retries=retries,
                                   use_https=True)

        report = 0

        if not phedex_response:
            LOG.warning('Bad response from PhEDEx for %s', dataset)
            return False

        for block in phedex_response['phedex']['block']:
            LOG.debug('%s', block)
            replica_list = [
                (replica['name'], replica['bytes'],
                 int(replica['replica'][0]['time_create'] or time.time()),
                 block['name'])
                for replica in block['file']
                if replica['name'].split('/')[2] in valid_list]

            report += len(replica_list)

            tree.add_file_list(replica_list)

        LOG.info('%i files', report)
        return True

    separate = []

    for primary in set([d.split('/')[1][:3] for d in datasets]):
        success = add_files('/%s*/*/*' % primary, 0)
        if not success:
            separate.append(primary)

    # Separate loop to retry datasets individually
    for dataset in [d for d in datasets if d.split('/')[1][:3] in separate]:
        success = add_files(dataset, 5)
        if not success:
            LOG.critical('Cannot get %s from PhEDEx. Do not trust results...',
                         dataset)

    return tree
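
A hypothetical use of this function, reading the resulting tree through the same DirectoryInfo methods that Example #5 uses (the site name is a placeholder):

# Illustrative: build the PhEDEx view of one site and report its size.
phedex_tree = get_phedex_tree('T2_US_MIT')
LOG.info('%i file replicas listed in PhEDEx', phedex_tree.get_num_files())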
Example #8
def get_db_listing(site):
    """
    Get the list of files from dynamo database directly from MySQL.

    :param str site: The name of the site to load
    :returns: The file replicas that are supposed to be at a site
    :rtype: ConsistencyCheck.datatypes.DirectoryInfo
    """

    inv_sql = MySQL(config_file='/etc/my.cnf',
                    db='dynamo',
                    config_group='mysql-dynamo')

    # Get list of files
    curs = inv_sql._connection.cursor()

    LOG.info('About to make MySQL query for files at %s', site)

    tree = datatypes.DirectoryInfo('/store')

    def add_to_tree(curs):
        """
        Add cursor contents to the dynamo listing tree

        :param MySQLdb.cursor curs: The cursor which just completed a query to fetch
        """
        dirs_to_look = iter(sorted(config.config_dict()['DirectoryList']))

        files_to_add = []
        look_dir = ''
        row = curs.fetchone()

        while row:
            name, size = row[0:2]
            timestamp = time.mktime(row[2].timetuple()) if len(row) == 3 else 0

            # Rows and the directory whitelist are both sorted, so one
            # merge-style pass matches files to allowed directories
            current_directory = name.split('/')[2]
            try:
                while look_dir < current_directory:
                    look_dir = next(dirs_to_look)
            except StopIteration:
                # Whitelist exhausted; no later (sorted) row can match
                break

            if current_directory == look_dir:
                LOG.debug('Adding file: %s, %i', name, size)

                files_to_add.append((name, size, timestamp))

            row = curs.fetchone()

        tree.add_file_list(files_to_add)

    curs.execute(
        """
        SELECT files.name, files.size
        FROM block_replicas
        INNER JOIN sites ON block_replicas.site_id = sites.id
        INNER JOIN files ON block_replicas.block_id = files.block_id
        WHERE block_replicas.is_complete = 1 AND sites.name = %s
        AND group_id != 0
        ORDER BY files.name ASC
        """, (site, ))

    add_to_tree(curs)

    curs.execute(
        """
        SELECT files.name, files.size, NOW()
        FROM block_replicas
        INNER JOIN sites ON block_replicas.site_id = sites.id
        INNER JOIN files ON block_replicas.block_id = files.block_id
        WHERE (block_replicas.is_complete = 0 OR group_id = 0) AND sites.name = %s
        ORDER BY files.name ASC
        """, (site, ))

    add_to_tree(curs)

    LOG.info('MySQL query returned')

    return tree
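
A hypothetical pairing of this listing with the remote site tree, mirroring how Example #5 feeds both into datatypes.compare (the site name and output prefix are placeholders; the orphan and missing callbacks are omitted on the assumption that they are optional):

inv_tree = get_db_listing('T2_US_MIT')
site_tree = getsitecontents.get_site_tree('T2_US_MIT')
missing, m_size, orphan, o_size = datatypes.compare(
    inv_tree, site_tree, 'T2_US_MIT_compare')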
Example #9
    def make_request(self,
                     resource='',
                     options=[],
                     method=GET,
                     format='url',
                     cache_lifetime=0):
        url = self.url_base
        if resource:
            url += '/' + resource

        if method == GET and len(options) != 0:
            if type(options) is list:
                url += '?' + '&'.join(options)
            elif type(options) is str:
                url += '?' + options

        if logger.getEffectiveLevel() == logging.DEBUG:
            logger.debug(url)

        # first check the cache
        if method == GET and self._cache_lock is not None and cache_lifetime > 0:
            with self._cache_lock:
                try:
                    db = MySQL(**config.webservice.cache_db_params)
                    cache = db.query(
                        'SELECT UNIX_TIMESTAMP(`timestamp`), `content` FROM `webservice` WHERE `url` = %s',
                        url)
                    db.close()
                except:
                    logger.error(
                        'Connection to cache DB failed when fetching the timestamp for %s.',
                        url)
                    cache = []

            if len(cache) != 0:
                timestamp, content = cache[0]
                if time.time() - timestamp < cache_lifetime:
                    logger.debug('Using cache for %s', url)
                    if self.accept == 'application/json':
                        result = json.loads(content)
                        unicode2str(result)

                    elif self.accept == 'application/xml':
                        # TODO implement xml -> dict
                        result = content

                    return result

        # now query the URL
        request = urllib2.Request(url)

        if method == POST:
            if format == 'url':
                # Options can be a dict or a list of key=value strings or 2-tuples. The latter case allows repeated keys (e.g. dataset=A&dataset=B)
                if type(options) is list:
                    # convert key=value strings to (key, value) 2-tuples
                    optlist = []
                    for opt in options:
                        if type(opt) is tuple:
                            optlist.append(opt)

                        elif type(opt) is str:
                            key, eq, value = opt.partition('=')
                            if eq == '=':
                                optlist.append((key, value))

                    options = optlist

                data = urllib.urlencode(options)

            elif format == 'json':
                # Options must be jsonizable.
                request.add_header('Content-type', 'application/json')
                data = json.dumps(options)

            request.add_data(data)

        wait = 1.
        exceptions = []
        while len(exceptions) != config.webservice.num_attempts:
            try:
                if self.auth_handler:
                    opener = urllib2.build_opener(self.auth_handler())
                else:
                    opener = urllib2.build_opener()

                if 'Accept' not in self.headers:
                    opener.addheaders.append(('Accept', self.accept))

                opener.addheaders.extend(self.headers)

                response = opener.open(request)

                # clean up - break reference cycle so python can free the memory up
                for handler in opener.handlers:
                    handler.parent = None
                del opener

                content = response.read()
                del response

                if method == GET and self._cache_lock is not None:
                    with tempfile.NamedTemporaryFile(mode='w',
                                                     delete=False) as tmpfile:
                        filename = tmpfile.name
                        tmpfile.write('\'%s\',\'%s\',\'%s\'' %
                                      (MySQL.escape_string(url),
                                       time.strftime('%Y-%m-%d %H:%M:%S'),
                                       MySQL.escape_string(content)))

                    os.chmod(filename, 0644)

                    with self._cache_lock:
                        try:
                            db = MySQL(**config.webservice.cache_db_params)
                            db.query(
                                'DELETE FROM `webservice` WHERE `url` = %s',
                                url)
                            db.query(
                                r"LOAD DATA LOCAL INFILE '%s' INTO TABLE `dynamocache`.`webservice` FIELDS TERMINATED BY ',' ENCLOSED BY '\''"
                                % filename)
                            db.close()
                        except:
                            logger.error(
                                'Connection to cache DB failed when writing the response of %s.',
                                url)
                            pass

                    os.remove(filename)

                if self.accept == 'application/json':
                    result = json.loads(content)
                    unicode2str(result)

                elif self.accept == 'application/xml':
                    # TODO implement xml -> dict
                    result = content

                del content

                return result

            except urllib2.HTTPError as err:
                last_except = (str(err)) + '\nBody:\n' + err.read()
            except:
                last_except = sys.exc_info()[:2]

            exceptions.append(last_except)

            logger.info(
                'Exception "%s" occurred in webservice. Trying again in %.1f seconds.',
                str(last_except), wait)

            time.sleep(wait)
            wait *= 1.5

        else:  # exhausted allowed attempts
            logger.error('Too many failed attempts in webservice')
            logger.error(' '.join(map(str, exceptions)))
            raise RuntimeError('webservice too many attempts')
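
A hypothetical GET through this method, assuming service is an instance of the surrounding webservice class with url_base and accept already configured (the resource and options are illustrative):

# Illustrative: the options list allows repeated keys, and the response
# is served from the cache DB when younger than the given lifetime.
result = service.make_request('filereplicas',
                              options=['node=T2_US_MIT', 'dataset=/A/B/RAW'],
                              cache_lifetime=3600)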
Example #10
    def __init__(self, db_params=config.registry.db_params):
        self._mysql = MySQL(**db_params)
Example #11
    def __init__(self, inventory, history):
        self._inventory = inventory
        self._history = history
        self._mysql = MySQL(**config.registry.db_params)