Example 1
def execute_select_query(cnx, select_query, select_data, n=3):
    """Run the SELECT built by %-interpolating select_data into select_query
    and return the result rows as a list."""

    cursor = cnx.cursor()

    # Log the fully formatted statement at verbosity level n, then execute that
    # exact string (bypassing the connector's own parameter binding).
    GPR.print_it(select_query % select_data, n)

    cursor.execute(select_query % select_data)

    rows = [row for row in cursor]

    cursor.close()

    return rows
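
A minimal usage sketch, assuming execute_select_query and its GPR logger are importable from the surrounding project; the connection parameters and the files table are placeholders, not part of the original code.

import mysql.connector

# Hypothetical connection and table; adjust for the real schema.
cnx = mysql.connector.connect(user='scott', password='secret', database='catalog')

# The %s markers are filled by plain string interpolation inside
# execute_select_query, so string values must already be quoted/escaped.
query = "SELECT file_id, file_name FROM files WHERE vol_id = %s"
for row in execute_select_query(cnx, query, (42,)):
    print row

cnx.close()
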
Example 2
def do_fs_basepath(cnx, basepath, slist, vol_id, item_tally=defaultdict(list), force_folder_scan=False, 
                                                                                  scan_hidden_files=False, 
                                                                                  depth_limit=4, 
                                                                                  scan_packages=False):
    """do_fs_basepath is a generator yielding an ordered sequence of (status, dictionary) pairs
      
      first yield the sequence of directories above the basepath, from top down.  could be empty.
      then yield basepath, then enumerate contents of basepath if it is a directory 
      or package and we want to scan packages
      """

    n = len(slist)
    for i, superfolder_dict in enumerate(slist[:-1]):   # last one is basepath
        superfolder_dict['vol_id'] = vol_id
        superfolder_dict['depth'] = i+1-n
        yield superfolder_dict 

    basepath_url = NSURL.fileURLWithPath_(basepath)
    
    item_dict = slist[-1]
    depth = 0 
    item_dict['vol_id'] = vol_id
    item_dict['depth'] = depth

    # See if our current item is (effectively) a directory; check/query the database if it is.

    item_is_package = is_item_a_package(basepath_url)
    if item_dict[NSURLIsDirectoryKey] and ((not item_is_package) or scan_packages):

        file_exists = db_file_exists(cnx, item_dict, vol_id)
        item_dict['directory_is_up_to_date'] = file_exists and not force_folder_scan
        if not file_exists or force_folder_scan:
            folder_id = item_dict['NSFileSystemFileNumber']
            db_query_folder(cnx, vol_id, item_dict, depth)

        folder_file_id = item_dict['NSFileSystemFileNumber']
        stak.append((depth, folder_file_id))

        yield item_dict
        
        # fall-through to do enumeration
        
    else:
    
        if item_dict[NSURLIsDirectoryKey] and item_is_package and not scan_packages:
            GPR.print_it("\nbasepath is a directory and a package but we're not scanning packages.\n", 2)
    
        yield item_dict
        
        return


    #   Fall through to do the enumeration: either we are a plain directory,
    #   or we are a package and scan_packages is set.
    
    enumeratorOptionKeys = 0L
    if not scan_packages:
        enumeratorOptionKeys |= NSDirectoryEnumerationSkipsPackageDescendants
    if not scan_hidden_files:
        enumeratorOptionKeys |= NSDirectoryEnumerationSkipsHiddenFiles

    enumerator2 = sharedFM.enumeratorAtURL_includingPropertiesForKeys_options_errorHandler_(
                                        basepath_url,   enumeratorURLKeys, enumeratorOptionKeys, 
                                        error_handler_for_enumerator )
                                        
    for url in enumerator2:

        item_dict = GetURLValues(url, enumeratorURLKeys)
        depth = enumerator2.level()                
        item_dict['vol_id'] = vol_id
        item_dict['depth'] = depth
            
        while len(stak) > depth:
            stak.pop()

        # See if our current item is (effectively) a directory; check/query the database if it is.

        item_is_package = is_item_a_package(url)
        if item_dict[NSURLIsDirectoryKey] and ((not item_is_package) or scan_packages):

            file_exists = db_file_exists(cnx, item_dict, vol_id)
            item_dict['directory_is_up_to_date'] = file_exists and not force_folder_scan
            if not file_exists or force_folder_scan:
                folder_id = item_dict['NSFileSystemFileNumber']
                db_query_folder(cnx, vol_id, item_dict, depth)
                
            # (1) In addition to checking the database, new files also need to be added to
            #     RS2_ins[(depth-1, folder_id)].
            # (2) Completely new directories (i.e. not just updates of existing directories)
            #     won't have any database contents to check.  (This is a lesser optimization?)


            folder_file_id = item_dict['NSFileSystemFileNumber']            
            stak.append((depth, folder_file_id))
                
        # See if our current item's folder ID is in our list of (new or forced) folders to be tracked.

        folder_id = item_dict['NSFileSystemFolderNumber']
        item_dict['current_item_directory_is_being_checked'] =  (depth-1, folder_id) in RS1_db_rels
        if (depth-1, folder_id) in RS1_db_rels:
            file_id         = item_dict['NSFileSystemFileNumber']
            filename        = item_dict[NSURLNameKey]
            file_mod_date   = item_dict[NSURLContentModificationDateKey]
            s = str(file_mod_date)
            file_mod_date = s[:-len(" +0000")]
            rs = (  vol_id,   folder_id,  filename,  file_id, file_mod_date)

            # if the current item is present in RS1 then it is no longer a "file to be deleted"
            # if in filesystem but not in database then it is a "file to be inserted"
            
            try:                
                RS1_db_rels[ (depth-1, folder_id) ] -= rs       
            except KeyError:
                RS2_ins[ (depth-1, folder_id) ] += rs       
                
        yield item_dict

    # end enumerator

    return
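
A sketch of how a caller might consume this generator, assuming the surrounding module provides the list of ancestor dictionaries (via a hypothetical build_slist helper) and has already initialized the module-level stak, RS1_db_rels and RS2_ins structures it relies on.

# Hypothetical driver for do_fs_basepath.
basepath = "/Volumes/Media/Photos"
slist = build_slist(basepath)     # hypothetical: one dict per ancestor, basepath's own dict last
for item_dict in do_fs_basepath(cnx, basepath, slist, vol_id=1, force_folder_scan=True):
    indent = "  " * max(item_dict['depth'], 0)
    GPR.print_it(indent + str(item_dict.get(NSURLNameKey, basepath)), 3)
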
Example 3
def execute_insert_into_files(cnx, query, data, verbose_level=3):
    """Returns kDuplicateKey on a duplicate-key violation, kRecordInserted otherwise."""

    # The fields in the query argument are marked %s because a magic routine that we can't see
    #       converts our data into MySQL-compatible strings and inserts them into our %s-es.
    #       Using %s implies that we could've used %r or '%s', etc.; so I recommend not relying on
    #       the magic conversion implied by passing (query, data), but rather explicitly formatting
    #       the SQL with (query % data) and passing the resulting string to cursor.execute().
    #       (A short sketch contrasting the two call styles follows this example.)

    cursor = None
    try:

        cursor = cnx.cursor()
        
        GPR.print_it(query % data, verbose_level)


        # print "cursor._connection.charset is: " , cursor._connection.charset                
        
        # Returns an iterator when multi is True, otherwise None.            
        cursor.execute(query, data)         # (…, operation, params=None, multi=False)

        cnx.commit()

        q = "select @count_by_file_name, @count_by_file_id, @msg" # via insert trigger on table "files"
        cursor.execute(q)
        trigger_vars = dict(zip(("count_by_file_name", "count_by_file_id", "msg"), [z for z in cursor][0]))

        # kRecordInserted means we didn't get a duplicate key error
        insert_result = FilesInsertResult(l=kRecordInserted, verbose_level=verbose_level, **trigger_vars)  

        q = "select @vol_id"
        cursor.execute(q)
        vol_id = [z for z in cursor][0][0]
        cnx.commit()
            
        return (vol_id , insert_result) 

    except mysql.connector.Error as err:
        if err.errno == 1062 and err.sqlstate == '23000':
            
            if GPR.verbose_level >= verbose_level:
                n1 = err.msg.index('Duplicate entry')
                n2 = err.msg.index('for key ')
                msg2 = err.msg[n1:n2-1]
                print "    "+repr(msg2)

            cnx.commit()

            #  only the insert trigger on table "files" sets these variables
 
            q = "select @count_by_file_name, @count_by_file_id , @msg"
            cursor.execute(q)
            trigger_vars = dict(zip(("count_by_file_name", "count_by_file_id", "msg"), [z for z in cursor][0]))

            #   kDuplicateKey means we got a duplicate key error
            insert_result = FilesInsertResult( l = kDuplicateKey, verbose_level=verbose_level, **trigger_vars) 

            q = "select @vol_id"
            cursor.execute(q)
            vol_id = [z for z in cursor][0][0]
            cnx.commit()
 
            return (vol_id , insert_result)

        elif err.errno == 1242 and err.sqlstate == '21000':
            print "Subquery returns more than 1 row"
            print query % data
        else:
            print 'error:', err, err.errno, err.message, err.msg, err.sqlstate  # , dir(err)
            
        return None
        
    finally:

        if cursor is not None:
            cursor.close()
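
A minimal sketch contrasting the two call styles discussed in the comment at the top of this example, assuming an open mysql.connector cursor; the table and column names are placeholders.

# Parameterized form: the connector converts and escapes the values itself.
query = "INSERT INTO files (vol_id, file_name) VALUES (%s, %s)"
data = (1, "report.pdf")
cursor.execute(query, data)

# Interpolated form (as the comment above suggests, for easier logging):
# the final SQL string is built first, so what gets logged is exactly what runs,
# but quoting string values becomes the caller's responsibility.
sql = query % (1, "'report.pdf'")
GPR.print_it(sql, 3)
cursor.execute(sql)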