def path_from_ids(cnx, vol_id, file_id):
    """Rebuild the full path of a file by walking the files table to the root.

    Follows folder_id links upward (the volume root is id 1), collecting one
    (vol_id, folder_id, file_name, file_id) tuple per level, root first,
    e.g. ('vol0003', 274342L, 'salma hayek 2.mp4', 274385L).

    Returns (path, chain) where path is "/Volumes/<name>/.../<name>" and
    chain is the list of per-level tuples.
    """
    chain = []
    while file_id > 1:
        # NOTE(review): SQL built by %-interpolation -- injection-prone if
        # vol_id/file_id ever come from untrusted input; parameterize when
        # db_execute_sql supports bind arguments.
        sql = "select   folder_id, file_name " \
              " from files where vol_id = %r and file_id = %d" % (vol_id, file_id)

        (folder_id, file_name) = db_execute_sql(cnx, sql, "hi", 3)[0]

        # prepend so the chain ends up ordered root-first
        chain.insert(0, (vol_id, folder_id, file_name, file_id))

        # climb one level: the parent folder is the next row to fetch
        file_id = folder_id

    path = "/Volumes/" + "/".join([t[2] for t in chain])

    return path, chain
def main():

    cnx = db_connect()
    

    vol_id = 'vol0004'
    file_id =  85948    
    
    (vol_id, file_id) = ('vol0004' , 86182)
    
    sql = "select path_from_vol_id_file_id(%r,  %d )" % (vol_id, file_id)
    
    r = db_execute_sql(cnx, sql)
    
    print r[0]
Example #3
0
def do_db_delete_tuple(cnx, rs, verbose_level_threshold=3):

    # 1.    Decode early
    # 2.    Unicode everywhere
    # 3.    Encode late  ( this is late )
        
    rel_dict =   dict(zip( ("vol_id", "folder_id", "file_name", "file_id", "file_mod_date") , rs ))  
    rel_dict["file_name"] = str(rel_dict["file_name"].encode('utf8'))
    rel_dict["vol_id"] = str(rel_dict["vol_id"].encode('utf8'))

    update_sql = ("update files "
                    " set folder_id =  0 "
                    " where files.vol_id  =      %(vol_id)r "
                    " and files.folder_id =      %(folder_id)s "
                    " and files.file_id =        %(file_id)s " 
                    " and files.file_mod_date =  %(file_mod_date)r " 
                    ) 
                    # " and files.file_name =      %(file_name)r " 

    # execute_update_query(cnx, update_sql , rel_dict, label='pop delete', verbose_level_threshold=n)
    select_query = update_sql % rel_dict
    r = db_execute_sql(cnx, select_query, label='do_db_delete_tuple', verbose_level_threshold=verbose_level_threshold)
    print rs
    print
Example #4
0
def do_args(args, options):
    """do_args is the high-level, self-contained routine most like the command-line invocation"""

    # Walks each base path via do_arg_gen and inserts/updates files-table
    # rows according to each yielded entry's depth and sql_action.
    cnx = db_connect()
    

    # every insert below interpolates exactly these (pre-escaped) fields
    required_fields =  ['vol_id', 'folder_id', 'file_name', 'file_id', 'file_size', 'file_create_date', 'file_mod_date', 'file_uti' ]

    try:
        for basepath in args:
            
            for arg_dict in do_arg_gen(basepath, cnx, options):  

                # depth 0 should be a fully-realized level
                if (arg_dict['depth'] < 0):
                    # negative depth: directory not fully scanned yet; insert
                    # a placeholder row unless it is already known up to date
                    if not ('directory_is_up_to_date' in arg_dict) or not arg_dict['directory_is_up_to_date']:
                        sql_dict = GetDR(arg_dict, required_fields)
                        # epoch mod date marks the row "already out-of-date"
                        # so a later pass will rescan it
                        sql_dict['file_mod_date'] = "'1970-01-01 00:00:00'" # args are escaped and quoted at this point
                        add_file_sql = ("insert into files "
                                        "(vol_id, folder_id, file_name, file_id, file_size, file_create_date, file_mod_date, file_uti) "
                                        "values "
                                        "( %(vol_id)s, %(folder_id)s, %(file_name)s, %(file_id)s, %(file_size)s, %(file_create_date)s, "
                                        "%(file_mod_date)s, %(file_uti)s ) "
                                        )
                        # execute_update_query(cnx, add_file_sql , sql_dict, label='(depth < 0)', verbose_level_threshold=3 )

                        db_execute_sql(cnx, add_file_sql % sql_dict, label='(depth < 0)', verbose_level_threshold=2)

                    GPR.pr7z( arg_dict ) 

                elif 'sql_action' in arg_dict:

                    if arg_dict['sql_action'] in  ["update_directory", "insert"]:
                        
                        # technically, we are updating (ie, completing) the directory
                        #  before we do the directory entries?  consistency problem if we fail?

                        add_file_sql = ("insert into files "
                                        "(vol_id, folder_id, file_name, file_id, file_size, file_create_date, file_mod_date, file_uti) "
                                        "values "
                                        "( %(vol_id)s, %(folder_id)s, %(file_name)s, %(file_id)s, %(file_size)s, %(file_create_date)s, "
                                        "%(file_mod_date)s, %(file_uti)s ) "
                                        )

                        # execute_update_query(cnx, add_file_sql , sql_dict, label=arg_dict['sql_action'], verbose_level_threshold=2)  # sql and dict are "%"'ed inside function
                        
                        db_execute(cnx, add_file_sql, arg_dict, required_fields, label="do_args" + arg_dict['sql_action'], verbose_level_threshold=2)

                    else:

                        # NOTE(review): add_file_sql is only bound in earlier
                        # branches -- if this branch is hit before any of
                        # them, this raises NameError (UnboundLocalError).
                        # Confirm with do_arg_gen's yield order.
                        sql_dict = GetDR(arg_dict, required_fields)
                        GPR.print_it(add_file_sql % sql_dict, 3)
                                                            
                    GPR.pr7z( arg_dict ) 
                elif (arg_dict['depth'] == 0):

                    # fully-realized level: report at the lowest verbosity
                    GPR.pr7z( arg_dict , verbose_level_threshold=1) 
                        
                else:
                    
                    GPR.pr7z( arg_dict , verbose_level_threshold=2) 
                
                    
            

    except MyError, err:
        print err.description
Example #5
0
def db_get_vol_id(cnx, in_dict, vol_id):
    """Resolve (or mint) the vol_id for the volume described by in_dict.

    Three attempts, in order:
      1. look up volume_uuids by the volume's UUID string;
      2. look up the files table's root entry by file name + create date;
      3. insert the root entry -- a DB trigger fills in/creates the vol_id,
         which the insert's "returning vol_id" clause hands back.
    Whichever attempt succeeds also refreshes the volume_uuids row.

    in_dict carries a PyObjC NSURL under 'url' plus the file fields used in
    attempts two and three.  A non-None vol_id is returned unchanged.
    """
    if vol_id is not None:
        return vol_id

    #
    #  attempt one:     volume id from volume uuids table where vol_uuid is our current Volume UUID string.
    #

    # query the live volume for its UUID (and capacity keys) via NSURL
    volume_url = in_dict['url']
    volume_uuid_dict, error = volume_url.resourceValuesForKeys_error_( ['NSURLVolumeUUIDStringKey',
                                                        'NSURLVolumeTotalCapacityKey',
                                                        'NSURLVolumeAvailableCapacityKey',
                                                        'NSURLVolumeSupportsVolumeSizesKey'] , None )
    label = 'vol_id gen'
    verbose_level_threshold = 2

    # NOTE(review): %-interpolated SQL throughout this function -- injection-
    # prone if these values ever come from untrusted input.
    select_query = ( "select vol_id "
                     " from volume_uuids"
                     " where vol_uuid = '%s'" % str(volume_uuid_dict['NSURLVolumeUUIDStringKey'])
                     )

    r = db_execute_sql(cnx, select_query, label, verbose_level_threshold=2)

    if r:
        vol_id = r[0][0]
        db_update_volume_uuids(cnx, vol_id, volume_uuid_dict)
        return vol_id

    #
    #  attempt two:     get the vol_id back from the files table via create date and file name.
    #

    required_fields =  [   'file_name', 'file_create_date'  ]

    # folder_id = 1 restricts the match to the volume's root entry
    select_query = ( " select vol_id"
                     " from files"
                     " where folder_id = 1"
                     "   and file_name = %(file_name)s"
                     "   and file_create_date = %(file_create_date)s"
                    )

    r = db_execute(cnx, select_query, in_dict, required_fields, label, verbose_level_threshold=2)

    vol_id = None if r == [] else r[0][0]                       # r could hold multiple results

    if vol_id is not None:
        db_update_volume_uuids(cnx, vol_id, volume_uuid_dict)
        return vol_id

    #
    #  attempt three:   (1)  do the insert, trigger will fill in existing/create new volume id
    #                         insert with returning statement will return the found/created vol_id.
    #

    #   at this point we know that this record doesn't already, "naturally" exist and as such
    #   our inserting a directory entry says that this directory is done/scanned/up-to-date
    #   which we don't know at this point.  So insert a date that is earlier than others
    #   to indicate that this directory is being inserted in a condition of being already out-of-date.

    required_fields =  [ 'folder_id', 'file_name', 'file_id', 'file_size', 'file_create_date', 'file_mod_date', 'file_uti' ]

    add_file_sql = ("insert into files (folder_id, file_name, file_id, file_size, file_create_date, file_mod_date, file_uti) "
                        " values ( %(folder_id)s, %(file_name)s,"
                        "         %(file_id)s, %(file_size)s, %(file_create_date)s,"
                        "         %(file_mod_date)s, %(file_uti)s ) "
                        "        returning vol_id "
                        )

    sql_dict = GetDR(in_dict, required_fields, verbose_level_threshold=verbose_level_threshold)

    # our "already out-of-date" date
    sql_dict['file_mod_date'] = "'1970-01-01 00:00:00'" # args are escaped and quoted at this point
    sql_query = add_file_sql % sql_dict

    r = db_execute_sql(cnx, sql_query, label, verbose_level_threshold=verbose_level_threshold)

    vol_id = r[0][0]            # vol_id must exist by now: the trigger assigned it.
    db_update_volume_uuids(cnx, vol_id, volume_uuid_dict)
    return vol_id