def do_delete(self, file):
    files = sdfiledao.get_files(file_functional_id=file, limit=1)
    if len(files) == 1:
        f = files[0]
        sddeletefile.immediate_delete(f)
        print "File successfully deleted"
    elif len(files) == 0:
        print "File not found"
def get_one_waiting_transfer(datanode=None):
    if datanode is None:
        li = sdfiledao.get_files(limit=1, status=sdconst.TRANSFER_STATUS_WAITING)
    else:
        li = sdfiledao.get_files(limit=1, status=sdconst.TRANSFER_STATUS_WAITING, data_node=datanode)

    if len(li) == 0:
        raise NoTransferWaitingException()
    else:
        t = li[0]

        # retrieve the dataset
        d = sddatasetdao.get_dataset(dataset_id=t.dataset_id)
        t.dataset = d

        return t
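# Usage sketch (not part of the original source): how a hypothetical scheduler
# could pull one waiting transfer per data node with get_one_waiting_transfer()
# above. 'candidate_datanodes' and 'start_transfer' are assumed placeholders,
# not real Synda symbols.
def start_transfers_sketch(candidate_datanodes):
    for dn in candidate_datanodes:
        try:
            t = get_one_waiting_transfer(datanode=dn)
        except NoTransferWaitingException:
            continue  # nothing queued on this data node, try the next one
        start_transfer(t)  # hypothetical helper that launches the download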
def print_running_transfers():
    li = []
    for tr in sdfiledao.get_files(status=sdconst.TRANSFER_STATUS_RUNNING):
        current_size = os.path.getsize(tr.get_full_local_path()) if os.path.isfile(tr.get_full_local_path()) else 0
        li.append([humanize.naturalsize(current_size, gnu=False),
                   humanize.naturalsize(tr.size, gnu=False),
                   tr.start_date,
                   tr.filename])

    if len(li) > 0:
        print tabulate(li, headers=['Current size', 'Total size', 'Download start date', 'Filename'], tablefmt="plain")
    else:
        print 'No current download'
def print_local_samples():
    """Print one file of each local project."""
    files = []
    for project in sdsamplequery.get_local_projects():
        files.extend(sdfiledao.get_files(project=project, limit=1))

    for f in files:
        print "%s" % (f.local_path)
        print "%s|%s" % (f.file_functional_id, f.data_node)
        print
def delete_transfers(limit=None):
    """
    Note
        'limit' is used to delete only a subset of all files marked for deletion
        each time this func is called. If 'limit' is None, all files marked for
        deletion are removed.
    """
    transfer_list = sdfiledao.get_files(status=sdconst.TRANSFER_STATUS_DELETE, limit=limit)

    for tr in transfer_list:
        immediate_delete(tr)
def replica(args):
    if args.action == "next":
        if args.file_id is None:
            import sdfiledao, sdconst
            files = sdfiledao.get_files(status=sdconst.TRANSFER_STATUS_ERROR)
            for file_ in files:
                replica_next(file_.file_functional_id, args)
        else:
            replica_next(args.file_id, args)
    else:
        print_stderr('Incorrect argument')
def delete_insertion_group(insertion_group_id):
    files = sdfiledao.get_files(insertion_group_id=insertion_group_id)
    files = [f for f in files]  # exclude files already marked for deletion

    if len(files) > 0:
        for f in files:
            sddeletefile.deferred_delete(f.file_functional_id)
            sdlog.info("SDINSGRP-001", "File marked for deletion (%s)" % f.file_functional_id)

        print "%i file(s) marked for deletion" % len(files)

        sddao.add_history_line(sdconst.ACTION_DELETE, insertion_group_id=insertion_group_id)
    else:
        print "Nothing to delete"
def get_one_waiting_transfer():
    li = sdfiledao.get_files(limit=1, status=sdconst.TRANSFER_STATUS_WAITING)
    if len(li) == 0:
        raise NoTransferWaitingException()
    else:
        t = li[0]

        # retrieve the dataset
        d = sddatasetdao.get_dataset(dataset_id=t.dataset_id)
        t.dataset = d

        return t
def get_file(file_functional_id=None):
    li = sdfiledao.get_files(file_functional_id=file_functional_id)
    if len(li) == 0:
        raise FileNotFoundException()
    else:
        f = li[0]

        # retrieve the dataset
        d = sddatasetdao.get_dataset(dataset_id=f.dataset_id)
        f.dataset = d

        return f
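# Usage sketch (illustrative only, not original code): looking up a file by its
# functional id with get_file() above and handling the not-found case. Only
# get_file() and FileNotFoundException come from the snippets in this section.
def describe_file_sketch(file_functional_id):
    try:
        f = get_file(file_functional_id=file_functional_id)
    except FileNotFoundException:
        print "No such file: %s" % file_functional_id
        return
    print "%s|%s" % (f.local_path, f.data_node)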
def replica(args):
    if len(args.parameter) < 1:
        print_stderr('Incorrect argument')
    else:
        action = args.parameter[0]  # it's a naming mess: rename top level action as subcommand

        if action == "next":
            if len(args.parameter) == 1:
                import sdfiledao, sdconst
                files = sdfiledao.get_files(status=sdconst.TRANSFER_STATUS_ERROR)
                for file_ in files:
                    replica_next(file_.file_functional_id, args)
            elif len(args.parameter) == 2:
                file_functional_id = args.parameter[1]
                replica_next(file_functional_id, args)
def delete_transfers(limit=None, remove_all=True):
    """Perform the deletion of DATA and METADATA.

    Returns
        How many files with TRANSFER_STATUS_DELETE status remain.

    Notes
        - Can be called from the daemon code (deferred mode), or from
          interactive code (immediate mode).
        - 'limit' is used to delete only a subset of all files marked for
          deletion each time this func is called. If 'limit' is None, all
          files marked for deletion are removed.
    """
    transfer_list = sdfiledao.get_files(status=sdconst.TRANSFER_STATUS_DELETE, limit=limit)

    try:
        for tr in transfer_list:
            if remove_all:
                immediate_delete(tr)
            else:
                immediate_md_delete(tr)

        sddb.conn.commit()  # final commit (we do all deletion in one transaction)
    except Exception as e:
        sdlog.error("SDDELETE-880", "Error occurred during file suppression (%s)" % (str(e),))

        # No rollback here: i.e. we also commit if an error occurs (most likely a
        # filesystem permission error). This is to keep metadata synced with data
        # (else many files would have been removed from the filesystem with their
        # metadata still in the db).
        #
        # TODO: the exception is too generic here:
        #       improve this code by using a specific exception for "permission error".
        #
        sddb.conn.commit()

        raise  # fatal error

    return sdfilequery.transfer_status_count(status=sdconst.TRANSFER_STATUS_DELETE)
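# Usage sketch (an assumption, not part of the original source): a caller that
# wants to purge pending deletions in small batches can rely on the 'limit'
# argument and the returned count of delete_transfers() above. The batch size
# of 100 is an arbitrary illustrative value.
def purge_pending_deletions_sketch():
    remaining = delete_transfers(limit=100)  # remove at most 100 marked files per call
    while remaining > 0:
        remaining = delete_transfers(limit=100)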
def cleanup_running_transfer():
    """Handle zombie cases (transfers with 'running' status which are not actually running).

    Check for zombie transfers (move "running" transfers back to "waiting").

    Notes:
        - remaining "running" transfers exist if the daemon has been killed,
          or if the server rebooted while the daemon was running
        - if there are still transfers in the running state, we switch them to
          waiting and remove the partially downloaded file chunk
    """
    transfer_list = sdfiledao.get_files(status=sdconst.TRANSFER_STATUS_RUNNING)

    for t in transfer_list:
        sdlog.info("SDTSCHED-023", "fixing transfer status (%s)" % t.get_full_local_path())

        # remove the partial download, if any
        if os.path.isfile(t.get_full_local_path()):
            os.remove(t.get_full_local_path())

        t.status = sdconst.TRANSFER_STATUS_WAITING
        sdfiledao.update_file(t)
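# Usage sketch (illustrative only): cleanup_running_transfer() above is the kind
# of routine a scheduler would call once at startup, before resuming downloads,
# so that transfers left in 'running' state by a crash are re-queued.
# 'run_download_loop' is an assumed placeholder, not a real Synda function.
def start_scheduler_sketch():
    cleanup_running_transfer()  # re-queue zombie 'running' transfers first
    run_download_loop()         # hypothetical main loop consuming 'waiting' transfers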
def delete_insertion_group(insertion_group_id):
    files = sdfiledao.get_files(insertion_group_id=insertion_group_id)
    files = [f for f in files]  # exclude files already marked for deletion

    if len(files) > 0:
        for f in files:
            sddeletefile.deferred_delete(f.file_functional_id)
            sdlog.info("SDINSGRP-001", "File marked for deletion (%s)" % f.file_functional_id)

        sddb.conn.commit()  # final commit (we do all updates in one transaction)

        # deferred mode
        # (if effective deletion is done by the daemon, uncomment this line)
        #print "%i file(s) marked for deletion" % len(files)

        # immediate mode
        sddeletefile.delete_transfers_lowmem()
        print "%i file(s) deleted" % len(files)

        sdhistorydao.add_history_line(sdconst.ACTION_DELETE, insertion_group_id=insertion_group_id)
    else:
        print "Nothing to delete"