Example #1
File: library.py  Project: msauria/galaxy
 def _make_library_uploaded_dataset(self, trans, params, name, path, type, library_bunch, in_folder=None):
     link_data_only = params.get('link_data_only', 'copy_files')
     uuid_str = params.get('uuid', None)
     file_type = params.get('file_type', None)
     library_bunch.replace_dataset = None  # not valid for these types of upload
     uploaded_dataset = util.bunch.Bunch()
     new_name = name
     # Remove compressed file extensions, if any, but only if
     # we're copying files into Galaxy's file space.
     if link_data_only == 'copy_files':
         if new_name.endswith('.gz'):
             # Strip the literal suffix; rstrip('.gz') would instead remove
             # any trailing '.', 'g', or 'z' characters from the name itself.
             new_name = new_name[:-len('.gz')]
         elif new_name.endswith('.zip'):
             new_name = new_name[:-len('.zip')]
     uploaded_dataset.name = new_name
     uploaded_dataset.path = path
     uploaded_dataset.type = type
     uploaded_dataset.ext = None
     uploaded_dataset.file_type = file_type
     uploaded_dataset.dbkey = params.get('dbkey', None)
     uploaded_dataset.to_posix_lines = params.get('to_posix_lines', None)
     uploaded_dataset.space_to_tab = params.get('space_to_tab', None)
     uploaded_dataset.tag_using_filenames = params.get('tag_using_filenames', False)
     uploaded_dataset.purge_source = getattr(trans.app.config, 'ftp_upload_purge', True)
     if in_folder:
         uploaded_dataset.in_folder = in_folder
     uploaded_dataset.data = upload_common.new_upload(trans, 'api', uploaded_dataset, library_bunch)
     uploaded_dataset.link_data_only = link_data_only
     uploaded_dataset.uuid = uuid_str
     if link_data_only == 'link_to_files':
         uploaded_dataset.data.link_to(path)
         trans.sa_session.add_all((uploaded_dataset.data, uploaded_dataset.data.dataset))
         trans.sa_session.flush()
     return uploaded_dataset
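
Several of these examples assemble the upload record as a util.bunch.Bunch. If that helper is unfamiliar: it is essentially a dict-backed namespace whose attributes can be assigned freely. A minimal sketch of the idea (an illustration, not Galaxy's exact implementation):

class Bunch:
    # Dict-backed namespace: attributes can be set and read at will.
    def __init__(self, **kwds):
        self.__dict__.update(kwds)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)

ud = Bunch(name='sample.bed', file_type=None, dbkey=None)
ud.ext = None          # attributes may be added after construction
print(ud.get('name'))  # -> 'sample.bed'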
Example #2
 def _make_library_uploaded_dataset(self, trans, params, name, path, type, library_bunch, in_folder=None):
     link_data_only = params.get('link_data_only', 'copy_files')
     uuid_str = params.get('uuid', None)
     file_type = params.get('file_type', None)
     library_bunch.replace_dataset = None  # not valid for these types of upload
     uploaded_dataset = util.bunch.Bunch()
     new_name = name
     # Remove compressed file extensions, if any, but only if
     # we're copying files into Galaxy's file space.
     if link_data_only == 'copy_files':
         if new_name.endswith('.gz'):
             # Strip the literal suffix; rstrip('.gz') would instead remove
             # any trailing '.', 'g', or 'z' characters from the name itself.
             new_name = new_name[:-len('.gz')]
         elif new_name.endswith('.zip'):
             new_name = new_name[:-len('.zip')]
     uploaded_dataset.name = new_name
     uploaded_dataset.path = path
     uploaded_dataset.type = type
     uploaded_dataset.ext = None
     uploaded_dataset.file_type = file_type
     uploaded_dataset.dbkey = params.get('dbkey', None)
     uploaded_dataset.to_posix_lines = params.get('to_posix_lines', None)
     uploaded_dataset.space_to_tab = params.get('space_to_tab', None)
     uploaded_dataset.tag_using_filenames = params.get('tag_using_filenames', False)
     uploaded_dataset.tags = params.get('tags', None)
     uploaded_dataset.purge_source = getattr(trans.app.config, 'ftp_upload_purge', True)
     if in_folder:
         uploaded_dataset.in_folder = in_folder
     uploaded_dataset.data = upload_common.new_upload(trans, 'api', uploaded_dataset, library_bunch)
     uploaded_dataset.link_data_only = link_data_only
     uploaded_dataset.uuid = uuid_str
     if link_data_only == 'link_to_files':
         uploaded_dataset.data.link_to(path)
         trans.sa_session.add_all((uploaded_dataset.data, uploaded_dataset.data.dataset))
         trans.sa_session.flush()
     return uploaded_dataset
Example #3
def _precreate_fetched_hdas(trans, history, target, outputs):
    for item in target.get("elements", []):
        name = item.get("name", None)
        if name is None:
            src = item.get("src", None)
            if src == "url":
                url = item.get("url")
                if name is None:
                    name = url.split("/")[-1]
            elif src == "path":
                path = item["path"]
                if name is None:
                    name = os.path.basename(path)

        file_type = item.get("ext", "auto")
        dbkey = item.get("dbkey", "?")
        uploaded_dataset = Bunch(type='file',
                                 name=name,
                                 file_type=file_type,
                                 dbkey=dbkey)
        tag_list = item.get("tags", [])
        data = upload_common.new_upload(trans,
                                        '',
                                        uploaded_dataset,
                                        library_bunch=None,
                                        history=history,
                                        tag_list=tag_list)
        outputs.append(data)
        item["object_id"] = data.id
Example #4
 def make_library_uploaded_dataset(self,
                                   trans,
                                   params,
                                   name,
                                   path,
                                   type,
                                   library_bunch,
                                   in_folder=None):
     library_bunch.replace_dataset = None  # not valid for these types of upload
     uploaded_dataset = util.bunch.Bunch()
     uploaded_dataset.name = name
     uploaded_dataset.path = path
     uploaded_dataset.type = type
     uploaded_dataset.ext = None
     uploaded_dataset.file_type = params.file_type
     uploaded_dataset.dbkey = params.dbkey
     uploaded_dataset.space_to_tab = params.space_to_tab
     if in_folder:
         uploaded_dataset.in_folder = in_folder
     uploaded_dataset.data = upload_common.new_upload(
         trans, uploaded_dataset, library_bunch)
     if params.get('link_data_only', False):
         uploaded_dataset.link_data_only = True
         uploaded_dataset.data.file_name = os.path.abspath(path)
         trans.sa_session.add(uploaded_dataset.data)
         trans.sa_session.flush()
     return uploaded_dataset
Example #5
 def create_dataset( name ):
     ud = Bunch( name=name, file_type=None, dbkey=None )
     if nonfile_params.get( 'folder_id', False ):
         replace_id = nonfile_params.get( 'replace_id', None )
         if replace_id not in [ None, 'None' ]:
             replace_dataset = trans.sa_session.query( l.LibraryDataset ).get( int( replace_id ) )
         else:
             replace_dataset = None
         library_bunch = upload_common.handle_library_params( trans, nonfile_params, nonfile_params.folder_id, replace_dataset )
     else:
         library_bunch = None
     return upload_common.new_upload( trans, ud, library_bunch=library_bunch, state=trans.app.model.HistoryDatasetAssociation.states.UPLOAD )
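
Note that create_dataset is a closure: trans and nonfile_params are expected to be bound in the enclosing upload handler rather than passed in. A minimal usage sketch under that assumption:

# Hypothetical call site inside the enclosing handler, where 'trans' and
# 'nonfile_params' are already defined (spacing mirrors this example's
# legacy style).
data = create_dataset( 'reads.fastq' )
# 'data' is a new dataset association created in the UPLOAD state.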
Example #6
 def create_dataset( name ):
     ud = Bunch( name=name, file_type=None, dbkey=None )
     if nonfile_params.get( 'folder_id', False ):
         replace_id = nonfile_params.get( 'replace_id', None )
         if replace_id not in [ None, 'None' ]:
             replace_dataset = trans.sa_session.query( trans.app.model.LibraryDataset ).get( trans.security.decode_id( replace_id ) )
         else:
             replace_dataset = None
     # FIXME: instead of passing params here ( which have been processed by util.Params() ), the original kwd
         # should be passed so that complex objects that may have been included in the initial request remain.
         library_bunch = upload_common.handle_library_params( trans, nonfile_params, nonfile_params.folder_id, replace_dataset )
     else:
         library_bunch = None
     return upload_common.new_upload( trans, cntrller, ud, library_bunch=library_bunch, state=trans.app.model.HistoryDatasetAssociation.states.UPLOAD )
Example #7
 def _make_library_uploaded_dataset(self,
                                    trans,
                                    params,
                                    name,
                                    path,
                                    type,
                                    library_bunch,
                                    in_folder=None):
     link_data_only = params.get('link_data_only', 'copy_files')
     uuid_str = params.get('uuid', None)
     file_type = params.get('file_type', None)
     library_bunch.replace_dataset = None  # not valid for these types of upload
     uploaded_dataset = util.bunch.Bunch()
     new_name = name
     # Remove compressed file extensions, if any, but only if
     # we're copying files into Galaxy's file space.
     if link_data_only == 'copy_files':
         if new_name.endswith('.gz'):
             # Strip the literal suffix; rstrip('.gz') would instead remove
             # any trailing '.', 'g', or 'z' characters from the name itself.
             new_name = new_name[:-len('.gz')]
         elif new_name.endswith('.zip'):
             new_name = new_name[:-len('.zip')]
     uploaded_dataset.name = new_name
     uploaded_dataset.path = path
     uploaded_dataset.type = type
     uploaded_dataset.ext = None
     uploaded_dataset.file_type = file_type
     uploaded_dataset.dbkey = params.get('dbkey', None)
     uploaded_dataset.to_posix_lines = params.get('to_posix_lines', None)
     uploaded_dataset.space_to_tab = params.get('space_to_tab', None)
     uploaded_dataset.tag_using_filenames = params.get(
         'tag_using_filenames', True)
     if in_folder:
         uploaded_dataset.in_folder = in_folder
     uploaded_dataset.data = upload_common.new_upload(
         trans, 'api', uploaded_dataset, library_bunch)
     uploaded_dataset.link_data_only = link_data_only
     uploaded_dataset.uuid = uuid_str
     if link_data_only == 'link_to_files':
         uploaded_dataset.data.file_name = os.path.abspath(path)
         # Since we are not copying the file into Galaxy's managed
         # default file location, the dataset should never be purgable.
         uploaded_dataset.data.dataset.purgable = False
         trans.sa_session.add_all(
             (uploaded_dataset.data, uploaded_dataset.data.dataset))
         trans.sa_session.flush()
     return uploaded_dataset
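
The two link_data_only modes above behave very differently: 'copy_files' copies the upload into Galaxy's managed file space (and strips archive suffixes from the display name), while 'link_to_files' points the dataset at the original path and marks it non-purgable. A hypothetical params mapping for each mode (keys mirror the params.get() calls above; values are assumptions):

# Copy the file into Galaxy's object store (the default mode).
copy_params = {'link_data_only': 'copy_files', 'file_type': 'auto', 'dbkey': 'hg38'}

# Leave the file in place; dataset.file_name is set to os.path.abspath(path)
# and dataset.purgable is forced to False.
link_params = {'link_data_only': 'link_to_files', 'file_type': 'bed'}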
Example #8
 def create_dataset(name):
     ud = Bunch(name=name, file_type=None, dbkey=None)
     if nonfile_params.get('folder_id', False):
         replace_id = nonfile_params.get('replace_id', None)
         if replace_id not in [None, 'None']:
             replace_dataset = trans.sa_session.query(
                 l.LibraryDataset).get(int(replace_id))
         else:
             replace_dataset = None
         library_bunch = upload_common.handle_library_params(
             trans, nonfile_params, nonfile_params.folder_id,
             replace_dataset)
     else:
         library_bunch = None
     return upload_common.new_upload(
         trans,
         ud,
         library_bunch=library_bunch,
         state=trans.app.model.HistoryDatasetAssociation.states.UPLOAD)
Example #9
 def make_library_uploaded_dataset( self, trans, params, name, path, type, library_bunch, in_folder=None ):
     library_bunch.replace_dataset = None # not valid for these types of upload
     uploaded_dataset = util.bunch.Bunch()
     uploaded_dataset.name = name
     uploaded_dataset.path = path
     uploaded_dataset.type = type
     uploaded_dataset.ext = None
     uploaded_dataset.file_type = params.file_type
     uploaded_dataset.dbkey = params.dbkey
     uploaded_dataset.space_to_tab = params.space_to_tab
     if in_folder:
         uploaded_dataset.in_folder = in_folder
     uploaded_dataset.data = upload_common.new_upload( trans, uploaded_dataset, library_bunch )
     if params.get( 'link_data_only', False ):
         uploaded_dataset.link_data_only = True
         uploaded_dataset.data.file_name = os.path.abspath( path )
         trans.sa_session.add( uploaded_dataset.data )
         trans.sa_session.flush()
     return uploaded_dataset
Example #10
def _precreate_fetched_hdas(trans, history, target, outputs):
    for item in target.get("elements", []):
        name = item.get("name", None)
        if name is None:
            src = item.get("src", None)
            if src == "url":
                url = item.get("url")
                if name is None:
                    name = url.split("/")[-1]
            elif src == "path":
                path = item["path"]
                if name is None:
                    name = os.path.basename(path)

        file_type = item.get("ext", "auto")
        dbkey = item.get("dbkey", "?")
        uploaded_dataset = Bunch(
            type='file', name=name, file_type=file_type, dbkey=dbkey
        )
        data = upload_common.new_upload(trans, '', uploaded_dataset, library_bunch=None, history=history)
        outputs.append(data)
        item["object_id"] = data.id
Example #11
File: library.py  Project: osallou/galaxy
 def _make_library_uploaded_dataset(self, trans, params, name, path, type, library_bunch, in_folder=None):
     link_data_only = params.get('link_data_only', 'copy_files')
     uuid_str = params.get('uuid', None)
     file_type = params.get('file_type', None)
     library_bunch.replace_dataset = None  # not valid for these types of upload
     uploaded_dataset = util.bunch.Bunch()
     new_name = name
     # Remove compressed file extensions, if any, but only if
     # we're copying files into Galaxy's file space.
     if link_data_only == 'copy_files':
         if new_name.endswith('.gz'):
             # Strip the literal suffix; rstrip('.gz') would instead remove
             # any trailing '.', 'g', or 'z' characters from the name itself.
             new_name = new_name[:-len('.gz')]
         elif new_name.endswith('.zip'):
             new_name = new_name[:-len('.zip')]
     uploaded_dataset.name = new_name
     uploaded_dataset.path = path
     uploaded_dataset.type = type
     uploaded_dataset.ext = None
     uploaded_dataset.file_type = file_type
     uploaded_dataset.dbkey = params.get('dbkey', None)
     uploaded_dataset.to_posix_lines = params.get('to_posix_lines', None)
     uploaded_dataset.space_to_tab = params.get('space_to_tab', None)
     uploaded_dataset.tag_using_filenames = params.get('tag_using_filenames', True)
     if in_folder:
         uploaded_dataset.in_folder = in_folder
     uploaded_dataset.data = upload_common.new_upload(trans, 'api', uploaded_dataset, library_bunch)
     uploaded_dataset.link_data_only = link_data_only
     uploaded_dataset.uuid = uuid_str
     if link_data_only == 'link_to_files':
         uploaded_dataset.data.file_name = os.path.abspath(path)
         # Since we are not copying the file into Galaxy's managed
         # default file location, the dataset should never be purgable.
         uploaded_dataset.data.dataset.purgable = False
         trans.sa_session.add_all((uploaded_dataset.data, uploaded_dataset.data.dataset))
         trans.sa_session.flush()
     return uploaded_dataset