Example 1
 def data( self, trans, dataset_id, chrom="", low="", high="" ):
     """
     Called by the browser to request a block of data
     """
     dataset = trans.app.model.HistoryDatasetAssociation.get( dataset_id )
     if not dataset: return messages.NO_DATA
     if dataset.state == trans.app.model.Job.states.ERROR:
         return messages.NO_DATA
     if not dataset.state == trans.app.model.Job.states.OK:
         return messages.PENDING
     track_store = trans.app.track_store.get( dataset )
     if not track_store.exists:
         # Test if we can make a track
         indexers = trans.app.datatypes_registry.get_indexers_by_datatype( dataset.extension )
         if indexers:
             tool = indexers[0]   # They are sorted by class chain so use the top one
             # If we can, return pending and launch job
             job = trans.app.model.Job()
             job.session_id = trans.get_galaxy_session().id
             job.history_id = trans.history.id
             job.tool_id = tool.id
             job.tool_version = "1.0.0"
             job.add_input_dataset( "input_dataset", dataset )
             job.add_parameter( "input_dataset", to_json_string( dataset.id ) )
             # This is odd
             # job.add_output_dataset( "input_dataset", dataset )
             # create store path, this is rather unclear?
             track_store.set()
             job.add_parameter( "store_path", to_json_string( track_store.path ) )    
             job.flush()
             trans.app.job_manager.job_queue.put( job.id, tool )
             return messages.PENDING
         else:
             return messages.NO_DATA
     else:
         # Data for that chromosome or resolution does not exist?
         # HACK: we're "pending" because the store exists without a manifest
         try:
             track_store.get_manifest()
         except track_store.DoesNotExist:
             return messages.PENDING
         if chrom and low and high:
             low = math.floor(float(low))
             high = math.ceil(float(high))
             resolution = dataset.datatype.get_track_resolution( dataset, low, high )
             try:
                 data = track_store.get( chrom, resolution )
             except track_store.DoesNotExist:
                 return messages.NO_DATA
             window = dataset.datatype.get_track_window( dataset, data, low, high )
             glob = {"data":window, "type":dataset.datatype.get_track_type()};
             if resolution: glob["resolution"] = resolution
             return window
         else:
             return messages.DATA
Example 2
 def import_workflow( self, trans, **kwd ):
     repository_metadata_id = kwd.get( 'repository_metadata_id', '' )
     workflow_name = kwd.get( 'workflow_name', '' )
     if workflow_name:
         workflow_name = decode( workflow_name )
     webapp = kwd.get( 'webapp', 'community' )
     message = kwd.get( 'message', '' )
     status = kwd.get( 'status', 'done' )
     repository_metadata = get_repository_metadata_by_id( trans, repository_metadata_id )
     workflows = repository_metadata.metadata[ 'workflows' ]
     workflow_data = None
     for workflow_data in workflows:
         if workflow_data[ 'name' ] == workflow_name:
             break
     if workflow_data:
         if kwd.get( 'open_for_url', False ):
             tmp_fd, tmp_fname = tempfile.mkstemp()
             to_file = open( tmp_fname, 'wb' )
             to_file.write( to_json_string( workflow_data ) )
             to_file.close()
             return open( tmp_fname )
         galaxy_url = trans.get_cookie( name='toolshedgalaxyurl' )
         url = '%sworkflow/import_workflow?tool_shed_url=%s&repository_metadata_id=%s&workflow_name=%s&webapp=%s' % \
             ( galaxy_url, url_for( '/', qualified=True ), repository_metadata_id, encode( workflow_name ), webapp )
         return trans.response.send_redirect( url )
     return trans.response.send_redirect( web.url_for( controller='workflow',
                                                       action='view_workflow',
                                                       message=message,
                                                       status=status ) )
Example 3
def main():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option('-j',
                      '--jar_path',
                      dest='jar_path',
                      action='store',
                      type="string",
                      default=None,
                      help='snpEff.jar path')
    (options, args) = parser.parse_args()

    filename = args[0]

    params = from_json_string(open(filename).read())
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir(target_directory)
    data_manager_dict = {}

    #Create SnpEff reference data
    data_manager_dict = fetch_databases(data_manager_dict, target_directory,
                                        options.jar_path)

    #save info to json file
    open(filename, 'wb').write(to_json_string(data_manager_dict))
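
The data manager scripts above and below all read a JSON parameter file whose path is passed as the first positional argument. A minimal sketch of the structure they rely on (the field values here are invented; only the keys mirror what the scripts index into):

example_params = {
    'param_dict': {'database_id': 'hg19', 'database_name': 'Human (hg19)'},
    'output_data': [{'extra_files_path': '/tmp/dataset_1_files'}],
}
# params['output_data'][0]['extra_files_path'] gives the directory to populate,
# and the finished data_manager_dict is written back to the same file as JSON.
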
Example 4
def add_sequencer( sequencer_index, sequencer_form_definition_id, sequencer_info ):
    '''Adds a new sequencer to the sequencer table along with its form values.'''
    # Create a new form values record with the supplied sequencer information
    values = to_json_string( { 'field_0': sequencer_info.get( 'host', '' ),
                               'field_1': sequencer_info.get( 'username', '' ),
                               'field_2': sequencer_info.get( 'password', '' ),
                               'field_3': sequencer_info.get( 'data_dir', '' ),
                               'field_4': sequencer_info.get( 'rename_dataset', '' ) } )
    cmd = "INSERT INTO form_values VALUES ( %s, %s, %s, %s, '%s' )" % ( nextval( 'form_values' ), 
                                                                        localtimestamp(), 
                                                                        localtimestamp(), 
                                                                        sequencer_form_definition_id,
                                                                        values )
    db_session.execute(cmd)
    sequencer_form_values_id = get_latest_id( 'form_values' )
    # Create a new sequencer record with reference to the form value created above.
    name = 'Sequencer_%i' % sequencer_index
    desc = ''
    version = ''
    result_datasets = dict()
    sequencer_type_id = 'simple_unknown_sequencer'
    cmd = "INSERT INTO sequencer VALUES ( %s, %s, %s, '%s', '%s', '%s', '%s', %s, %s, %s )"
    cmd = cmd % ( nextval('sequencer'), 
                  localtimestamp(), 
                  localtimestamp(), 
                  name, 
                  desc, 
                  sequencer_type_id,
                  version, 
                  sequencer_form_definition_id,
                  sequencer_form_values_id,
                  boolean( 'false' ) )
    db_session.execute(cmd)
    return get_latest_id( 'sequencer' )
Example 5
def add_sequencer( sequencer_index, sequencer_form_definition_id, sequencer_info ):
    '''Adds a new sequencer to the sequencer table along with its form values.'''
    # Create a new form values record with the supplied sequencer information
    values = to_json_string( { 'field_0': sequencer_info.get( 'host', '' ),
                               'field_1': sequencer_info.get( 'username', '' ),
                               'field_2': sequencer_info.get( 'password', '' ),
                               'field_3': sequencer_info.get( 'data_dir', '' ),
                               'field_4': sequencer_info.get( 'rename_dataset', '' ) } )
    cmd = "INSERT INTO form_values VALUES ( %s, %s, %s, %s, '%s' )" % ( nextval( 'form_values' ),
                                                                        localtimestamp(),
                                                                        localtimestamp(),
                                                                        sequencer_form_definition_id,
                                                                        values )
    migrate_engine.execute(cmd)
    sequencer_form_values_id = get_latest_id( 'form_values' )
    # Create a new sequencer record with reference to the form value created above.
    name = 'Sequencer_%i' % sequencer_index
    desc = ''
    version = ''
    result_datasets = dict()
    sequencer_type_id = 'simple_unknown_sequencer'
    cmd = "INSERT INTO sequencer VALUES ( %s, %s, %s, '%s', '%s', '%s', '%s', %s, %s, %s )"
    cmd = cmd % ( nextval('sequencer'),
                  localtimestamp(),
                  localtimestamp(),
                  name,
                  desc,
                  sequencer_type_id,
                  version,
                  sequencer_form_definition_id,
                  sequencer_form_values_id,
                  boolean( 'false' ) )
    migrate_engine.execute(cmd)
    return get_latest_id( 'sequencer' )
Example 6
def create_job( trans, params, tool, json_file_path, data_list, folder=None ):
    """
    Create the upload job.
    """
    job = trans.app.model.Job()
    job.session_id = trans.get_galaxy_session().id
    if folder:
        job.library_folder_id = folder.id
    else:
        job.history_id = trans.history.id
    job.tool_id = tool.id
    job.tool_version = tool.version
    job.state = job.states.UPLOAD
    trans.sa_session.add( job )
    trans.sa_session.flush()
    log.info( 'tool %s created job id %d' % ( tool.id, job.id ) )
    trans.log_event( 'created job id %d' % job.id, tool_id=tool.id )

    for name, value in tool.params_to_strings( params, trans.app ).iteritems():
        job.add_parameter( name, value )
    job.add_parameter( 'paramfile', to_json_string( json_file_path ) )
    if folder:
        for i, dataset in enumerate( data_list ):
            job.add_output_library_dataset( 'output%i' % i, dataset )
    else:
        for i, dataset in enumerate( data_list ):
            job.add_output_dataset( 'output%i' % i, dataset )
    job.state = job.states.NEW
    trans.sa_session.add( job )
    trans.sa_session.flush()

    # Queue the job for execution
    trans.app.job_queue.put( job.id, tool )
    trans.log_event( "Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
    return dict( [ ( 'output%i' % i, v ) for i, v in enumerate( data_list ) ] )
Example 7
def main():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option('-d',
                      '--dbkey_description',
                      dest='dbkey_description',
                      action='store',
                      type="string",
                      default=None,
                      help='dbkey_description')
    (options, args) = parser.parse_args()

    filename = args[0]

    params = from_json_string(open(filename).read())
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir(target_directory)
    data_manager_dict = {}

    database_id = params['param_dict']['database_id']
    database_name = params['param_dict']['database_name']

    #Fetch the FASTA
    REFERENCE_SOURCE_TO_DOWNLOAD[
        params['param_dict']['reference_source']['reference_source_selector']](
            data_manager_dict, params, target_directory, database_id,
            database_name)

    #save info to json file
    open(filename, 'wb').write(to_json_string(data_manager_dict))
Example 8
 def display_data(self,
                  trans,
                  dataset,
                  preview=False,
                  filename=None,
                  to_ext=None,
                  chunk=None):
     #TODO Prevent failure when displaying extremely long > 50kb lines.
     if to_ext:
         return self._serve_raw(trans, dataset, to_ext)
     if chunk:
         ck_index = int(chunk)
         f = open(dataset.file_name)
         f.seek(ck_index * self.CHUNK_SIZE)
         # If we aren't at the start of the file, seek to next newline.  Do this better eventually.
         if f.tell() != 0:
             cursor = f.read(1)
             while cursor and cursor != '\n':
                 cursor = f.read(1)
         ck_data = f.read(self.CHUNK_SIZE)
         cursor = f.read(1)
         while cursor and ck_data[-1] != '\n':
             ck_data += cursor
             cursor = f.read(1)
         return to_json_string({
             'ck_data': ck_data,
             'ck_index': ck_index + 1
         })
     return trans.fill_template("/dataset/tabular_chunked.mako",
                                dataset=dataset)
Example 9
 def setSessionParam(self, param, value):
     if self.trans.get_user():
         prefs = self.trans.get_user().preferences
         #hbdict = dict()
         #hbdict[param] = value
         prefs['hb_'+param] = to_json_string(value)
         self.trans.sa_session.flush()
Example 10
 def setSessionParam(self, param, value):
     if self.trans.get_user():
         prefs = self.trans.get_user().preferences
         #hbdict = dict()
         #hbdict[param] = value
         prefs['hb_' + param] = to_json_string(value)
         self.trans.sa_session.flush()
Example 11
def job_param_filter(view, left, operator, right):
    view.do_query = True
    alias = aliased(JobParameter)
    param_name = re.sub(r'^param.', '', left)
    view.query = view.query.filter(
        and_(Job.id == alias.job_id, alias.name == param_name,
             alias.value == to_json_string(right)))
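
The filter works because JobParameter values are stored in their JSON-encoded form (the job-creation snippets in this listing JSON-encode values before add_parameter as well), so the raw right-hand value must be encoded the same way before comparison. A minimal illustration, assuming to_json_string behaves like json.dumps, which is how every snippet here uses it:

import json

stored_value = json.dumps("hg19")      # what JobParameter.value would hold: '"hg19"'
assert stored_value == json.dumps("hg19")
assert stored_value != "hg19"          # comparing against the raw string would never match
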
Example 12
def main():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename' )
    parser.add_option( '-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey' )
    parser.add_option( '-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description' )
    parser.add_option( '-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name' )
    (options, args) = parser.parse_args()
    
    filename = args[0]
    
    params = from_json_string( open( filename ).read() )
    target_directory = params[ 'output_data' ][0]['extra_files_path']
    os.mkdir( target_directory )
    data_manager_dict = {}
    
    dbkey = options.fasta_dbkey
    
    if dbkey in [ None, '', '?' ]:
        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )
    
    sequence_id, sequence_name = get_id_name( params, dbkey=dbkey, fasta_description=options.fasta_description )
    
    #build the index
    build_bowtie2_index( data_manager_dict, options.fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME )
    
    #save info to json file
    open( filename, 'wb' ).write( to_json_string( data_manager_dict ) )
Example 13
def main():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename' )
    parser.add_option( '-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey' )
    parser.add_option( '-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description' )
    parser.add_option( '-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name' )
    parser.add_option( '-c', '--color_space', dest='color_space', action='store_true', default=False, help='color_space' )
    (options, args) = parser.parse_args()
    
    filename = args[0]
    
    params = from_json_string( open( filename ).read() )
    target_directory = params[ 'output_data' ][0]['extra_files_path']
    os.mkdir( target_directory )
    data_manager_dict = {}
    
    dbkey = options.fasta_dbkey
    
    if dbkey in [ None, '', '?' ]:
        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )
    
    sequence_id, sequence_name = get_id_name( params, dbkey=dbkey, fasta_description=options.fasta_description )
    
    #build the index
    build_bwa_index( data_manager_dict, options.fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME, color_space=options.color_space )
    
    #save info to json file
    open( filename, 'wb' ).write( to_json_string( data_manager_dict ) )
Example 14
def main():
    options, args = get_arg()
    tool_dir = args[0]

    path_to_alfa = os.path.join(tool_dir, 'ALFA.py')

    if options.output_filename is None:
        msg = 'No json output file specified'
        sys.exit(msg)
    output_filename = options.output_filename

    # Interestingly the output file to return is not empty initially.
    # it contains a dictionary, with notably the path to the dir where the alfa_indexes
    # are expected to be found
    params = from_json_string(open(output_filename).read())
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir(target_directory)

    tmp_dir = tempfile.mkdtemp(prefix='tmp', suffix='')
    os.chdir(tmp_dir)

    data_manager_dict = {}

    if options.ensembl_info:
        kingdom, species_name = options.ensembl_info
        species_name = standardize_species_name(species_name)
        url = get_ensembl_url_root(kingdom)
        species_name, species_line = test_ensembl_species_exists(
            kingdom, url, species_name)
        gtf_archive_name = get_ensembl_gtf_archive(kingdom, url, species_name,
                                                   species_line)
        data_table_entry = get_data_table_new_entry(gtf_archive_name)
        gtf_file_name = '%s.gtf' % data_table_entry['prefix']
        uncompress_gz(gtf_archive_name, gtf_file_name)
        generate_alfa_indexes(path_to_alfa, gtf_file_name)
        stranded_index_name = '%s.stranded.index' % data_table_entry['prefix']
        unstranded_index_name = '%s.unstranded.index' % data_table_entry[
            'prefix']
        add_data_table_entry(data_manager_dict, data_table_entry)

    print("____________________________________________________________")
    print("*** General Info")
    print("URL ROOT:\t%s" % url)
    print("SPECIES:\t%s" % data_table_entry['species'])
    print("VERSION:\t%s" % data_table_entry['version'])
    print("RELEASE:\t%s" % data_table_entry['release'])
    print("VALUE:\t%s" % data_table_entry['value'])
    print("DBKEY:\t%s" % data_table_entry['dbkey'])
    print("NAME:\t%s" % data_table_entry['name'])
    print("PREFIX:\t%s" % data_table_entry['prefix'])

    shutil.copyfile(stranded_index_name,
                    os.path.join(target_directory, stranded_index_name))
    shutil.copyfile(unstranded_index_name,
                    os.path.join(target_directory, unstranded_index_name))

    cleanup_before_exit(tmp_dir)

    open(output_filename, 'wb').write(to_json_string(data_manager_dict))
Example 15
def encode_data(key, data):
    """
    Encode data to send a question to Biostar
    """
    text = json.to_json_string(data)
    text = base64.urlsafe_b64encode(text)
    digest = hmac.new(key, text).hexdigest()
    return text, digest
Example 16
def encode_data( key, data ):
    """
    Encode data to send a question to Biostar
    """
    text = json.to_json_string(data)
    text = base64.urlsafe_b64encode(text)
    digest = hmac.new(key, text).hexdigest()
    return text, digest
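
A hedged sketch of the receiving side of encode_data (verify_data is hypothetical, not part of the snippets above): recompute the HMAC over the base64 text with the shared key and compare digests before trusting the payload. Python 2 standard-library behavior is assumed, matching the code above.

import base64
import hmac
import json

def verify_data(key, text, digest):
    # hmac.new defaults to MD5 here, the same default the encoder relies on
    if hmac.new(key, text).hexdigest() != digest:
        return None                      # signature mismatch: reject the payload
    return json.loads(base64.urlsafe_b64decode(text))
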
Example 17
def create_job( trans, params, tool, json_file_path, data_list, folder=None, history=None ):
    """
    Create the upload job.
    """
    job = trans.app.model.Job()
    galaxy_session = trans.get_galaxy_session()
    if type( galaxy_session ) == trans.model.GalaxySession:
        job.session_id = galaxy_session.id
    if trans.user is not None:
        job.user_id = trans.user.id
    if folder:
        job.library_folder_id = folder.id
    else:
        if not history:
            history = trans.history
        job.history_id = history.id
    job.tool_id = tool.id
    job.tool_version = tool.version
    job.state = job.states.UPLOAD
    trans.sa_session.add( job )
    trans.sa_session.flush()
    log.info( 'tool %s created job id %d' % ( tool.id, job.id ) )
    trans.log_event( 'created job id %d' % job.id, tool_id=tool.id )

    for name, value in tool.params_to_strings( params, trans.app ).iteritems():
        job.add_parameter( name, value )
    job.add_parameter( 'paramfile', to_json_string( json_file_path ) )
    object_store_id = None
    for i, dataset in enumerate( data_list ):
        if folder:
            job.add_output_library_dataset( 'output%i' % i, dataset )
        else:
            job.add_output_dataset( 'output%i' % i, dataset )
        # Create an empty file immediately
        if not dataset.dataset.external_filename:
            dataset.dataset.object_store_id = object_store_id
            try:
                trans.app.object_store.create( dataset.dataset )
            except ObjectInvalid:
                raise Exception('Unable to create output dataset: object store is full')
            object_store_id = dataset.dataset.object_store_id
            trans.sa_session.add( dataset )
            # open( dataset.file_name, "w" ).close()
    job.object_store_id = object_store_id
    job.state = job.states.NEW
    job.set_handler(tool.get_job_handler(None))
    trans.sa_session.add( job )
    trans.sa_session.flush()

    # Queue the job for execution
    trans.app.job_queue.put( job.id, job.tool_id )
    trans.log_event( "Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
    output = odict()
    for i, v in enumerate( data_list ):
        output[ 'output%i' % i ] = v
    return job, output
Example 18
def create_job( trans, params, tool, json_file_path, data_list, folder=None, history=None ):
    """
    Create the upload job.
    """
    job = trans.app.model.Job()
    galaxy_session = trans.get_galaxy_session()
    if type( galaxy_session ) == trans.model.GalaxySession:
        job.session_id = galaxy_session.id
    if trans.user is not None:
        job.user_id = trans.user.id
    if folder:
        job.library_folder_id = folder.id
    else:
        if not history:
            history = trans.history
        job.history_id = history.id
    job.tool_id = tool.id
    job.tool_version = tool.version
    job.state = job.states.UPLOAD
    trans.sa_session.add( job )
    trans.sa_session.flush()
    log.info( 'tool %s created job id %d' % ( tool.id, job.id ) )
    trans.log_event( 'created job id %d' % job.id, tool_id=tool.id )

    for name, value in tool.params_to_strings( params, trans.app ).iteritems():
        job.add_parameter( name, value )
    job.add_parameter( 'paramfile', to_json_string( json_file_path ) )
    object_store_id = None
    for i, dataset in enumerate( data_list ):
        if folder:
            job.add_output_library_dataset( 'output%i' % i, dataset )
        else:
            job.add_output_dataset( 'output%i' % i, dataset )
        # Create an empty file immediately
        if not dataset.dataset.external_filename:
            dataset.dataset.object_store_id = object_store_id
            try:
                trans.app.object_store.create( dataset.dataset )
            except ObjectInvalid:
                raise Exception('Unable to create output dataset: object store is full')
            object_store_id = dataset.dataset.object_store_id
            trans.sa_session.add( dataset )
            # open( dataset.file_name, "w" ).close()
    job.object_store_id = object_store_id
    job.state = job.states.NEW
    job.set_handler(tool.get_job_handler(None))
    trans.sa_session.add( job )
    trans.sa_session.flush()

    # Queue the job for execution
    trans.app.job_queue.put( job.id, job.tool_id )
    trans.log_event( "Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
    output = odict()
    for i, v in enumerate( data_list ):
        output[ 'output%i' % i ] = v
    return job, output
Example 19
 def get_account_info(self, trans, key_id, secret):
     """
     Get EC2 Account Info
     """
     account_info = {}
     cml = cloudman.launch.CloudManLauncher(key_id, secret)
     ec2_conn = cml.connect_ec2(key_id, secret)
     kps = ec2_conn.get_all_key_pairs()
     account_info['clusters'] = cml.get_clusters_pd()
     account_info['keypairs'] = [akp.name for akp in kps]
     return to_json_string(account_info)
Example 20
 def get_account_info(self, trans, key_id, secret):
     """
     Get EC2 Account Info
     """
     account_info = {}
     cml = cloudman.launch.CloudManLauncher(key_id, secret)
     ec2_conn = cml.connect_ec2(key_id, secret)
     kps = ec2_conn.get_all_key_pairs()
     account_info['clusters'] = cml.get_clusters_pd()
     account_info['keypairs'] = [akp.name for akp in kps]
     return to_json_string(account_info)
Example 21
def job_param_filter(view, left, operator, right):
    view.do_query = True
    alias = aliased( JobParameter )
    param_name = re.sub(r'^param.', '', left)
    view.query = view.query.filter(
        and_(
            Job.id == alias.job_id,
            alias.name == param_name,
            alias.value == to_json_string(right)
        )
    )
Example 22
def main():
    options, args = get_arg()
    tool_dir = args[0]

    path_to_alfa = os.path.join(tool_dir, 'ALFA.py')

    if options.output_filename is None:
        msg = 'No json output file specified'
        sys.exit(msg)
    output_filename = options.output_filename

    # Interestingly the output file to return is not empty initially.
    # it contains a dictionary, with notably the path to the dir where the alfa_indexes
    # are expected to be found
    params = from_json_string(open(output_filename).read())
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir(target_directory)

    tmp_dir = tempfile.mkdtemp(prefix='tmp', suffix='')
    os.chdir(tmp_dir)

    data_manager_dict = {}

    if options.ensembl_info:
        kingdom, species_name = options.ensembl_info
        species_name = standardize_species_name(species_name)
        url = get_ensembl_url_root(kingdom)
        species_name, species_line = test_ensembl_species_exists(kingdom, url, species_name)
        gtf_archive_name = get_ensembl_gtf_archive(kingdom, url, species_name, species_line)
        data_table_entry = get_data_table_new_entry(gtf_archive_name)
        gtf_file_name = '%s.gtf' % data_table_entry['prefix']
        uncompress_gz(gtf_archive_name, gtf_file_name)
        generate_alfa_indexes(path_to_alfa, gtf_file_name)
        stranded_index_name = '%s.stranded.index' % data_table_entry['prefix']
        unstranded_index_name = '%s.unstranded.index' % data_table_entry['prefix']
        add_data_table_entry(data_manager_dict, data_table_entry)

    print("____________________________________________________________")
    print("*** General Info")
    print("URL ROOT:\t%s" % url)
    print("SPECIES:\t%s" % data_table_entry['species'])
    print("VERSION:\t%s" % data_table_entry['version'])
    print("RELEASE:\t%s" % data_table_entry['release'])
    print("VALUE:\t%s" % data_table_entry['value'])
    print("DBKEY:\t%s" % data_table_entry['dbkey'])
    print("NAME:\t%s" % data_table_entry['name'])
    print("PREFIX:\t%s" % data_table_entry['prefix'])

    shutil.copyfile(stranded_index_name, os.path.join(target_directory, stranded_index_name))
    shutil.copyfile(unstranded_index_name, os.path.join(target_directory, unstranded_index_name))

    cleanup_before_exit(tmp_dir)

    open(output_filename, 'wb').write(to_json_string(data_manager_dict))
Example 23
 def handle( self ):
     request = self.request.recv( 8192 )
     response = {}
     valid, request, response = json.validate_jsonrpc_request( request, ( 'get_state', ), () )
     if valid:
         self.request.send( json.to_json_string( json.jsonrpc_response( request=request, result=self.server.state_result.result ) ) )
     else:
         error_msg = 'Unable to serve request: %s' % response['error']['message']
         if 'data' in response['error']:
             error_msg += ': %s' % response['error']['data']
         log.error( error_msg )
         log.debug( 'Original request was: %s' % request )
Example 24
def main():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option('-j',
                      '--jar_path',
                      dest='jar_path',
                      action='store',
                      type="string",
                      default=None,
                      help='snpEff.jar path')
    parser.add_option('-c',
                      '--config',
                      dest='config',
                      action='store',
                      type="string",
                      default=None,
                      help='snpEff.config path')
    parser.add_option('-g',
                      '--genome_version',
                      dest='genome_version',
                      action='store',
                      type="string",
                      default=None,
                      help='genome_version')
    parser.add_option('-o',
                      '--organism',
                      dest='organism',
                      action='store',
                      type="string",
                      default=None,
                      help='organism name')
    (options, args) = parser.parse_args()

    filename = args[0]

    params = from_json_string(open(filename).read())
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir(target_directory)
    data_manager_dict = {}

    #Create SnpEff Reference Data
    for genome_version, organism in zip(
            options.genome_version.split(','),
            getOrganismNames(options.jar_path, options.genome_version,
                             options.organism).split(',')):
        download_database(data_manager_dict, target_directory,
                          options.jar_path, options.config, genome_version,
                          organism)

    #save info to json file
    open(filename, 'wb').write(to_json_string(data_manager_dict))
Example 25
def create_paramfile(trans, uploaded_datasets):
    """
    Create the upload tool's JSON "param" file.
    """
    json_file = tempfile.mkstemp()
    json_file_path = json_file[1]
    json_file = os.fdopen(json_file[0], 'w')
    for uploaded_dataset in uploaded_datasets:
        data = uploaded_dataset.data
        if uploaded_dataset.type == 'composite':
            # we need to init metadata before the job is dispatched
            data.init_meta()
            for meta_name, meta_value in uploaded_dataset.metadata.iteritems():
                setattr(data.metadata, meta_name, meta_value)
            trans.sa_session.add(data)
            trans.sa_session.flush()
            json = dict(file_type=uploaded_dataset.file_type,
                        dataset_id=data.dataset.id,
                        dbkey=uploaded_dataset.dbkey,
                        type=uploaded_dataset.type,
                        metadata=uploaded_dataset.metadata,
                        primary_file=uploaded_dataset.primary_file,
                        composite_file_paths=uploaded_dataset.composite_files,
                        composite_files=dict([
                            (k, v.__dict__)
                            for k, v in data.datatype.get_composite_files(
                                data).items()
                        ]))
        else:
            try:
                is_binary = uploaded_dataset.datatype.is_binary
            except:
                is_binary = None
            try:
                link_data_only = uploaded_dataset.link_data_only
            except:
                link_data_only = False
            json = dict(file_type=uploaded_dataset.file_type,
                        ext=uploaded_dataset.ext,
                        name=uploaded_dataset.name,
                        dataset_id=data.dataset.id,
                        dbkey=uploaded_dataset.dbkey,
                        type=uploaded_dataset.type,
                        is_binary=is_binary,
                        link_data_only=link_data_only,
                        space_to_tab=uploaded_dataset.space_to_tab,
                        path=uploaded_dataset.path)
        json_file.write(to_json_string(json) + '\n')
    json_file.close()
    return json_file_path
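
The file produced above holds one JSON object per line, so the upload tool can stream it back without loading everything at once. A minimal reader sketch (read_paramfile is an illustration, not code from these excerpts; it assumes the same from_json_string helper used throughout this listing):

from galaxy.util.json import from_json_string

def read_paramfile(json_file_path):
    # Yield one upload description dict per line of the "param" file
    with open(json_file_path) as fh:
        for line in fh:
            yield from_json_string(line)
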
Example 26
 def get_chunk(self, trans, dataset, chunk):
     ck_index = int(chunk)
     f = open(dataset.file_name)
     f.seek(ck_index * self.CHUNK_SIZE)
     # If we aren't at the start of the file, seek to next newline.  Do this better eventually.
     if f.tell() != 0:
         cursor = f.read(1)
         while cursor and cursor != '\n':
             cursor = f.read(1)
     ck_data = f.read(self.CHUNK_SIZE)
     cursor = f.read(1)
     while cursor and ck_data[-1] != '\n':
         ck_data += cursor
         cursor = f.read(1)
     return to_json_string( { 'ck_data': util.unicodify( ck_data ), 'ck_index': ck_index + 1 } )
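
A hedged sketch of how a caller could page through a dataset with get_chunk (iter_chunks and its arguments are assumptions for illustration): each response carries the next ck_index to request, and an empty ck_data means the end of the file was reached.

from galaxy.util.json import from_json_string

def iter_chunks(datatype, trans, dataset):
    ck_index = 0
    while True:
        chunk = from_json_string(datatype.get_chunk(trans, dataset, ck_index))
        if not chunk['ck_data']:
            break                        # a seek past EOF returns an empty chunk
        yield chunk['ck_data']
        ck_index = chunk['ck_index']
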
Example 27
 def get_chunk(self, trans, dataset, chunk):
     ck_index = int(chunk)
     f = open(dataset.file_name)
     f.seek(ck_index * self.CHUNK_SIZE)
     # If we aren't at the start of the file, seek to next newline.  Do this better eventually.
     if f.tell() != 0:
         cursor = f.read(1)
         while cursor and cursor != '\n':
             cursor = f.read(1)
     ck_data = f.read(self.CHUNK_SIZE)
     cursor = f.read(1)
     while cursor and ck_data[-1] != '\n':
         ck_data += cursor
         cursor = f.read(1)
     return to_json_string({'ck_data': ck_data, 'ck_index': ck_index + 1})
Example 28
 def get_state(self, transfer_jobs, via_socket=False):
     transfer_jobs = listify(transfer_jobs)
     rval = []
     for tj in transfer_jobs:
         if via_socket and tj.state not in tj.terminal_states and tj.socket:
             try:
                 request = json.jsonrpc_request(method='get_state', id=True)
                 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                 sock.settimeout(5)
                 sock.connect(('localhost', tj.socket))
                 sock.send(json.to_json_string(request))
                 response = sock.recv(8192)
                 valid, response = json.validate_jsonrpc_response(
                     response, id=request['id'])
                 if not valid:
                     # No valid response received, make some pseudo-json-rpc
                     raise Exception(
                         dict(
                             code=128,
                             message=
                             'Did not receive valid response from transfer daemon for state'
                         ))
                 if 'error' in response:
                     # Response was valid but Request resulted in an error
                     raise Exception(response['error'])
                 else:
                     # Request was valid
                     response['result']['transfer_job_id'] = tj.id
                     rval.append(response['result'])
             except Exception, e:
                 # State checking via the transfer daemon failed, just
                 # return the state from the database instead.  Callers can
                 # look for the 'error' member of the response to see why
                 # the check failed.
                 self.sa_session.refresh(tj)
                 error = e.args
                 if type(error) != dict:
                     error = dict(
                         code=256,
                         message='Error connecting to transfer daemon',
                         data=str(e))
                 rval.append(
                     dict(transfer_job_id=tj.id,
                          state=tj.state,
                          error=error))
         else:
             self.sa_session.refresh(tj)
             rval.append(dict(transfer_job_id=tj.id, state=tj.state))
     return rval
Example 29
def main():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option('-k',
                      '--dbkey',
                      dest='dbkey',
                      action='store',
                      type="string",
                      default=None,
                      help='dbkey')
    parser.add_option('-d',
                      '--description',
                      dest='description',
                      action='store',
                      type="string",
                      default=None,
                      help='description')
    parser.add_option('-c',
                      '--defuse_config',
                      dest='defuse_config',
                      action='store',
                      type="string",
                      default=None,
                      help='defuse_config')
    parser.add_option('-s',
                      '--defuse_script',
                      dest='defuse_script',
                      action='store',
                      type="string",
                      default=None,
                      help='defuse_script')
    (options, args) = parser.parse_args()

    filename = args[0]

    params = from_json_string(open(filename).read())
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir(target_directory)
    data_manager_dict = {}

    #Create Defuse Reference Data
    run_defuse_script(data_manager_dict, params, target_directory,
                      options.dbkey, options.description,
                      options.defuse_config, options.defuse_script)

    #save info to json file
    open(filename, 'wb').write(to_json_string(data_manager_dict))
Example 30
 def launch_instance(self, trans, cluster_name, password, key_id, secret,
                     instance_type, share_string, keypair, ami=None,
                     zone=None, bucket_default=None, **kwargs):
     ami = ami or trans.app.config.cloudlaunch_default_ami
     cfg = cloudman.CloudManConfig(key_id, secret, cluster_name, ami,
                                   instance_type, password, placement=zone)
     cml = cloudman.launch.CloudManLauncher(key_id, secret)
     # This should probably be handled better on the bioblend side, but until
     # an egg update can be made, this needs to conditionally include the
     # parameter or not, even if the value is None.
     if bucket_default:
         result = cml.launch(cluster_name, ami, instance_type, password,
                             cfg.kernel_id, cfg.ramdisk_id, cfg.key_name,
                             cfg.security_groups, cfg.placement,
                             bucket_default=bucket_default)
     else:
         result = cml.launch(cluster_name, ami, instance_type, password,
                             cfg.kernel_id, cfg.ramdisk_id, cfg.key_name,
                             cfg.security_groups, cfg.placement)
     # result is a dict with sg_names, kp_name, kp_material, rs, and instance_id
     if not result['rs']:
         trans.response.status = 400
         return "Instance failure, but no specific error was detected.  Please check your AWS Console."
     instance = result['rs'].instances[0]
     while not instance.public_dns_name:
         try:
             instance.update()
         except EC2ResponseError:
             # This can happen when update is invoked before the instance is fully registered.
             pass
         time.sleep(1)
     if result['kp_material']:
         # We have created a keypair.  Save to tempfile for one time retrieval.
         (fd, fname) = tempfile.mkstemp(prefix=PKEY_PREFIX, dir=trans.app.config.new_file_path)
         f = os.fdopen(fd, 'wt')
         f.write(result['kp_material'])
         f.close()
         kp_material_tag = fname[fname.rfind(PKEY_PREFIX) + len(PKEY_PREFIX):]
     else:
         kp_material_tag = None
     return to_json_string({'cluster_name': cluster_name,
                            'instance_id': result['rs'].instances[0].id,
                            'image_id': result['rs'].instances[0].image_id,
                            'public_dns_name': result['rs'].instances[0].public_dns_name,
                            'kp_name': result['kp_name'],
                            'kp_material_tag': kp_material_tag})
Example 31
def create_paramfile( trans, uploaded_datasets ):
    """
    Create the upload tool's JSON "param" file.
    """
    json_file = tempfile.mkstemp()
    json_file_path = json_file[1]
    json_file = os.fdopen( json_file[0], 'w' )
    for uploaded_dataset in uploaded_datasets:
        data = uploaded_dataset.data
        if uploaded_dataset.type == 'composite':
            # we need to init metadata before the job is dispatched
            data.init_meta()
            for meta_name, meta_value in uploaded_dataset.metadata.iteritems():
                setattr( data.metadata, meta_name, meta_value )
            trans.sa_session.add( data )
            trans.sa_session.flush()
            json = dict( file_type = uploaded_dataset.file_type,
                         dataset_id = data.dataset.id,
                         dbkey = uploaded_dataset.dbkey,
                         type = uploaded_dataset.type,
                         metadata = uploaded_dataset.metadata,
                         primary_file = uploaded_dataset.primary_file,
                         composite_file_paths = uploaded_dataset.composite_files,
                         composite_files = dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
        else:
            try:
                is_binary = uploaded_dataset.datatype.is_binary
            except:
                is_binary = None
            try:
                link_data_only = uploaded_dataset.link_data_only
            except:
                link_data_only = False
            json = dict( file_type = uploaded_dataset.file_type,
                         ext = uploaded_dataset.ext,
                         name = uploaded_dataset.name,
                         dataset_id = data.dataset.id,
                         dbkey = uploaded_dataset.dbkey,
                         type = uploaded_dataset.type,
                         is_binary = is_binary,
                         link_data_only = link_data_only,
                         space_to_tab = uploaded_dataset.space_to_tab,
                         path = uploaded_dataset.path )
        json_file.write( to_json_string( json ) + '\n' )
    json_file.close()
    return json_file_path
Example 32
 def get_chunk(self, trans, dataset, chunk):
     ck_index = int(chunk)
     f = open(dataset.file_name)
     f.seek(ck_index * trans.app.config.display_chunk_size)
     # If we aren't at the start of the file, seek to next newline.  Do this better eventually.
     if f.tell() != 0:
         cursor = f.read(1)
         while cursor and cursor != '\n':
             cursor = f.read(1)
     ck_data = f.read(trans.app.config.display_chunk_size)
     cursor = f.read(1)
     while cursor and ck_data[-1] != '\n':
         ck_data += cursor
         cursor = f.read(1)
     return to_json_string({
         'ck_data': util.unicodify(ck_data),
         'ck_index': ck_index + 1
     })
Example 33
 def handle_incoming(cls, incoming):
     npd = {}
     for key, val in incoming.iteritems():
         if key.startswith("pja"):
             sp = key.split("__")
             ao_key = sp[2] + sp[1]
             # flag / output_name / pjatype / desc
             if not ao_key in npd:
                 npd[ao_key] = {"action_type": sp[2], "output_name": sp[1], "action_arguments": {}}
             if len(sp) > 3:
                 if sp[3] == "output_name":
                     npd[ao_key]["output_name"] = val
                 else:
                     npd[ao_key]["action_arguments"][sp[3]] = val
         else:
             # Not pja stuff.
             pass
     return to_json_string(npd)
Example 34
def main():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-j', '--jar_path', dest='jar_path', action='store', type="string", default=None, help='snpEff.jar path' )
    (options, args) = parser.parse_args()

    filename = args[0]

    params = from_json_string( open( filename ).read() )
    target_directory = params[ 'output_data' ][0]['extra_files_path']
    os.mkdir( target_directory )
    data_manager_dict = {}


    #Create SnpEff reference data
    data_manager_dict = fetch_databases( data_manager_dict, target_directory, options.jar_path)

    #save info to json file
    open( filename, 'wb' ).write( to_json_string( data_manager_dict ) )
Example 35
def main():
    #Parse Command Line
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--out', help='Output file')
    args = parser.parse_args()

    filename = args.out

    params = from_json_string(open(filename).read())
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir(target_directory)
    data_manager_dict = {}

    sequence_id, sequence_name = get_reference_id_name(params)
    trained_dir = get_url(params)
    #Fetch the FASTA
    download_from_GlimmerHMM(data_manager_dict, target_directory, sequence_id, sequence_name, trained_dir)
    #save info to json file
    open(filename, 'wb').write(to_json_string(data_manager_dict))
Example 36
def main():

    # get args from command line
    global args
    args = get_args()

    # Extract json file params
    data_manager_dict = {}
    filename = args.output
    params = from_json_string(open(filename).read())
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir(target_directory)

    # if args.database=="frogs_db_data":
    frogs_sources(data_manager_dict, target_directory)
    # elif args.database=="HVL_db_data":
    #     HVL_sources(data_manager_dict,target_directory)

    # save info to json file
    open(filename, 'wt').write(to_json_string(data_manager_dict))
Example 37
 def handle_incoming(cls, incoming):
     npd = {}
     for key, val in incoming.iteritems():
         if key.startswith('pja'):
             sp = key.split('__')
             ao_key = sp[2] + sp[1]
             # flag / output_name / pjatype / desc
             if ao_key not in npd:
                 npd[ao_key] = {'action_type': sp[2],
                                'output_name': sp[1],
                                'action_arguments': {}}
             if len(sp) > 3:
                 if sp[3] == 'output_name':
                     npd[ao_key]['output_name'] = val
                 else:
                     npd[ao_key]['action_arguments'][sp[3]] = val
         else:
             # Not pja stuff.
             pass
     return to_json_string(npd)
Example 38
 def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, chunk=None):
     #TODO Prevent failure when displaying extremely long > 50kb lines.
     if to_ext:
         return self._serve_raw(trans, dataset, to_ext)
     if chunk:
         ck_index = int(chunk)
         f = open(dataset.file_name)
         f.seek(ck_index * self.CHUNK_SIZE)
         # If we aren't at the start of the file, seek to next newline.  Do this better eventually.
         if f.tell() != 0:
             cursor = f.read(1)
             while cursor and cursor != '\n':
                 cursor = f.read(1)
         ck_data = f.read(self.CHUNK_SIZE)
         cursor = f.read(1)
         while cursor and ck_data[-1] != '\n':
             ck_data += cursor
             cursor = f.read(1)
         return to_json_string({'ck_data': ck_data, 'ck_index': ck_index + 1})
     return trans.fill_template("/dataset/tabular_chunked.mako", dataset=dataset)
Example 39
def main():
    # Parse command line
    parser = optparse.OptionParser()
    parser.add_option('-o', '--outfile', dest='outfile', action='store', type="string", default=None, help='outfile')
    parser.add_option('-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename')
    parser.add_option('-s', '--sequence_name', dest='sequence_name', action='store', type="string", default=None, help='sequence_name')
    parser.add_option('-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name')
    (options, args) = parser.parse_args()

    outfile = options.outfile

    params = from_json_string(open(outfile).read())
    target_directory = params[ 'output_data' ][0]['extra_files_path']
    os.mkdir(target_directory)
    data_manager_dict = {}

    # Build index files
    build_2bwt_index(data_manager_dict, options.fasta_filename, params, target_directory, options.sequence_name, data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME)

    # Save info to json file
    open(outfile, 'wb').write(to_json_string(data_manager_dict))
Example 40
def main():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-d', '--dbkey_description', dest='dbkey_description', action='store', type="string", default=None, help='dbkey_description' )
    (options, args) = parser.parse_args()
    
    filename = args[0]
    
    params = from_json_string( open( filename ).read() )
    target_directory = params[ 'output_data' ][0]['extra_files_path']
    os.mkdir( target_directory )
    data_manager_dict = {}

    database_id = params['param_dict']['database_id']
    database_name = params['param_dict']['database_name']

    #Fetch the FASTA
    REFERENCE_SOURCE_TO_DOWNLOAD[ params['param_dict']['reference_source']['reference_source_selector'] ]( data_manager_dict, params, target_directory, database_id, database_name )
    
    #save info to json file
    open( filename, 'wb' ).write( to_json_string( data_manager_dict ) )
Example 41
 def get_html( self, prefix="", disabled=False ):
     primary_field = self.primary_field
     html = '<div class="switch-option">'
     html += primary_field.get_html( prefix=prefix, disabled=disabled )
     html += '<input name="__switch_default__" type="hidden" value="%s" />' % self.default_field
     options = []
     for name, delegate_field in self.delegate_fields.items():
         field = to_json_string( delegate_field.to_dict() )
         option = " '%s': %s" % ( name, field )
         options.append( option )
     html += '<script>$(document).ready( function() {\nvar switchOptions = {\n'
     html += ','.join( options )
     html += '};\n'
     html += 'if ( window.enhanced_galaxy_tools ) {\n'
     html += 'require( [ "galaxy.tools" ], function( mod_tools ) { new mod_tools.SwitchSelectView({\n'
     html += 'el: $(\'[name="%s%s"]\').closest( "div.switch-option" ),' % ( prefix, primary_field.name )
     html += 'default_option: "%s",\n' % self.default_field
     html += 'prefix: "%s",\n' % prefix
     html += 'switch_options: switchOptions }); } )\n'
     html += "}"
     html += '});\n</script></div>'
     return html
Example 42
 def handle_incoming(cls, incoming):
     npd = {}
     for key, val in incoming.iteritems():
         if key.startswith('pja'):
             sp = key.split('__')
             ao_key = sp[2] + sp[1]
             # flag / output_name / pjatype / desc
             if not ao_key in npd:
                 npd[ao_key] = {
                     'action_type': sp[2],
                     'output_name': sp[1],
                     'action_arguments': {}
                 }
             if len(sp) > 3:
                 if sp[3] == 'output_name':
                     npd[ao_key]['output_name'] = val
                 else:
                     npd[ao_key]['action_arguments'][sp[3]] = val
         else:
             # Not pja stuff.
             pass
     return to_json_string(npd)
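
For reference, an illustrative input/output pair for handle_incoming (the field names are hypothetical): flat 'pja__<output>__<action type>__<argument>' keys are regrouped into one dict per action before being serialized as JSON.

incoming = {
    'pja__out_file1__RenameDatasetAction': '1',
    'pja__out_file1__RenameDatasetAction__newname': 'renamed.tabular',
}
# handle_incoming(incoming) would return to_json_string of:
# {'RenameDatasetActionout_file1': {'action_type': 'RenameDatasetAction',
#                                   'output_name': 'out_file1',
#                                   'action_arguments': {'newname': 'renamed.tabular'}}}
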
Example 43
def main():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-d', '--dbkey_description', dest='dbkey_description', action='store', type="string", default=None, help='dbkey_description' )
    (options, args) = parser.parse_args()
    
    filename = args[0]
    
    params = from_json_string( open( filename ).read() )
    target_directory = params[ 'output_data' ][0]['extra_files_path']
    os.mkdir( target_directory )
    data_manager_dict = {}
    
    dbkey, sequence_id, sequence_name = get_dbkey_id_name( params, dbkey_description=options.dbkey_description ) 
    
    if dbkey in [ None, '', '?' ]:
        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )
    
    #Fetch the FASTA
    REFERENCE_SOURCE_TO_DOWNLOAD[ params['param_dict']['reference_source']['reference_source_selector'] ]( data_manager_dict, params, target_directory, dbkey, sequence_id, sequence_name )
    
    #save info to json file
    open( filename, 'wb' ).write( to_json_string( data_manager_dict ) )
Example 44
def main():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-j', '--jar_path', dest='jar_path', action='store', type="string", default=None, help='snpEff.jar path' )
    parser.add_option( '-c', '--config', dest='config', action='store', type="string", default=None, help='snpEff.config path' )
    parser.add_option( '-g', '--genome_version', dest='genome_version', action='store', type="string", default=None, help='genome_version' )
    parser.add_option( '-o', '--organism', dest='organism', action='store', type="string", default=None, help='organism name' )
    (options, args) = parser.parse_args()

    filename = args[0]

    params = from_json_string( open( filename ).read() )
    target_directory = params[ 'output_data' ][0]['extra_files_path']
    os.mkdir( target_directory )
    data_manager_dict = {}


    #Create SnpEff Reference Data
    for genome_version, organism in zip(options.genome_version.split(','), options.organism.split(',')):
        download_database( data_manager_dict, target_directory, options.jar_path, options.config, genome_version, organism )

    #save info to json file
    open( filename, 'wb' ).write( to_json_string( data_manager_dict ) )
Example 45
    def reset_metadata_on_installed_repositories( self, trans, payload, **kwd ):
        """
        PUT /api/tool_shed_repositories/reset_metadata_on_installed_repositories

        Resets all metadata on all repositories installed into Galaxy in an "orderly fashion".

        :param key: the API key of the Galaxy admin user.
        """
        try:
            start_time = strftime( "%Y-%m-%d %H:%M:%S" )
            results = dict( start_time=start_time,
                            successful_count=0,
                            unsuccessful_count=0,
                            repository_status=[] )
            # Make sure the current user's API key proves he is an admin user in this Galaxy instance.
            if not trans.user_is_admin():
                raise HTTPForbidden( detail='You are not authorized to reset metadata on repositories installed into this Galaxy instance.' )
            query = suc.get_query_for_setting_metadata_on_repositories( trans, my_writable=False, order=False )
            # Now reset metadata on all remaining repositories.
            for repository in query:
                repository_id = trans.security.encode_id( repository.id )
                try:
                    invalid_file_tups, metadata_dict = metadata_util.reset_all_metadata_on_installed_repository( trans, repository_id )
                    if invalid_file_tups:
                        message = tool_util.generate_message_for_invalid_tools( trans, invalid_file_tups, repository, None, as_html=False )
                        results[ 'unsuccessful_count' ] += 1
                    else:
                        message = "Successfully reset metadata on repository %s owned by %s" % ( str( repository.name ), str( repository.owner ) )
                        results[ 'successful_count' ] += 1
                except Exception, e:
                    message = "Error resetting metadata on repository %s owned by %s: %s" % ( str( repository.name ), str( repository.owner ), str( e ) )
                    results[ 'unsuccessful_count' ] += 1
                results[ 'repository_status' ].append( message )
            stop_time = strftime( "%Y-%m-%d %H:%M:%S" )
            results[ 'stop_time' ] = stop_time
            return json.to_json_string( results, sort_keys=True, indent=4 * ' ' )
        except Exception, e:
            # Assumed minimal handler: the except clause of the outer try is not shown in this excerpt.
            log.exception( 'Error resetting metadata on installed repositories' )
            return json.to_json_string( dict( error=str( e ) ) )
Example 46
 def get_state( self, transfer_jobs, via_socket=False ):
     transfer_jobs = listify( transfer_jobs )
     rval = []
     for tj in transfer_jobs:
         if via_socket and tj.state not in tj.terminal_states and tj.socket:
             try:
                 request = json.jsonrpc_request( method='get_state', id=True )
                 sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
                 sock.settimeout( 5 )
                 sock.connect( ( 'localhost', tj.socket ) )
                 sock.send( json.to_json_string( request ) )
                 response = sock.recv( 8192 )
                 valid, response = json.validate_jsonrpc_response( response, id=request['id'] )
                 if not valid:
                     # No valid response received, make some pseudo-json-rpc
                     raise Exception( dict( code=128, message='Did not receive valid response from transfer daemon for state' ) )
                 if 'error' in response:
                     # Response was valid but Request resulted in an error
                     raise Exception( response['error'])
                 else:
                     # Request was valid
                     response['result']['transfer_job_id'] = tj.id
                     rval.append( response['result'] )
             except Exception, e:
                 # State checking via the transfer daemon failed, just
                 # return the state from the database instead.  Callers can
                 # look for the 'error' member of the response to see why
                 # the check failed.
                 self.sa_session.refresh( tj )
                 error = e.args
                 if type( error ) != dict:
                     error = dict( code=256, message='Error connecting to transfer daemon', data=str( e ) )
                 rval.append( dict( transfer_job_id=tj.id, state=tj.state, error=error ) )
         else:
             self.sa_session.refresh( tj )
             rval.append( dict( transfer_job_id=tj.id, state=tj.state ) )
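The socket exchange above follows JSON-RPC conventions: the client sends a request carrying a 'method' and an 'id', and the daemon replies with either a 'result' or an 'error' for that same 'id'. A rough sketch of the two payloads, using the standard json module rather than Galaxy's jsonrpc_request/validate_jsonrpc_response helpers (field values are illustrative):

import json

# What a get_state request might look like on the wire.
request = { 'jsonrpc': '2.0', 'method': 'get_state', 'id': 1 }
wire_request = json.dumps( request )

# A successful reply from the daemon; the caller merges the transfer job id into 'result'.
wire_response = json.dumps( { 'jsonrpc': '2.0', 'id': 1, 'result': { 'state': 'running' } } )
response = json.loads( wire_response )
assert response[ 'id' ] == request[ 'id' ]
if 'error' in response:
    raise Exception( response[ 'error' ] )
result = response[ 'result' ]
result[ 'transfer_job_id' ] = 42  # illustrative job id
print( result )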
Example no. 47
def create_job(trans, params, tool, json_file_path, data_list, folder=None):
    """
    Create the upload job.
    """
    job = trans.app.model.Job()
    job.session_id = trans.get_galaxy_session().id
    if folder:
        job.library_folder_id = folder.id
    else:
        job.history_id = trans.history.id
    job.tool_id = tool.id
    job.tool_version = tool.version
    job.state = job.states.UPLOAD
    trans.sa_session.add(job)
    trans.sa_session.flush()
    log.info('tool %s created job id %d' % (tool.id, job.id))
    trans.log_event('created job id %d' % job.id, tool_id=tool.id)

    for name, value in tool.params_to_strings(params, trans.app).iteritems():
        job.add_parameter(name, value)
    job.add_parameter('paramfile', to_json_string(json_file_path))
    if folder:
        for i, dataset in enumerate(data_list):
            job.add_output_library_dataset('output%i' % i, dataset)
    else:
        for i, dataset in enumerate(data_list):
            job.add_output_dataset('output%i' % i, dataset)
    job.state = job.states.NEW
    trans.sa_session.add(job)
    trans.sa_session.flush()

    # Queue the job for execution
    trans.app.job_queue.put(job.id, tool)
    trans.log_event("Added job to the job queue, id: %s" % str(job.id),
                    tool_id=job.tool_id)
    return dict([('output%i' % i, v) for i, v in enumerate(data_list)])
Example no. 48
def main():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename' )
    parser.add_option( '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey' )
    parser.add_option( '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description' )
    parser.add_option( '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name' )
    parser.add_option( '--sjdbGTFfile', type="string", default=None )
    parser.add_option( '--sjdbGTFchrPrefix', type="string", default=None )
    parser.add_option( '--sjdbGTFfeatureExon', type="string", default=None )
    parser.add_option( '--sjdbGTFtagExonParentTranscript', type="string", default=None )
    parser.add_option( '--sjdbFileChrStartEnd', type="string", default=None )
    parser.add_option( '--sjdbOverhang', type="string", default='100' )
    parser.add_option( '--runThreadN', type="string", default='4' )
    (options, args) = parser.parse_args()
    filename = args[0]
    params = from_json_string( open( filename ).read() )
    target_directory = params[ 'output_data' ][0]['extra_files_path'].encode('ascii','replace')
    dbkey = options.fasta_dbkey
    if dbkey in [ None, '', '?' ]:
        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )
    
    sequence_id, sequence_name = get_id_name( params, dbkey=dbkey, fasta_description=options.fasta_description )
    
    try:
        os.mkdir( target_directory )
    except OSError:
        pass
    #build the index
    data_manager_dict = build_rnastar_index(
        data_manager_dict={}, fasta_filename=options.fasta_filename, target_directory=target_directory,
        dbkey=dbkey, sequence_id=sequence_id, sequence_name=sequence_name,
        data_table_name=options.data_table_name, sjdbOverhang=options.sjdbOverhang,
        sjdbGTFfile=options.sjdbGTFfile, sjdbFileChrStartEnd=options.sjdbFileChrStartEnd,
        sjdbGTFtagExonParentTranscript=options.sjdbGTFtagExonParentTranscript,
        sjdbGTFfeatureExon=options.sjdbGTFfeatureExon, sjdbGTFchrPrefix=options.sjdbGTFchrPrefix,
        n_threads=options.runThreadN )
    open( filename, 'wb' ).write( to_json_string( data_manager_dict ) )
Example no. 49
                                           trans.app).iteritems():
     job.add_parameter(name, value)
 current_user_roles = trans.get_current_user_roles()
 for name, dataset in inp_data.iteritems():
     if dataset:
         if not trans.app.security_agent.can_access_dataset(
                 current_user_roles, dataset.dataset):
             raise "User does not have permission to use a dataset (%s) provided for input." % data.id
         job.add_input_dataset(name, dataset)
     else:
         job.add_input_dataset(name, None)
 for name, dataset in out_data.iteritems():
     job.add_output_dataset(name, dataset)
 job.object_store_id = object_store_id
 if job_params:
     job.params = to_json_string(job_params)
 job.set_handler(tool.get_job_handler(job_params))
 trans.sa_session.add(job)
 # Now that we have a job id, we can remap any outputs if this is a rerun and the user chose to continue dependent jobs
 # This functionality requires tracking jobs in the database.
 if trans.app.config.track_jobs_in_database and rerun_remap_job_id is not None:
     try:
         old_job = trans.sa_session.query(
             trans.app.model.Job).get(rerun_remap_job_id)
         assert old_job is not None, '(%s/%s): Old job id is invalid' % (
             rerun_remap_job_id, job.id)
         assert old_job.tool_id == job.tool_id, '(%s/%s): Old tool id (%s) does not match rerun tool id (%s)' % (
             old_job.id, job.id, old_job.tool_id, job.tool_id)
         if trans.user is not None:
             assert old_job.user_id == trans.user.id, '(%s/%s): Old user id (%s) does not match rerun user id (%s)' % (
                 old_job.id, job.id, old_job.user_id, trans.user.id)
Example no. 50
            'value': identifier,
            'dbkey': dbkey,
            'name': description,
            'path': os.path.basename(filename),
        }
    elif method == 'history':
        # Copy file from history
        filename = params['param_dict']['reference_source']['input_gene_list']
        target_filename = os.path.join(target_dir,os.path.basename(filename))
        shutil.copyfile(filename,target_filename)
        # Check identifier and description
        if not description:
            description = "%s: %s" % (dbkey,
                                      os.path.splitext(os.path.basename(filename))[0])
        if not identifier:
            identifier = "%s_%s" % (dbkey,
                                    os.path.splitext(os.path.basename(filename))[0])
        # Update the output dictionary
        data_manager_dict['data_tables']['rnachipintegrator_canonical_genes'] = {
            'value': identifier,
            'dbkey': dbkey,
            'name': description,
            'path': os.path.basename(filename),
        }
    else:
        raise NotImplementedError("Method '%s' not implemented" % method)

    #save info to json file
    open(jsonfile,'wb').write(to_json_string(data_manager_dict))
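For reference, the JSON file written above ends up containing the single table entry built in whichever branch ran; roughly (values are made up):

# Illustrative contents of data_manager_dict as serialized above.
data_manager_dict = {
    'data_tables': {
        'rnachipintegrator_canonical_genes': {
            'value': 'hg38_canonical',            # identifier
            'dbkey': 'hg38',
            'name': 'hg38: canonical gene list',  # description
            'path': 'canonical_genes.txt',        # basename of the copied file
        }
    }
}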

                        repository,
                        None,
                        as_html=False)
                    results['unsuccessful_count'] += 1
                else:
                    message = "Successfully reset metadata on repository %s owned by %s" % \
                        ( str( repository.name ), str( repository.owner ) )
                    results['successful_count'] += 1
            except Exception, e:
                message = "Error resetting metadata on repository %s owned by %s: %s" % \
                    ( str( repository.name ), str( repository.owner ), str( e ) )
                results['unsuccessful_count'] += 1
            results['repository_status'].append(message)
        stop_time = strftime("%Y-%m-%d %H:%M:%S")
        results['stop_time'] = stop_time
        return json.to_json_string(results, sort_keys=True, indent=4)

    @expose_api
    def show(self, trans, id, **kwd):
        """
        GET /api/tool_shed_repositories/{encoded_tool_shed_repository_id}
        Display a dictionary containing information about a specified tool_shed_repository.

        :param id: the encoded id of the ToolShedRepository object
        """
        # Example URL: http://localhost:8763/api/tool_shed_repositories/df7a1f0c02a5b08e
        tool_shed_repository = suc.get_tool_shed_repository_by_id(
            trans.app, id)
        if tool_shed_repository is None:
            log.debug(
                "Unable to locate tool_shed_repository record for id %s." %
Example no. 52
def main():
    if install_and_test_base_util.tool_shed_api_key is None:
        # If the tool shed URL specified in any dict is not present in the tool_sheds_conf.xml, the installation will fail.
        log.debug(
            "Cannot proceed without a valid tool shed API key set in the enviroment variable GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY."
        )
        return 1
    if install_and_test_base_util.galaxy_tool_shed_url is None:
        log.debug(
            "Cannot proceed without a valid Tool Shed base URL set in the environment variable GALAXY_INSTALL_TEST_TOOL_SHED_URL."
        )
        return 1
    # ---- Configuration ------------------------------------------------------
    galaxy_test_host = os.environ.get("GALAXY_INSTALL_TEST_HOST", install_and_test_base_util.default_galaxy_test_host)
    # Set the GALAXY_INSTALL_TEST_HOST variable so that Twill will have the Galaxy url to which to
    # install repositories.
    os.environ["GALAXY_INSTALL_TEST_HOST"] = galaxy_test_host
    # Set the GALAXY_TEST_HOST environment variable so that the toolbox tests will have the Galaxy url
    # on which to run tool functional tests.
    os.environ["GALAXY_TEST_HOST"] = galaxy_test_host
    galaxy_test_port = os.environ.get(
        "GALAXY_INSTALL_TEST_PORT", str(install_and_test_base_util.default_galaxy_test_port_max)
    )
    os.environ["GALAXY_TEST_PORT"] = galaxy_test_port
    tool_path = os.environ.get("GALAXY_INSTALL_TEST_TOOL_PATH", "tools")
    if "HTTP_ACCEPT_LANGUAGE" not in os.environ:
        os.environ["HTTP_ACCEPT_LANGUAGE"] = default_galaxy_locales
    galaxy_test_file_dir = os.environ.get("GALAXY_INSTALL_TEST_FILE_DIR", default_galaxy_test_file_dir)
    if not os.path.isabs(galaxy_test_file_dir):
        galaxy_test_file_dir = os.path.abspath(galaxy_test_file_dir)
    use_distributed_object_store = os.environ.get("GALAXY_INSTALL_TEST_USE_DISTRIBUTED_OBJECT_STORE", False)
    if not os.path.isdir(galaxy_test_tmp_dir):
        os.mkdir(galaxy_test_tmp_dir)
    # Set up the configuration files for the Galaxy instance.
    galaxy_shed_tool_path = os.environ.get(
        "GALAXY_INSTALL_TEST_SHED_TOOL_PATH", tempfile.mkdtemp(dir=galaxy_test_tmp_dir, prefix="shed_tools")
    )
    shed_tool_data_table_conf_file = os.environ.get(
        "GALAXY_INSTALL_TEST_SHED_TOOL_DATA_TABLE_CONF",
        os.path.join(galaxy_test_tmp_dir, "test_shed_tool_data_table_conf.xml"),
    )
    galaxy_tool_data_table_conf_file = os.environ.get(
        "GALAXY_INSTALL_TEST_TOOL_DATA_TABLE_CONF", install_and_test_base_util.tool_data_table_conf
    )
    galaxy_tool_conf_file = os.environ.get(
        "GALAXY_INSTALL_TEST_TOOL_CONF", os.path.join(galaxy_test_tmp_dir, "test_tool_conf.xml")
    )
    galaxy_job_conf_file = os.environ.get(
        "GALAXY_INSTALL_TEST_JOB_CONF", os.path.join(galaxy_test_tmp_dir, "test_job_conf.xml")
    )
    galaxy_shed_tool_conf_file = os.environ.get(
        "GALAXY_INSTALL_TEST_SHED_TOOL_CONF", os.path.join(galaxy_test_tmp_dir, "test_shed_tool_conf.xml")
    )
    galaxy_migrated_tool_conf_file = os.environ.get(
        "GALAXY_INSTALL_TEST_MIGRATED_TOOL_CONF", os.path.join(galaxy_test_tmp_dir, "test_migrated_tool_conf.xml")
    )
    galaxy_tool_sheds_conf_file = os.environ.get(
        "GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF", os.path.join(galaxy_test_tmp_dir, "test_tool_sheds_conf.xml")
    )
    galaxy_shed_tools_dict = os.environ.get(
        "GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE", os.path.join(galaxy_test_tmp_dir, "shed_tool_dict")
    )
    file(galaxy_shed_tools_dict, "w").write(to_json_string({}))
    # Set the GALAXY_TOOL_SHED_TEST_FILE environment variable to the path of the shed_tools_dict file so that
    # test.base.twilltestcase.setUp will find and parse it properly.
    os.environ["GALAXY_TOOL_SHED_TEST_FILE"] = galaxy_shed_tools_dict
    if "GALAXY_INSTALL_TEST_TOOL_DATA_PATH" in os.environ:
        tool_data_path = os.environ.get("GALAXY_INSTALL_TEST_TOOL_DATA_PATH")
    else:
        tool_data_path = tempfile.mkdtemp(dir=galaxy_test_tmp_dir)
        os.environ["GALAXY_INSTALL_TEST_TOOL_DATA_PATH"] = tool_data_path
    # Configure the database connection and path.
    if "GALAXY_INSTALL_TEST_DBPATH" in os.environ:
        galaxy_db_path = os.environ["GALAXY_INSTALL_TEST_DBPATH"]
    else:
        tempdir = tempfile.mkdtemp(dir=galaxy_test_tmp_dir)
        galaxy_db_path = os.path.join(tempdir, "database")
    # Configure the paths Galaxy needs to install and test tools.
    galaxy_file_path = os.path.join(galaxy_db_path, "files")
    new_repos_path = tempfile.mkdtemp(dir=galaxy_test_tmp_dir)
    galaxy_tempfiles = tempfile.mkdtemp(dir=galaxy_test_tmp_dir)
    galaxy_migrated_tool_path = tempfile.mkdtemp(dir=galaxy_test_tmp_dir)
    # Set up the tool dependency path for the Galaxy instance.
    tool_dependency_dir = os.environ.get("GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR", None)
    if tool_dependency_dir is None:
        tool_dependency_dir = tempfile.mkdtemp(dir=galaxy_test_tmp_dir)
        os.environ["GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir
    os.environ["GALAXY_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir
    if "GALAXY_INSTALL_TEST_DBURI" in os.environ:
        database_connection = os.environ["GALAXY_INSTALL_TEST_DBURI"]
    else:
        database_connection = "sqlite:///" + os.path.join(galaxy_db_path, "install_and_test_repositories.sqlite")
    kwargs = {}
    for dir in [galaxy_test_tmp_dir]:
        try:
            os.makedirs(dir)
        except OSError:
            pass
    print "Database connection: ", database_connection
    # Generate the shed_tool_data_table_conf.xml file.
    file(shed_tool_data_table_conf_file, "w").write(install_and_test_base_util.tool_data_table_conf_xml_template)
    os.environ["GALAXY_INSTALL_TEST_SHED_TOOL_DATA_TABLE_CONF"] = shed_tool_data_table_conf_file
    # ---- Start up a Galaxy instance ------------------------------------------------------
    # Generate the tool_conf.xml file.
    file(galaxy_tool_conf_file, "w").write(install_and_test_base_util.tool_conf_xml)
    # Generate the job_conf.xml file.
    file(galaxy_job_conf_file, "w").write(install_and_test_base_util.job_conf_xml)
    # Generate the tool_sheds_conf.xml file, but only if the user has not specified an existing one in the environment.
    if "GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF" not in os.environ:
        file(galaxy_tool_sheds_conf_file, "w").write(install_and_test_base_util.tool_sheds_conf_xml)
    # Generate the shed_tool_conf.xml file.
    tool_conf_template_parser = string.Template(install_and_test_base_util.shed_tool_conf_xml_template)
    shed_tool_conf_xml = tool_conf_template_parser.safe_substitute(shed_tool_path=galaxy_shed_tool_path)
    file(galaxy_shed_tool_conf_file, "w").write(shed_tool_conf_xml)
    os.environ["GALAXY_INSTALL_TEST_SHED_TOOL_CONF"] = galaxy_shed_tool_conf_file
    # Generate the migrated_tool_conf.xml file.
    migrated_tool_conf_xml = tool_conf_template_parser.safe_substitute(shed_tool_path=galaxy_migrated_tool_path)
    file(galaxy_migrated_tool_conf_file, "w").write(migrated_tool_conf_xml)
    # Write the embedded web application's specific configuration to a temporary file. This is necessary in order for
    # the external metadata script to find the right datasets.
    kwargs = dict(
        admin_users="*****@*****.**",
        master_api_key=install_and_test_base_util.default_galaxy_master_api_key,
        allow_user_creation=True,
        allow_user_deletion=True,
        allow_library_path_paste=True,
        database_connection=database_connection,
        datatype_converters_config_file="datatype_converters_conf.xml.sample",
        file_path=galaxy_file_path,
        id_secret=install_and_test_base_util.galaxy_encode_secret,
        job_config_file=galaxy_job_conf_file,
        job_queue_workers=5,
        log_destination="stdout",
        migrated_tools_config=galaxy_migrated_tool_conf_file,
        new_file_path=galaxy_tempfiles,
        running_functional_tests=True,
        shed_tool_data_table_config=shed_tool_data_table_conf_file,
        shed_tool_path=galaxy_shed_tool_path,
        template_path="templates",
        tool_config_file=",".join([galaxy_tool_conf_file, galaxy_shed_tool_conf_file]),
        tool_data_path=tool_data_path,
        tool_data_table_config_path=galaxy_tool_data_table_conf_file,
        tool_dependency_dir=tool_dependency_dir,
        tool_path=tool_path,
        tool_parse_help=False,
        tool_sheds_config_file=galaxy_tool_sheds_conf_file,
        update_integrated_tool_panel=False,
        use_heartbeat=False,
    )
    galaxy_config_file = os.environ.get("GALAXY_INSTALL_TEST_INI_FILE", None)
    # If the user has passed in a path for the .ini file, do not overwrite it.
    if not galaxy_config_file:
        galaxy_config_file = os.path.join(galaxy_test_tmp_dir, "install_test_tool_shed_repositories_wsgi.ini")
        config_items = []
        for label in kwargs:
            config_tuple = label, kwargs[label]
            config_items.append(config_tuple)
        # Write a temporary file, based on universe_wsgi.ini.sample, using the configuration options defined above.
        generate_config_file("universe_wsgi.ini.sample", galaxy_config_file, config_items)
    kwargs["tool_config_file"] = [galaxy_tool_conf_file, galaxy_shed_tool_conf_file]
    # Set the global_conf[ '__file__' ] option to the location of the temporary .ini file, which gets passed to set_metadata.sh.
    kwargs["global_conf"] = install_and_test_base_util.get_webapp_global_conf()
    kwargs["global_conf"]["__file__"] = galaxy_config_file
    # ---- Build Galaxy Application --------------------------------------------------
    if not database_connection.startswith("sqlite://"):
        kwargs["database_engine_option_max_overflow"] = "20"
        kwargs["database_engine_option_pool_size"] = "10"
    kwargs["config_file"] = galaxy_config_file
    app = UniverseApplication(**kwargs)
    database_contexts.galaxy_context = app.model.context
    database_contexts.install_context = app.install_model.context

    log.debug("Embedded Galaxy application started...")
    # ---- Run galaxy webserver ------------------------------------------------------
    server = None
    global_conf = install_and_test_base_util.get_webapp_global_conf()
    global_conf["database_file"] = database_connection
    webapp = buildapp.app_factory(
        global_conf, use_translogger=False, static_enabled=install_and_test_base_util.STATIC_ENABLED, app=app
    )
    # Serve the app on a specified or random port.
    if galaxy_test_port is not None:
        server = httpserver.serve(webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False)
    else:
        random.seed()
        for i in range(0, 9):
            try:
                galaxy_test_port = str(
                    random.randint(
                        install_and_test_base_util.default_galaxy_test_port_min,
                        install_and_test_base_util.default_galaxy_test_port_max,
                    )
                )
                log.debug("Attempting to serve app on randomly chosen port: %s", galaxy_test_port)
                server = httpserver.serve(webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False)
                break
            except socket.error, e:
                if e[0] == 98:
                    continue
                raise
        else:
Example no. 53
                            v = ec2_conn.get_all_volumes(volume_ids = [vol_id])
                            if v:
                                zone = v[0].zone
                            else:
                                zone = ''
                except:
                    #If anything goes wrong with zone detection, use the default selection.
                    zone = ''
                for key in bucket.list():
                    if key.name.endswith('.clusterName'):
                        clusters.append({'name': key.name.split('.clusterName')[0],
                                         'persistent_data': pd_contents,
                                         'zone':zone})
        account_info['clusters'] = clusters
        account_info['zones'] = [z.name for z in ec2_conn.get_all_zones()]
        return to_json_string(account_info)

    @web.expose
    def launch_instance(self, trans, cluster_name, password, key_id, secret, instance_type, share_string, keypair, zone=None, **kwargs):
        ec2_error = None
        try:
            # Create security group & key pair used when starting an instance
            ec2_conn = connect_ec2(key_id, secret)
            sg_name = create_cm_security_group(ec2_conn)
            kp_name, kp_material = create_key_pair(ec2_conn, key_name=keypair)
        except EC2ResponseError, err:
            ec2_error = err.error_message
        if ec2_error:
            trans.response.status = 400
            return ec2_error
        else:
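For reference, the account-info JSON string returned earlier in this example (return to_json_string( account_info )) carries at least the keys set in the excerpt; a rough sketch with made-up values:

# Illustrative shape of the serialized account_info dict.
account_info = {
    'clusters': [
        { 'name': 'my-cluster',        # derived from the '<name>.clusterName' bucket key
          'persistent_data': '...',    # pd_contents from the excerpt; exact structure not shown here
          'zone': 'us-east-1a' },
    ],
    'zones': [ 'us-east-1a', 'us-east-1b' ],
}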
Example no. 54
 def get_state( self, secure=True ):
     return to_json_string( self.state )
Example no. 55
    def __call__(self, trans, **kwargs):
        # Get basics.
        webapp = kwargs.get('webapp', 'galaxy')
        status = kwargs.get('status', None)
        message = kwargs.get('message', None)
        # Build a base filter and sort key that is the combination of the saved state and defaults.
        # Saved state takes preference over defaults.
        base_filter = {}
        if self.default_filter:
            # default_filter is a dictionary that provides a default set of filters based on the grid's columns.
            base_filter = self.default_filter.copy()
        base_sort_key = self.default_sort_key
        if self.preserve_state:
            pref_name = unicode(self.__class__.__name__ +
                                self.cur_filter_pref_name)
            if pref_name in trans.get_user().preferences:
                saved_filter = from_json_string(
                    trans.get_user().preferences[pref_name])
                base_filter.update(saved_filter)
            pref_name = unicode(self.__class__.__name__ +
                                self.cur_sort_key_pref_name)
            if pref_name in trans.get_user().preferences:
                base_sort_key = from_json_string(
                    trans.get_user().preferences[pref_name])
        # Build initial query
        query = self.build_initial_query(trans, **kwargs)
        query = self.apply_query_filter(trans, query, **kwargs)
        # Maintain sort state in generated urls
        extra_url_args = {}
        # Determine whether use_default_filter flag is set.
        use_default_filter_str = kwargs.get('use_default_filter')
        use_default_filter = False
        if use_default_filter_str:
            use_default_filter = (use_default_filter_str.lower() == 'true')
        # Process filtering arguments to (a) build a query that represents the filter and (b) build a
        # dictionary that denotes the current filter.
        cur_filter_dict = {}
        for column in self.columns:
            if column.key:
                # Get the filter criterion for the column. Precedence is (a) if using default filter, only look there; otherwise,
                # (b) look in kwargs; and (c) look in base filter.
                column_filter = None
                if use_default_filter:
                    if self.default_filter:
                        column_filter = self.default_filter.get(column.key)
                elif "f-" + column.model_class.__name__ + ".%s" % column.key in kwargs:
                    # Queries that include table joins cannot guarantee unique column names.  This problem is
                    # handled by setting the column_filter value to <TableName>.<ColumnName>.
                    column_filter = kwargs.get("f-" +
                                               column.model_class.__name__ +
                                               ".%s" % column.key)
                elif "f-" + column.key in kwargs:
                    column_filter = kwargs.get("f-" + column.key)
                elif column.key in base_filter:
                    column_filter = base_filter.get(column.key)
                # Method (1) combines a mix of strings and lists of strings into a single list and (2) attempts to de-jsonify all strings.
                def from_json_string_recurse(item):
                    decoded_list = []
                    if isinstance(item, basestring):
                        try:
                            # Not clear what we're decoding, so recurse to ensure that we catch everything.
                            decoded_item = from_json_string(item)
                            if isinstance(decoded_item, list):
                                decoded_list = from_json_string_recurse(
                                    decoded_item)
                            else:
                                decoded_list = [unicode(decoded_item)]
                        except ValueError:
                            decoded_list = [unicode(item)]
                    elif isinstance(item, list):
                        return_val = []
                        for element in item:
                            a_list = from_json_string_recurse(element)
                            decoded_list = decoded_list + a_list
                    return decoded_list

                # If column filter found, apply it.
                if column_filter is not None:
                    # TextColumns may have a mix of json and strings.
                    if isinstance(column, TextColumn):
                        column_filter = from_json_string_recurse(column_filter)
                        if len(column_filter) == 1:
                            column_filter = column_filter[0]
                    # Interpret ',' as a separator for multiple terms.
                    if isinstance(
                            column_filter,
                            basestring) and column_filter.find(',') != -1:
                        column_filter = column_filter.split(',')
                    # If filter criterion is empty, do nothing.
                    if column_filter == '':
                        continue
                    # Update query.
                    query = column.filter(trans, trans.user, query,
                                          column_filter)
                    # Update the current filter dict.
                    cur_filter_dict[column.key] = column_filter
                    # Carry filter along to newly generated urls; make sure filter is a string so
                    # that we can encode to UTF-8 and thus handle user input to filters.
                    if isinstance(column_filter, list):
                        # Filter is a list; convert each item to a string so the whole
                        # list can be carried along as JSON in generated urls.
                        column_filter = [item if isinstance(item, basestring)
                                         else unicode(item).encode("utf-8")
                                         for item in column_filter]
                        extra_url_args["f-" + column.key] = to_json_string(
                            column_filter)
                    else:
                        # Process singleton filter.
                        if not isinstance(column_filter, basestring):
                            column_filter = unicode(column_filter)
                        extra_url_args[
                            "f-" + column.key] = column_filter.encode("utf-8")
        # Process sort arguments.
        sort_key = None
        if 'sort' in kwargs:
            sort_key = kwargs['sort']
        elif base_sort_key:
            sort_key = base_sort_key
        if sort_key:
            ascending = not (sort_key.startswith("-"))
            # Queries that include table joins cannot guarantee unique column names.  This problem is
            # handled by setting the column_filter value to <TableName>.<ColumnName>.
            table_name = None
            if sort_key.find('.') > -1:
                a_list = sort_key.split('.')
                if ascending:
                    table_name = a_list[0]
                else:
                    table_name = a_list[0][1:]
                column_name = a_list[1]
            elif ascending:
                column_name = sort_key
            else:
                column_name = sort_key[1:]
            # Sort key is a column key.
            for column in self.columns:
                if column.key and column.key.find('.') > -1:
                    column_key = column.key.split('.')[1]
                else:
                    column_key = column.key
                if (table_name is None
                        or table_name == column.model_class.__name__
                    ) and column_key == column_name:
                    query = column.sort(trans,
                                        query,
                                        ascending,
                                        column_name=column_name)
                    break
            extra_url_args['sort'] = sort_key
        # There might be a current row
        current_item = self.get_current_item(trans, **kwargs)
        # Process page number.
        if self.use_paging:
            if 'page' in kwargs:
                if kwargs['page'] == 'all':
                    page_num = 0
                else:
                    page_num = int(kwargs['page'])
            else:
                page_num = 1

            if page_num == 0:
                # Show all rows in page.
                total_num_rows = query.count()
                page_num = 1
                num_pages = 1
            else:
                # Show a limited number of rows. Before modifying query, get the total number of rows that query
                # returns so that the total number of pages can be computed.
                total_num_rows = query.count()
                query = query.limit(self.num_rows_per_page).offset(
                    (page_num - 1) * self.num_rows_per_page)
                num_pages = int(
                    math.ceil(float(total_num_rows) / self.num_rows_per_page))
        else:
            # Defaults.
            page_num = 1
            num_pages = 1

        # There are some places in grid templates where it's useful for a grid
        # to have its current filter.
        self.cur_filter_dict = cur_filter_dict

        # Preserve grid state: save current filter and sort key.
        if self.preserve_state:
            pref_name = unicode(self.__class__.__name__ +
                                self.cur_filter_pref_name)
            trans.get_user().preferences[pref_name] = unicode(
                to_json_string(cur_filter_dict))
            if sort_key:
                pref_name = unicode(self.__class__.__name__ +
                                    self.cur_sort_key_pref_name)
                trans.get_user().preferences[pref_name] = unicode(
                    to_json_string(sort_key))
            trans.sa_session.flush()
        # Log grid view.
        context = unicode(self.__class__.__name__)
        params = cur_filter_dict.copy()
        params['sort'] = sort_key
        params['async'] = ('async' in kwargs)
        params['webapp'] = webapp
        trans.log_action(trans.get_user(), unicode("grid.view"), context,
                         params)

        # Render grid.
        def url(*args, **kwargs):
            # Only include sort/filter arguments if not linking to another
            # page. This is a bit of a hack.
            if 'action' in kwargs:
                new_kwargs = dict()
            else:
                new_kwargs = dict(extra_url_args)
            # Extend new_kwargs with first argument if found
            if len(args) > 0:
                new_kwargs.update(args[0])
            new_kwargs.update(kwargs)
            # We need to encode item ids
            if 'id' in new_kwargs:
                id = new_kwargs['id']
                if isinstance(id, list):
                    new_kwargs['id'] = [trans.security.encode_id(i) for i in id]
                else:
                    new_kwargs['id'] = trans.security.encode_id(id)
            return url_for(**new_kwargs)

        self.use_panels = (kwargs.get('use_panels', False)
                           in [True, 'True', 'true'])
        async_request = ((self.use_async) and (kwargs.get('async', False)
                                               in [True, 'True', 'true']))
        # Currently, filling the template returns a str object; this requires decoding the string into a
        # unicode object within mako templates. What probably should be done is to return the template as
        # utf-8 unicode; however, this would require encoding the object as utf-8 before returning the grid
        # results via a controller method, which would require substantial changes. Hence, for now, return the grid
        # as str.
        return trans.fill_template(
            iff(async_request, self.async_template, self.template),
            grid=self,
            query=query,
            cur_page_num=page_num,
            num_pages=num_pages,
            num_page_links=self.num_page_links,
            default_filter_dict=self.default_filter,
            cur_filter_dict=cur_filter_dict,
            sort_key=sort_key,
            current_item=current_item,
            ids=kwargs.get('id', []),
            url=url,
            status=status,
            message=message,
            use_panels=self.use_panels,
            webapp=webapp,
            show_item_checkboxes=(kwargs.get('show_item_checkboxes', '')
                                  in ['True', 'true']),
            # Pass back kwargs so that grid template can set and use args without
            # grid explicitly having to pass them.
            kwargs=kwargs)
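To make the filter and sort handling above concrete, here is a rough sketch of the flat request parameters a grid URL can carry and how they map onto the logic in __call__ (grid, column, and value names are made up):

import json

# Hypothetical kwargs for a grid over a 'History' model with 'name' and 'tags' columns.
kwargs = {
    'f-History.name': 'rna',                     # <TableName>.<ColumnName> form used when queries join tables
    'f-tags': json.dumps( [ 'tag1', 'tag2' ] ),  # list filters travel as JSON strings (see to_json_string above)
    'sort': '-update_time',                      # leading '-' requests a descending sort
    'page': '2',                                 # or 'all' to disable paging
}

# Multi-term text filters arrive as comma-separated strings and are split as in __call__.
raw_filter = 'foo,bar'
if ',' in raw_filter:
    raw_filter = raw_filter.split( ',' )
print( raw_filter )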