def main(options):
    """Collect all user data and install the tools via the Galaxy API."""
    data = {}
    data['tool_shed_url'] = options.tool_shed_url
    data['name'] = options.name
    data['owner'] = options.owner
    if options.changeset_revision:
        data['changeset_revision'] = options.changeset_revision
    else:
        # If the changeset_revision is not specified, default to the latest installable revision.
        revision_data = {}
        revision_data['tool_shed_url'] = options.tool_shed_url.rstrip('/')
        revision_data['name'] = options.name
        revision_data['owner'] = options.owner
        revision_url = '%s%s' % (options.local_url.rstrip('/'),
                                 '/api/tool_shed_repositories/get_latest_installable_revision')
        latest_installable_revision = submit(options.api, revision_url, revision_data,
                                             return_formatted=False)
        data['changeset_revision'] = latest_installable_revision
    if options.tool_panel_section_id:
        data['tool_panel_section_id'] = options.tool_panel_section_id
    elif options.new_tool_panel_section_label:
        data['new_tool_panel_section_label'] = options.new_tool_panel_section_label
    if options.install_repository_dependencies:
        data['install_repository_dependencies'] = options.install_repository_dependencies
    if options.install_tool_dependencies:
        data['install_tool_dependencies'] = options.install_tool_dependencies
    submit(options.api,
           '%s%s' % (options.local_url.rstrip('/'),
                     '/api/tool_shed_repositories/new/install_repository_revision'),
           data)
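# A minimal, hypothetical driver for the installer above. The real script's
# option parsing is not shown in this collection, so the flag names here are
# assumptions that simply mirror the attributes the function reads.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Install a tool shed repository via the Galaxy API.')
    parser.add_argument('-a', '--api', required=True, help='Galaxy API key')
    parser.add_argument('-l', '--local_url', required=True, help='Galaxy base URL')
    parser.add_argument('-u', '--tool_shed_url', required=True, help='Tool Shed base URL')
    parser.add_argument('-n', '--name', required=True, help='repository name')
    parser.add_argument('-o', '--owner', required=True, help='repository owner')
    parser.add_argument('-r', '--changeset_revision', default=None)
    parser.add_argument('--tool_panel_section_id', default=None)
    parser.add_argument('--new_tool_panel_section_label', default=None)
    parser.add_argument('--install_repository_dependencies', default=None)
    parser.add_argument('--install_tool_dependencies', default=None)
    main(parser.parse_args())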
def main():
    try:
        print("workflow_execute.py:")
        data = {}
        data['workflow_id'] = sys.argv[3]
        data['history'] = sys.argv[4]
        data['ds_map'] = {}
        # Trying to pass in parameters in a separate dictionary.
        data['parameters'] = {}
        # DBTODO If only one input is given, don't require a step
        # mapping, just use it for everything?
        for v in sys.argv[5:]:
            print("Multiple arguments ")
            print(v)
            try:
                step, src, ds_id = v.split('=')
                data['ds_map'][step] = {'src': src, 'id': ds_id}
            except ValueError:
                print("VALUE ERROR:")
                wtype, wtool, wparam, wvalue = v.split('=')
                try:
                    data['parameters'][wtool] = {'param': wparam, 'value': wvalue}
                except ValueError:
                    print("TOOL ID ERROR:")
    except IndexError:
        print('usage: %s key url workflow_id history step=src=dataset_id' % os.path.basename(sys.argv[0]))
        sys.exit(1)
    submit(sys.argv[1], sys.argv[2], data)
def main(options):
    """Collect all user data and repair the specified repository revision via the Galaxy API."""
    api_key = options.api
    base_galaxy_url = options.local_url.rstrip('/')
    base_tool_shed_url = options.tool_shed_url.rstrip('/')
    cleaned_tool_shed_url = clean_url(base_tool_shed_url)
    installed_tool_shed_repositories_url = '%s/api/tool_shed_repositories' % base_galaxy_url
    data = {}
    data['tool_shed_url'] = cleaned_tool_shed_url
    data['name'] = options.name
    data['owner'] = options.owner
    data['changeset_revision'] = options.changeset_revision
    tool_shed_repository_id = None
    installed_tool_shed_repositories = display(api_key,
                                               installed_tool_shed_repositories_url,
                                               return_formatted=False)
    for installed_tool_shed_repository in installed_tool_shed_repositories:
        tool_shed = str(installed_tool_shed_repository['tool_shed'])
        name = str(installed_tool_shed_repository['name'])
        owner = str(installed_tool_shed_repository['owner'])
        changeset_revision = str(installed_tool_shed_repository['changeset_revision'])
        if (tool_shed == cleaned_tool_shed_url
                and name == options.name
                and owner == options.owner
                and changeset_revision == options.changeset_revision):
            tool_shed_repository_id = installed_tool_shed_repository['id']
            break
    if tool_shed_repository_id:
        url = '%s/api/tool_shed_repositories/%s/repair_repository_revision' % (
            base_galaxy_url, str(tool_shed_repository_id))
        submit(options.api, url, data)
    else:
        print("Invalid tool_shed / name / owner / changeset_revision.")
def main(options):
    api_key = options.api
    base_tool_shed_url = options.tool_shed_url.rstrip('/')
    my_writable = options.my_writable
    one_per_request = options.one_per_request
    skip_file = options.skip_file
    if skip_file:
        encoded_ids_to_skip = read_skip_file(skip_file)
    else:
        encoded_ids_to_skip = []
    if string_as_bool(one_per_request):
        url = '%s/api/repositories/repository_ids_for_setting_metadata?key=%s&my_writable=%s' % (
            base_tool_shed_url, api_key, str(my_writable))
        repository_ids = get(url, api_key)
        for repository_id in repository_ids:
            if repository_id in encoded_ids_to_skip:
                print("--------")
                print("Skipping repository with id %s because it is in skip file %s" % (
                    str(repository_id), str(skip_file)))
                print("--------")
            else:
                data = dict(repository_id=repository_id)
                url = '%s/api/repositories/reset_metadata_on_repository' % base_tool_shed_url
                try:
                    submit(url, data, options.api)
                except Exception as e:
                    log.exception(">>>>>>>>>>>>>>>Blew up on data: %s, exception: %s" % (
                        str(data), str(e)))
                    # An nginx timeout undoubtedly occurred.
                    sys.exit(1)
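# The skip-file format is never shown in this collection; a plausible sketch
# of read_skip_file, assuming one encoded repository id per line with '#'
# starting a comment, would be:
def read_skip_file(skip_file):
    encoded_ids_to_skip = []
    with open(skip_file) as fh:
        for line in fh:
            line = line.strip()
            if line and not line.startswith('#'):
                encoded_ids_to_skip.append(line)
    return encoded_ids_to_skip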
def main(options):
    api_key = options.api
    base_galaxy_url = options.local_url.rstrip('/')
    base_tool_shed_url = options.tool_shed_url.rstrip('/')
    cleaned_tool_shed_url = clean_url(base_tool_shed_url)
    installed_tool_shed_repositories_url = '%s/api/tool_shed_repositories' % base_galaxy_url
    tool_shed_repository_id = None
    installed_tool_shed_repositories = display(api_key,
                                               installed_tool_shed_repositories_url,
                                               return_formatted=False)
    for installed_tool_shed_repository in installed_tool_shed_repositories:
        tool_shed = str(installed_tool_shed_repository['tool_shed'])
        name = str(installed_tool_shed_repository['name'])
        owner = str(installed_tool_shed_repository['owner'])
        changeset_revision = str(installed_tool_shed_repository['changeset_revision'])
        if (tool_shed == cleaned_tool_shed_url
                and name == options.name
                and owner == options.owner
                and changeset_revision == options.changeset_revision):
            tool_shed_repository_id = installed_tool_shed_repository['id']
            break
    if tool_shed_repository_id:
        # Get the list of exported workflows contained in the installed repository.
        url = '%s/api/tool_shed_repositories/%s/exported_workflows' % (
            base_galaxy_url, str(tool_shed_repository_id))
        exported_workflows = display(api_key, url, return_formatted=False)
        if exported_workflows:
            # Import all of the workflows in the list of exported workflows.
            data = {}
            # NOTE: to import a single workflow, add an index to data (e.g.,
            # data['index'] = 0) and change the URL to ~/import_workflow (singular):
            # url = '%s/api/tool_shed_repositories/%s/import_workflow' % (base_galaxy_url, tool_shed_repository_id)
            url = '%s/api/tool_shed_repositories/%s/import_workflows' % (
                base_galaxy_url, str(tool_shed_repository_id))
            submit(options.api, url, data)
    else:
        print("Invalid tool_shed / name / owner / changeset_revision.")
def set_lib_perms(lib_id, user_id):
    # Grant the user full access to the library; key, lib_url, and the api
    # module are assumed to be defined at module level.
    lib_payload = {}
    nlib_url = lib_url + '/' + lib_id + '/permissions'
    print(nlib_url)
    print(user_id)
    lib_payload['LIBRARY_ACCESS_in'] = [user_id]
    lib_payload['LIBRARY_MODIFY_in'] = [user_id]
    lib_payload['LIBRARY_ADD_in'] = [user_id]
    lib_payload['LIBRARY_MANAGE_in'] = [user_id]
    api.submit(key, nlib_url, lib_payload)
def main(api_key, api_url, in_folder, out_folder, data_library, workflow):
    # Find/create the data library with the given name. Assume we're putting datasets in the root folder '/'.
    libs = display(api_key, api_url + 'libraries', return_formatted=False)
    library_id = None
    for library in libs:
        if library['name'] == data_library:
            library_id = library['id']
    if not library_id:
        lib_create_data = {'name': data_library}
        library = submit(api_key, api_url + 'libraries', lib_create_data, return_formatted=False)
        library_id = library[0]['id']
    library_folder_id = None
    folders = display(api_key, api_url + "libraries/%s/contents" % library_id, return_formatted=False)
    for f in folders:
        if f['name'] == "/":
            library_folder_id = f['id']
    workflow = display(api_key, api_url + 'workflows/%s' % workflow, return_formatted=False)
    if not workflow:
        print("Workflow not found, terminating.")
        sys.exit(1)
    if not library_id or not library_folder_id:
        print("Failure to configure library destination.")
        sys.exit(1)
    while True:
        # Watch in_folder, upload anything that shows up there to the data library to get an ldda,
        # invoke the workflow, then move the file to out_folder.
        for fname in os.listdir(in_folder):
            fullpath = os.path.join(in_folder, fname)
            if os.path.isfile(fullpath):
                data = {}
                data['folder_id'] = library_folder_id
                data['file_type'] = 'auto'
                data['dbkey'] = ''
                data['upload_option'] = 'upload_paths'
                data['filesystem_paths'] = fullpath
                data['create_type'] = 'file'
                libset = submit(api_key, api_url + "libraries/%s/contents" % library_id,
                                data, return_formatted=False)
                # TODO: Handle this better, but the datatype isn't always set for the
                # follow-up workflow execution without this pause.
                time.sleep(5)
                for ds in libset:
                    if 'id' in ds:
                        # Successful upload of the dataset; we have the ldda now. Run the workflow.
                        wf_data = {}
                        wf_data['workflow_id'] = workflow['id']
                        wf_data['history'] = "%s - %s" % (fname, workflow['name'])
                        wf_data['ds_map'] = {}
                        for step_id, ds_in in workflow['inputs'].items():
                            wf_data['ds_map'][step_id] = {'src': 'ld', 'id': ds['id']}
                        res = submit(api_key, api_url + 'workflows', wf_data, return_formatted=False)
                        if res:
                            print(res)
                            # Successful workflow execution, safe to move dataset.
                            shutil.move(fullpath, os.path.join(out_folder, fname))
        time.sleep(10)
def main(options):
    api_key = options.api
    base_tool_shed_url = options.tool_shed_url.rstrip('/')
    data = {}
    data['tool_shed_url'] = options.tool_shed_url
    data['capsule_file_name'] = options.capsule_file_name
    url = '%s/api/repositories/new/import_capsule' % base_tool_shed_url
    try:
        submit(url, data, api_key)
    except Exception as e:
        log.exception(str(e))
        sys.exit(1)
def main():
    api_key = sys.argv[1]
    api_base_url = sys.argv[2]
    api_url = "%s/api/workflows" % api_base_url
    try:
        data = {}
        data['installed_repository_file'] = sys.argv[3]
        if len(sys.argv) > 4 and sys.argv[4] == "--add_to_menu":
            data['add_to_menu'] = True
    except IndexError:
        print('usage: %s key galaxy_url workflow_file' % os.path.basename(sys.argv[0]))
        sys.exit(1)
    submit(api_key, api_url, data, return_formatted=False)
def main():
    try:
        data = {}
        data['workflow_id'] = sys.argv[3]
        data['history'] = sys.argv[4]
        data['ds_map'] = {}
        # DBTODO If only one input is given, don't require a step
        # mapping, just use it for everything?
        for v in sys.argv[5:]:
            step, src, ds_id = v.split('=')
            data['ds_map'][step] = {'src': src, 'id': ds_id}
    except IndexError:
        print('usage: %s key url workflow_id history step=src=dataset_id' % os.path.basename(sys.argv[0]))
        sys.exit(1)
    submit(sys.argv[1], sys.argv[2], data)
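# Hypothetical invocation of the script above (the encoded ids are made up)
# to show how the positional arguments map onto the payload:
#
#   python workflow_execute.py <api_key> http://localhost:8080/api/workflows \
#       f2db41e1fa331b3e hist_new 1=ld=abc123
#
# would build and submit:
#
#   data = {'workflow_id': 'f2db41e1fa331b3e',
#           'history': 'hist_new',
#           'ds_map': {'1': {'src': 'ld', 'id': 'abc123'}}}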
def create_sequencer_configuration(key, base_url, request_form_filename, sample_form_filename,
                                   request_type_filename, email_addresses, return_formatted=True):
    # create request_form
    data = {}
    data['xml_text'] = open(request_form_filename).read()
    request_form = submit(key, "%sforms" % base_url, data, return_formatted=False)[0]
    # create sample_form
    data = {}
    data['xml_text'] = open(sample_form_filename).read()
    sample_form = submit(key, "%sforms" % base_url, data, return_formatted=False)[0]
    # get user ids
    user_ids = [user['id'] for user in get(key, "%susers" % base_url)
                if user['email'] in email_addresses]
    # create role, assign to user
    data = {}
    data['name'] = "request_type_role_%s_%s_%s name" % (
        request_form['id'], sample_form['id'], '_'.join(email_addresses))
    data['description'] = "request_type_role_%s_%s_%s description" % (
        request_form['id'], sample_form['id'], '_'.join(email_addresses))
    data['user_ids'] = user_ids
    role_ids = [role['id'] for role in submit(key, "%sroles" % base_url, data,
                                              return_formatted=False)]
    # create request_type and return (and optionally print) the result
    data = {}
    data['request_form_id'] = request_form['id']
    data['sample_form_id'] = sample_form['id']
    data['role_ids'] = role_ids
    data['xml_text'] = open(request_type_filename).read()
    return submit(key, "%srequest_types" % base_url, data, return_formatted=return_formatted)
def check_response(res, api_key, api_url, wf_data, runned):
    retry_num = 1
    while 'Error' in res:
        time.sleep(100)
        tmp = 'Error:%s\n[retry %s]%s' % (res, retry_num, wf_data['history'])
        logPrint(tmp)
        res = submit(api_key, api_url + 'workflows', wf_data, return_formatted=False)
        retry_num += 1
        if retry_num > 2:
            break
    if 'Error' in res:
        tmp = 'Error:[retries exhausted]%s' % wf_data['history']
        logPrint(tmp)
    else:
        tmp = 'Running workflow : %s' % wf_data['history']
        logPrint(tmp)
        runned += 1
    time.sleep(10)
    # Successful workflow execution, safe to move dataset.
    # shutil.move(fullpath, os.path.join(out_folder, fname))
    return runned
def workflow_execute_parameters(*argv):
    data = {}
    data['workflow_id'] = argv[2]
    data['history'] = argv[3]
    data['ds_map'] = {}
    # Trying to pass in parameters in a separate dictionary.
    data['parameters'] = {}
    # DBTODO If only one input is given, don't require a step
    # mapping, just use it for everything?
    for v in argv[4:]:
        print("Multiple arguments ")
        print(v)
        try:
            step, src, ds_id = v.split('=')
            data['ds_map'][step] = {'src': src, 'id': ds_id}
        except ValueError:
            print("VALUE ERROR:")
            # The value may itself contain '=' characters, so split off only
            # the first three fields and rejoin the rest as the value.
            fields = v.split('=')
            wtype, wtool, wparam = fields[0], fields[1], fields[2]
            wvalue = "=".join(fields[3:])
            try:
                data['parameters'][wtool] = {'param': wparam, 'value': wvalue}
            except ValueError:
                print("TOOL ID ERROR:")
    print(data)
    return submit(argv[0], argv[1], data, return_formatted=False)
def load_file(fullpath, api_key, api_url, library_id, library_folder_id, uuid_field=None):
    data = {}
    data['folder_id'] = library_folder_id
    data['file_type'] = 'auto'
    data['dbkey'] = ''
    data['upload_option'] = 'upload_paths'
    data['filesystem_paths'] = fullpath
    data['create_type'] = 'file'
    data['link_data_only'] = 'link_to_files'
    # Attach the sidecar JSON file as extended metadata.
    handle = open(fullpath + ".json")
    smeta = handle.read()
    handle.close()
    ext_meta = json.loads(smeta)
    data['extended_metadata'] = ext_meta
    if uuid_field is not None and uuid_field in ext_meta:
        data['uuid'] = ext_meta[uuid_field]
    libset = submit(api_key, api_url + "libraries/%s/contents" % library_id,
                    data, return_formatted=True)
    print(libset)
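# Example of the ".json" sidecar that load_file expects next to each dataset.
# The contents are purely illustrative: any JSON object works, and uuid_field
# just names one of its keys.
#
#   /data/sample1.fastq.json:
#     {"uuid": "00000000-0000-0000-0000-000000000000", "sample": "sample1", "lane": 3}
#
#   load_file('/data/sample1.fastq', api_key, api_url, library_id,
#             library_folder_id, uuid_field='uuid')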
def create_lib(lib_user):
    lib_payload = {}
    lib_payload['name'] = lib_user
    lib_payload['description'] = lib_user + "'s data library"
    resp = api.submit(key, lib_url, lib_payload)
    print(type(resp))
    return resp
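# Taken together, create_lib and set_lib_perms (above) sketch a per-user
# provisioning flow. Assuming the module-level key/lib_url/api globals are
# configured, it might look like:
#
#   resp = create_lib('alice')
#   set_lib_perms(resp[0]['id'], alice_user_id)
#
# Note that the response shape ([{'id': ...}] vs {'id': ...}) varies between
# snippets in this collection, so check what your common.submit returns.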
def main():
    if _debug == 1:
        print('Galaxy API URL: %s' % api_url)
        print('Galaxy API Key: %s' % api_key)
        print('Library to create: %s' % library_to_create)
        print('')
    if api_url is None or api_key is None:
        print("Galaxy API Key and/or URL was not specified")
        sys.exit(1)
    libs = display(api_key, api_url + '/api/libraries', return_formatted=False)
    for library in libs:
        if library['name'] == library_to_create and not library['deleted']:
            print('Error: Library %s already exists.' % library['name'])
            sys.exit(1)
    data = {}
    data['name'] = library_to_create
    result = submit(api_key, api_url + "/api/libraries", data, return_formatted=False)
    if result['id'] != 0:
        print('Success: Library created.')
    else:
        print('Error: Failed to create library (%s).' % result['id'])
def main(options):
    """Collect all user data and export the repository via the Tool Shed API."""
    base_tool_shed_url = options.tool_shed_url.rstrip('/')
    repositories_url = '%s/api/repositories' % base_tool_shed_url
    data = {}
    data['tool_shed_url'] = base_tool_shed_url
    data['name'] = options.name
    data['owner'] = options.owner
    data['changeset_revision'] = options.changeset_revision
    data['export_repository_dependencies'] = options.export_repository_dependencies
    repository_id = None
    repositories = display(repositories_url, api_key=None, return_formatted=False)
    for repository in repositories:
        name = str(repository['name'])
        owner = str(repository['owner'])
        if name == options.name and owner == options.owner:
            repository_id = repository['id']
            break
    if repository_id:
        # We'll currently support only gzip-compressed tar archives.
        file_type = 'gz'
        url = '%s/api/repository_revisions/%s/export' % (base_tool_shed_url, str(repository_id))
        export_dict = submit(url, data, return_formatted=False)
        error_messages = export_dict['error_messages']
        if error_messages:
            print("Error attempting to export revision", options.changeset_revision,
                  "of repository", options.name, "owned by", options.owner, ":\n",
                  error_messages)
        else:
            repositories_archive_filename = \
                export_util.generate_repository_archive_filename(
                    base_tool_shed_url,
                    options.name,
                    options.owner,
                    options.changeset_revision,
                    file_type,
                    export_repository_dependencies=string_as_bool(options.export_repository_dependencies),
                    use_tmp_archive_dir=False)
            download_url = export_dict['download_url']
            download_dir = os.path.abspath(options.download_dir)
            file_path = os.path.join(download_dir, repositories_archive_filename)
            src = None
            dst = None
            try:
                src = urllib.request.urlopen(download_url)
                dst = open(file_path, 'wb')
                while True:
                    chunk = src.read(CHUNK_SIZE)
                    if chunk:
                        dst.write(chunk)
                    else:
                        break
            finally:
                if src:
                    src.close()
                if dst:
                    dst.close()
            print("Successfully exported revision", options.changeset_revision,
                  "of repository", options.name, "owned by", options.owner)
            print("to location", file_path)
    else:
        print("Invalid tool_shed / name / owner.")
def main(api_key, api_url, in_folder, data_library, uuid_field=None):
    # Find/create the data library with the given name. Assume we're putting datasets in the root folder '/'.
    libs = display(api_key, api_url + 'libraries', return_formatted=False)
    library_id = None
    for library in libs:
        if library['name'] == data_library:
            library_id = library['id']
    if not library_id:
        lib_create_data = {'name': data_library}
        library = submit(api_key, api_url + 'libraries', lib_create_data, return_formatted=False)
        library_id = library['id']
    library_folder_id = None
    folders = display(api_key, api_url + "libraries/%s/contents" % library_id, return_formatted=False)
    for f in folders:
        if f['name'] == "/":
            library_folder_id = f['id']
    if not library_id or not library_folder_id:
        print("Failure to configure library destination.")
        sys.exit(1)
    if os.path.isfile(in_folder):
        # A single file (with a .json sidecar) was passed instead of a folder.
        if os.path.exists(in_folder + ".json"):
            fullpath = os.path.abspath(in_folder)
            print("Loading", fullpath)
            load_file(fullpath, api_key, api_url, library_id, library_folder_id, uuid_field)
    else:
        for fname in os.listdir(in_folder):
            fullpath = os.path.join(in_folder, fname)
            if os.path.isfile(fullpath) and os.path.exists(fullpath + ".json"):
                print("Loading", fullpath)
                load_file(fullpath, api_key, api_url, library_id, library_folder_id, uuid_field)
def add_to_library(self):
    '''
    This method adds the dataset file to the target data library & folder
    by opening the corresponding url in the running Galaxy server.
    '''
    self.update_status(SampleDataset.transfer_status.ADD_TO_LIBRARY)
    try:
        data = {}
        data['folder_id'] = 'F%s' % api.encode_id(self.config_id_secret, self.folder_id)
        data['file_type'] = 'auto'
        data['server_dir'] = self.server_dir
        data['dbkey'] = ''
        data['upload_option'] = 'upload_directory'
        data['create_type'] = 'file'
        url = "http://%s/api/libraries/%s/contents" % (
            self.galaxy_host, api.encode_id(self.config_id_secret, self.library_id))
        log.debug(str((self.api_key, url, data)))
        retval = api.submit(self.api_key, url, data, return_formatted=False)
        log.debug(str(retval))
    except Exception as e:
        self.error_and_exit(str(e))
def copy_hda_to_library_folder(base_url, key, hda_id, library_id, folder_id, message=""):
    url = "http://%s/api/libraries/%s/contents" % (base_url, library_id)
    payload = {"folder_id": folder_id, "create_type": "file", "from_hda_id": hda_id}
    if message:
        payload.update(dict(ldda_message=message))
    return submit(key, url, payload)
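# Hypothetical usage of copy_hda_to_library_folder, with made-up encoded ids.
# Note that base_url here is a bare host:port; the function prepends http://.
#
#   copy_hda_to_library_folder('localhost:8080', api_key,
#                              hda_id='2f94e8ae9edff68a',
#                              library_id='f597429621d6eb2b',
#                              folder_id='F2f94e8ae9edff68a',
#                              message='copied from history')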
def main(options):
    """Collect all user data and install the tools via the Galaxy API."""
    data = {}
    data['tool_shed_url'] = options.tool_shed_url
    data['name'] = options.name
    data['owner'] = options.owner
    data['changeset_revision'] = options.changeset_revision
    if options.tool_panel_section_id:
        data['tool_panel_section_id'] = options.tool_panel_section_id
    elif options.new_tool_panel_section_label:
        data['new_tool_panel_section_label'] = options.new_tool_panel_section_label
    if options.install_repository_dependencies:
        data['install_repository_dependencies'] = options.install_repository_dependencies
    if options.install_tool_dependencies:
        data['install_tool_dependencies'] = options.install_tool_dependencies
    submit(options.api,
           '%s%s' % (options.local_url.rstrip('/'),
                     '/api/tool_shed_repositories/new/install_repository_revision'),
           data)
def main(options):
    api_key = options.api
    url = 'http://localhost:9009/api/categories'
    data = dict(name='A Test Category', description='Drop tests repositories here')
    try:
        response = submit(url, data, api_key)
    except Exception as e:
        response = str(e)
        print("Error attempting to create category using URL: ", url, " exception: ", str(e))
def import_cycle():
    # Find all non-hidden real files.
    ls = run(r"find -L import/ \( ! -regex '.*/\..*' \) -type f")
    random.shuffle(ls)
    for filepath in ls:
        filepath = filepath.strip()
        if os.path.isfile(filepath):
            dir = filepath.split("/")
            dir = dir[2:-1]
            dir = "/".join(dir)
            print("dir ", dir)
            # Use a parameterized query so paths containing quotes don't break the SQL.
            cursor.execute("select * from files_imported where path=?", (filepath,))
            results = cursor.fetchall()
            if len(results) == 0:
                print("importing ", filepath, dir)
                library_id = get_id_for(dir)
                print("library_id : ", library_id)
                connection.commit()
                just_dir = dir
                pathless = "/".join(filepath.split("/")[2:])
                # Expose the file to Galaxy through a symlink tree and link (not copy) the data.
                run("mkdir -p soft_links/" + just_dir)
                run("ln -s %s soft_links/%s" % (os.path.abspath(filepath), pathless))
                soft_fullpath = os.path.abspath("soft_links/%s" % pathless)
                data = {}
                data['folder_id'] = library_id
                data['file_type'] = 'auto'
                data['dbkey'] = ''
                data['upload_option'] = 'upload_paths'
                data['filesystem_paths'] = soft_fullpath
                data['create_type'] = 'file'
                data['link_data_only'] = 'link_to_files'
                url = api_url + "libraries/%s/contents" % library_id
                print(url)
                print(data)
                libset = submit(api_key, url, data, return_formatted=False)
                cursor.execute("insert into files_imported(path, importtime) values(?, datetime('now'))",
                               (filepath,))
                connection.commit()
                print(libset)
            else:
                print("File already imported", filepath)
def getLibFolderID(exp_info):
    api_key = exp_info['api_key']
    api_url = exp_info['api_url']
    lib_folder_name = exp_info['lib_folder_name']
    library_id = lib_folder_id = -1
    try:
        libs = display(api_key, api_url + 'libraries', return_formatted=False)
    except Exception:
        logPrint(api_url + 'libraries')
        logPrint("Error:Failure when libs = display")
        return (-1, -1, -1)
    library_id = None
    for library in libs:
        if library['name'] == lib_folder_name:
            if library['deleted']:
                continue
            library_id = library['id']
            lib_exist = 1
            logPrint('Library [%s] existed!' % lib_folder_name)
    if not library_id:
        lib_create_data = {'name': lib_folder_name}
        try:
            library = submit(api_key, api_url + 'libraries', lib_create_data,
                             return_formatted=False)
        except Exception:
            logPrint("Error:Failure when library = submit")
            return (-1, -1, -1)
        logPrint('Library [%s] created!' % lib_folder_name)
        library_id = library['id']
        lib_exist = 0
    folders = display(api_key, api_url + "libraries/%s/contents" % library_id,
                      return_formatted=False)
    for f in folders:
        if f['name'] == "/":
            lib_folder_id = f['id']
    if not library_id or not lib_folder_id:
        logPrint("Error:Failure to configure library destination.")
        return (-1, -1, -1)
    return (library_id, lib_folder_id, lib_exist)
def create_path(path):
    """This is a little complicated. If there is a new path in the import
    directory, create it on the server. However, the new path may be nested,
    and with the given APIs there is no way to do that in one shot. You need
    to create a folder, find its id, then make the subfolder, etc. So this
    creates a long path recursively."""
    (parent_path, sep, new_folder_name) = path[:-1].rpartition('/')
    parent = parent_path + sep
    if parent == '':
        raise ValueError("bogus path: %r" % path)
    print("parent , new : ", parent, ",", new_folder_name)
    if parent not in folders:
        # If the parent doesn't exist, create it recursively, then create the child (me).
        create_path(parent)
    parent_id = folders[parent]
    data = {}
    data['folder_id'] = parent_id
    data['name'] = new_folder_name
    data['create_type'] = 'folder'
    data['description'] = ''
    url = api_url + "libraries/" + parent_id + "/contents"
    submit(api_key, url, data, return_formatted=False)
    read_folders()
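# read_folders and the folders map are globals used by create_path above; a
# plausible sketch, assuming the same display helper and module-level
# api_key/api_url/library_id as the surrounding snippets, would be:
def read_folders():
    global folders
    folders = {}
    for f in display(api_key, api_url + "libraries/%s/contents" % library_id,
                     return_formatted=False):
        if f['type'] == 'folder':
            # Map each folder path (with trailing slash) to its encoded id.
            folders[f['name'].rstrip('/') + '/'] = f['id']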
def main(api_key, api_url, in_folder, data_library):
    # Find/create the data library with the given name. Assume we're putting datasets in the root folder '/'.
    libs = display(api_key, api_url + 'libraries', return_formatted=False)
    library_id = None
    for library in libs:
        if library['name'] == data_library:
            library_id = library['id']
    if not library_id:
        lib_create_data = {'name': data_library}
        library = submit(api_key, api_url + 'libraries', lib_create_data, return_formatted=False)
        library_id = library[0]['id']
    library_folder_id = None
    folders = display(api_key, api_url + "libraries/%s/contents" % library_id, return_formatted=False)
    for f in folders:
        if f['name'] == "/":
            library_folder_id = f['id']
    if not library_id or not library_folder_id:
        print("Failure to configure library destination.")
        sys.exit(1)
    for fname in os.listdir(in_folder):
        fullpath = os.path.join(in_folder, fname)
        if os.path.isfile(fullpath) and os.path.exists(fullpath + ".json"):
            print("Loading", fullpath)
            data = {}
            data['folder_id'] = library_folder_id
            data['file_type'] = 'auto'
            data['dbkey'] = ''
            data['upload_option'] = 'upload_paths'
            data['filesystem_paths'] = fullpath
            data['create_type'] = 'file'
            data['link_data_only'] = 'link_to_files'
            handle = open(fullpath + ".json")
            smeta = handle.read()
            handle.close()
            data['extended_metadata'] = json.loads(smeta)
            libset = submit(api_key, api_url + "libraries/%s/contents" % library_id,
                            data, return_formatted=True)
            print(libset)
def main(options):
    """Collect all user data and export the repository via the Tool Shed API."""
    base_tool_shed_url = options.tool_shed_url.rstrip('/')
    repositories_url = '%s/api/repositories' % base_tool_shed_url
    data = {}
    data['tool_shed_url'] = base_tool_shed_url
    data['name'] = options.name
    data['owner'] = options.owner
    data['changeset_revision'] = options.changeset_revision
    data['export_repository_dependencies'] = options.export_repository_dependencies
    repository_id = None
    repositories = display(repositories_url, api_key=None, return_formatted=False)
    for repository in repositories:
        name = str(repository['name'])
        owner = str(repository['owner'])
        if name == options.name and owner == options.owner:
            repository_id = repository['id']
            break
    if repository_id:
        # We'll currently support only gzip-compressed tar archives.
        file_type = 'gz'
        url = '%s/api/repository_revisions/%s/export' % (base_tool_shed_url, str(repository_id))
        export_dict = submit(url, data, return_formatted=False)
        error_messages = export_dict['error_messages']
        if error_messages:
            print("Error attempting to export revision", options.changeset_revision,
                  "of repository", options.name, "owned by", options.owner, ":\n",
                  error_messages)
        else:
            export_repository_dependencies = string_as_bool(options.export_repository_dependencies)
            repositories_archive_filename = generate_repository_archive_filename(
                base_tool_shed_url,
                options.name,
                options.owner,
                options.changeset_revision,
                file_type,
                export_repository_dependencies=export_repository_dependencies,
                use_tmp_archive_dir=False)
            download_url = export_dict['download_url']
            download_dir = os.path.abspath(options.download_dir)
            file_path = os.path.join(download_dir, repositories_archive_filename)
            # Stream the archive to disk in chunks.
            src = requests.get(download_url, stream=True)
            with open(file_path, 'wb') as dst:
                for chunk in src.iter_content(chunk_size=CHUNK_SIZE):
                    if chunk:
                        dst.write(chunk)
            print("Successfully exported revision", options.changeset_revision,
                  "of repository", options.name, "owned by", options.owner)
            print("to location", file_path)
    else:
        print("Invalid tool_shed / name / owner.")
def main(options):
    api_key = options.api_key
    if api_key:
        if options.tool_shed_url and options.name and options.owner:
            base_tool_shed_url = options.tool_shed_url.rstrip('/')
            data = {}
            data['tool_shed_url'] = base_tool_shed_url
            data['name'] = options.name
            data['owner'] = options.owner
            url = '%s%s' % (base_tool_shed_url, '/api/repositories/remove_repository_registry_entry')
            response_dict = submit(url, data, api_key=api_key, return_formatted=False)
            print(response_dict)
        else:
            print("Invalid tool_shed: ", options.tool_shed_url, " name: ", options.name,
                  " or owner: ", options.owner, ".")
    else:
        print("An API key for an admin user in the Tool Shed is required to remove entries "
              "from the Tool Shed's repository registry.")
def main(options):
    api_key = options.api_key
    if api_key:
        if options.tool_shed_url and options.name and options.owner:
            base_tool_shed_url = options.tool_shed_url.rstrip('/')
            data = {}
            data['tool_shed_url'] = base_tool_shed_url
            data['name'] = options.name
            data['owner'] = options.owner
            url = '%s%s' % (base_tool_shed_url, '/api/repositories/add_repository_registry_entry')
            response_dict = submit(url, data, api_key=api_key, return_formatted=False)
            print(response_dict)
        else:
            print("Invalid tool_shed: ", options.tool_shed_url, " name: ", options.name,
                  " or owner: ", options.owner, ".")
    else:
        print("An API key for an admin user in the Tool Shed is required to add entries "
              "into the Tool Shed's repository registry.")
def main():
    print('Galaxy API URL: %s' % api_url)
    print('Galaxy API Key: %s' % api_key)
    print('Library to create: %s' % library_to_create)
    print('')
    libs = display(api_key, api_url + '/api/libraries', return_formatted=False)
    for library in libs:
        if library['name'] == library_to_create:
            print('Library already exists.')
            sys.exit(1)
    data = {}
    data['name'] = library_to_create
    result = submit(api_key, api_url + "/api/libraries", data, return_formatted=False)
    if result['id'] != 0:
        print('Library created.')
def main():
    print('Galaxy API URL: %s' % _api_url)
    print('Galaxy API Key: %s' % _api_key)
    print('File: %s' % _file)
    print('Library: %s' % _libraryPath)
    print('')
    if not os.path.isfile(_file):
        print('Unable to locate file')
        sys.exit(1)
    fields = _libraryPath.split('/')
    libraryName = fields[0]
    if len(fields) == 1:
        folderPath = '/'
    else:
        folderPath = '/' + '/'.join(fields[1:])
    library = getGalaxyLibrary(libraryName)
    folder = getGalaxyFolderFromLibrary(library, folderPath)
    if isFileInGalaxyFolder(folder, _file):
        print('File already exists in Galaxy library')
        sys.exit(1)
    print('Adding %s to %s' % (_file, _libraryPath))
    data = {}
    data['folder_id'] = folder['id']
    data['create_type'] = 'file'
    data['file_type'] = 'auto'
    data['dbkey'] = ''
    data['upload_option'] = 'upload_paths'
    data['filesystem_paths'] = _file
    data['link_data_only'] = 'link_to_files'
    libset = submit(_api_key, _api_url + "/api/libraries/%s/contents" % library['id'],
                    data, return_formatted=False)
    for lib in libset:
        file_metadata = display(_api_key, _api_url + '/api/libraries/datasets/%s' % lib['id'],
                                return_formatted=False)
        # Poll until the dataset finishes uploading.
        while file_metadata['state'] in ('running', 'queued'):
            print('State is %s. Sleep for 5 seconds.' % file_metadata['state'])
            time.sleep(5)
            file_metadata = display(_api_key, _api_url + '/api/libraries/datasets/%s' % lib['id'],
                                    return_formatted=False)
        print('State is %s' % file_metadata['state'])
def uploadLibData(exp_info, library_id, lib_folder_id, path):
    file_format = exp_info['format']
    api_key = exp_info['api_key']
    api_url = exp_info['api_url']
    lib_folder_name = exp_info['target_lib_folder_name']
    logPrint('Upload files to lib [%s]...' % lib_folder_name)
    for fname in os.listdir(path):
        if not checkFormat(fname, file_format):
            continue
        res = REGEX_EFR.search(fname)
        if not res:
            continue
        fullpath = os.path.join(path, fname)
        if os.path.isfile(fullpath):
            data = {}
            data['folder_id'] = lib_folder_id
            data['file_type'] = file_format
            data['dbkey'] = ''
            data['upload_option'] = 'upload_paths'
            data['filesystem_paths'] = fullpath
            data['create_type'] = 'file'
            libset = submit(api_key, api_url + "libraries/%s/contents" % library_id,
                            data, return_formatted=False)
            logPrint('Uploading [%s]...' % fname)
            time.sleep(20)
    return 0
def main(options):
    api_key = options.api
    from_tool_shed = options.from_tool_shed.rstrip('/')
    to_tool_shed = options.to_tool_shed.rstrip('/')
    # Get the categories from the specified Tool Shed.
    url = '%s/api/categories' % from_tool_shed
    category_dicts = get(url)
    create_response_dicts = []
    for category_dict in category_dicts:
        name = category_dict.get('name', None)
        description = category_dict.get('description', None)
        if name is not None and description is not None:
            data = dict(name=name, description=description)
            url = '%s/api/categories' % to_tool_shed
            try:
                response = submit(url, data, api_key)
            except Exception as e:
                response = str(e)
                print("Error attempting to create category using URL: ", url, " exception: ", e)
            create_response_dict = dict(response=response)
            create_response_dicts.append(create_response_dict)
def main(options):
    api_key = options.api
    from_tool_shed = options.from_tool_shed.rstrip('/')
    to_tool_shed = options.to_tool_shed.rstrip('/')
    # Get the users from the specified Tool Shed.
    url = '%s/api/users' % from_tool_shed
    user_dicts = get(url)
    create_response_dicts = []
    for user_dict in user_dicts:
        username = user_dict.get('username', None)
        if username is not None:
            email = '*****@*****.**' % username
            password = '******'
            data = dict(email=email, password=password, username=username)
            url = '%s/api/users' % to_tool_shed
            try:
                response = submit(url, data, api_key)
            except Exception as e:
                response = str(e)
                print("Error attempting to create user using URL: ", url, " exception: ", str(e))
            create_response_dict = dict(response=response)
            create_response_dicts.append(create_response_dict)
#!/usr/bin/env python
from __future__ import print_function

import os
import sys

from common import submit

try:
    assert sys.argv[3]
    data = {}
    data['from_ld_id'] = sys.argv[3]
except IndexError:
    print('usage: %s key url library_file_id' % os.path.basename(sys.argv[0]))
    print(' library_file_id is from /api/libraries/<library_id>/contents/<library_file_id>')
    sys.exit(1)
submit(sys.argv[1], sys.argv[2], data)
'''
The first known prime found to exceed one million digits was discovered in
1999, and is a Mersenne prime of the form 2^6972593-1; it contains exactly
2,098,960 digits. Subsequently other Mersenne primes, of the form 2^p-1, have
been found which contain more digits. However, in 2004 there was found a
massive non-Mersenne prime which contains 2,357,207 digits: 28433*2^7830457+1.

Find the last ten digits of this prime number.
'''
import common

# pow(b, e, m) returns b**e % m.
# With m a power of 10, we can extract trailing digits.
MODULUS = 10**10
common.submit((28433 * pow(2, 7830457, MODULUS) + 1) % MODULUS, expected=8739992577)
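# Sanity check of the three-argument pow on a small case: the last two digits
# of 2**20 (1048576) via direct arithmetic and via modular exponentiation.
assert (2 ** 20) % 100 == pow(2, 20, 100) == 76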
def main(options):
    api_key = options.api
    base_galaxy_url = options.galaxy_url.rstrip('/')
    url = '%s/api/tool_shed_repositories/reset_metadata_on_installed_repositories' % base_galaxy_url
    submit(api_key, url, {})
'''
The four adjacent digits in the 1000-digit number that have the greatest
product are 9*9*8*9 = 5832. Find the thirteen adjacent digits in the
1000-digit number that have the greatest product. What is the value of
this product?
'''
DIGITS = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"

import common


def euler008(num_digits):
    # TODO(durandal): rolling window, split by '0'
    # Note the + 1: the final window starting at len(DIGITS) - num_digits
    # must be included.
    return max(
        common.product([int(d) for d in DIGITS[i:i + num_digits]])
        for i in range(len(DIGITS) - num_digits + 1))


common.assertEquals(5832, euler008(4))
common.submit(euler008(13), expected=23514624000)
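# A sketch of the optimization hinted at in the TODO above (hypothetical, not
# part of the original solution): split the digit string on '0', since any
# window containing a zero has product zero, then slide a window over each
# zero-free run, multiplying in the entering digit and dividing out the
# leaving one. Division is exact because the leaving digit is a factor of the
# current window product.
def euler008_rolling(num_digits):
    best = 0
    for run in DIGITS.split('0'):
        if len(run) < num_digits:
            continue
        product = 1
        for d in run[:num_digits]:
            product *= int(d)
        best = max(best, product)
        for i in range(num_digits, len(run)):
            product = product * int(run[i]) // int(run[i - num_digits])
            best = max(best, product)
    return best


assert euler008_rolling(13) == euler008(13)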