def irods_cache_save(f, path, *dest):
    cache_filename = os.path.join(IRODS_CACHE, path[1:])
    _mkdir(os.path.dirname(cache_filename))
    with open(cache_filename, 'wb') as fw:
        copyfile(f, fw, *dest)
    return cache_filename
def __init__(self, server_url):
    super(featuresController, self).__init__(server_url)
    self.baseurl = server_url
    _mkdir(FEATURES_TABLES_FILE_DIR)
    log.debug('importing features')
    self.docs = FeatureDoc()
def setUp():
    global results_location
    global store_local_location
    global file1_location
    global filename1
    global bqsession
    global FeatureResource

    config = ConfigParser.ConfigParser()
    config.read('setup.cfg')
    root = config.get('Host', 'root') or 'localhost:8080'
    user = config.get('Host', 'user') or 'test'
    pwd = config.get('Host', 'password') or 'test'

    results_location = config.get('Store', 'results_location') or 'Results'
    _mkdir(results_location)

    store_location = config.get('Store', 'location') or None
    if store_location is None:
        raise NameError('Require a store location to run test properly')
    store_local_location = config.get('Store', 'local_location') or 'SampleData'

    filename1 = config.get('Store', 'filename1') or None
    if filename1 is None:
        raise NameError('Require an image to run test properly')
    file1_location = fetch_file(filename1, store_location, store_local_location)

    FeatureResource = namedtuple('FeatureResource', ['image', 'mask', 'gobject'])
    FeatureResource.__new__.__defaults__ = (None, None, None)

    # start session
    bqsession = BQSession().init_local(user, pwd, bisque_root=root, create_mex=False)
def preview(self, model_uniq, args):
    class_id = int(args.get('class', -1))
    sample_id = int(args.get('sample', -1))
    log.debug('Preview model %s, class_id %s, sample_id %s', model_uniq, class_id, sample_id)

    # if class_id < 0:
    #     response.headers['Content-Type'] = 'text/xml'
    #     return '<model />'
    # if sample_id < 0:
    #     response.headers['Content-Type'] = 'text/xml'
    #     return '<class />'

    # if asking for a sample from a specific class
    if class_id >= 0 and sample_id >= 0:
        # load thumbnail for the requested class and sample_id
        model = self.get_model(model_uniq)
        path = os.path.join(model.path, 'class_{0:05d}'.format(class_id))
        thumbnail = os.path.join(path, 'sample_{0:05d}.png'.format(sample_id))
        if not os.path.exists(thumbnail):
            _mkdir(path)
            model.cache_sample_preview(class_id, sample_id, thumbnail)
    # if asking for a model's thumbnail
    elif class_id < 0 or sample_id < 0:
        model = self.get_model(model_uniq)
        thumbnail = '%s/thumbnail.svg' % model.path

    return forward(FileApp(
        thumbnail,
        content_type='image/png',
        #content_disposition=disposition,
    ).cache_control(max_age=60 * 60 * 24 * 7 * 6))  # 6 weeks
def push(self, fp, storeurl, uniq=None):
    "Push a local file (file pointer) to the store"
    log.debug('local.push: url=%s', storeurl)
    origpath = localpath = url2localpath(storeurl)
    fpath, ext = os.path.splitext(origpath)
    _mkdir(os.path.dirname(localpath))
    uniq = uniq or make_uniq_code()
    for x in xrange(len(uniq) - 7):
        #for x in range(100):
        if not os.path.exists(localpath):
            log.debug('local.write: %s -> %s', tounicode(storeurl), tounicode(localpath))
            # patch for no-copy file uploads - check for regular file or file-like object
            try:
                move_file(fp, localpath)
            except OSError as e:
                if not os.path.exists(localpath):
                    log.exception("Problem moving file to %s", localpath)
                else:
                    log.error("Problem moving file, but it seems to be there.. check permissions on store")
            #log.debug("local.push: top = %s path= %s", self.top_path, localpath)
            ident = localpath[len(self.top_path):]
            #if ident[0] == '/':
            #    ident = ident[1:]
            ident = localpath2url(ident)
            log.info('local push blob_id: %s -> %s', tounicode(ident), tounicode(localpath))
            return ident, localpath
        # name collision: append a growing slice of the uniq code and retry
        localpath = "%s-%s%s" % (fpath, uniq[3:7 + x], ext)
        #localpath = "%s-%04d%s" % (fpath, x, ext)
        log.warn("local.write: File exists... trying %s", tounicode(localpath))
    raise DuplicateFile(localpath)
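# Hedged illustration (not part of the store API): the collision-handling
# scheme push() uses when the target path already exists. A growing slice
# of the uniq code is inserted between the file stem and its extension.
# All names below are made up for the example.
def _candidate_paths(localpath, uniq):
    fpath, ext = os.path.splitext(localpath)
    yield localpath
    for x in range(len(uniq) - 7):
        yield "%s-%s%s" % (fpath, uniq[3:7 + x], ext)

# e.g. _candidate_paths('/store/foo.tif', '0123456789ab') yields
# /store/foo.tif, /store/foo-3456.tif, /store/foo-34567.tif, ...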
def classify(self, image, model, args):
    ''' segment image producing a semantic mask '''
    num_points = int(args.get('points', 10))
    border = int(args.get('border', 0))
    my_goodness = float(args.get('goodness', model.minimum_goodness * 100)) / 100.0
    my_accuracy = float(args.get('accuracy', model.minimum_accuracy * 100))
    my_confidence = float(args.get('confidence', 0))

    # color output mode
    color_mode = args.get('colors', 'ids')
    if color_mode not in self.color_modes:
        raise ConnoisseurException(responses.BAD_REQUEST, 'Requested color mode "%s" is not supported' % color_mode)

    # compute output file name and test cached result
    workdir = args['_workdir']
    _mkdir(workdir)
    filename = '%s_%s_conf%.2f_a%s_c%s_n%s_b%s.png' % (image.uniq, color_mode, my_goodness, my_accuracy, my_confidence, num_points, border)
    output_file = os.path.join(workdir, filename)

    with Locks(None, output_file, failonexist=True) as l:
        if l.locked:  # the file is not currently being written by another process
            self.do_classify(image, model, args, output_file, color_mode)

    # return results
    if os.path.exists(output_file):
        with Locks(output_file):
            pass
    return DataToken(data=output_file, mime='image/png', name='Segments', filename=filename)
def __init__(self, feature):
    self.feature = feature
    _mkdir(self.feature.path)
    self.table_plan = []
    self.table_parameters = []
    self.table_plan.append(self.TablePlan('values', self.feature.cached_columns(), ['idnumber']))
def fetch_file(self, filename):
    _mkdir(local_store_images)
    _mkdir(local_store_tests)
    url = posixpath.join(url_image_store, filename).encode('utf-8')
    path = os.path.join(local_store_images, filename)
    if not os.path.exists(path):
        urllib.urlretrieve(url, path)
    return path
def get_sample_preview_paths(self, template_path, template_filename, num_per_class=10):
    paths = {}
    for orig_id, c in self.classes_model_by_original_id.iteritems():
        path = template_path.format(orig_id)
        _mkdir(path)
        class_id = c['id']
        for i in range(num_per_class):
            filename = template_filename.format(i)
            paths['{0}.{1}'.format(class_id, i)] = os.path.join(path, filename)
    return paths
def irods_cache_save(f, path, cache, *dest):
    cache_filename = os.path.join(cache, path[1:])
    _mkdir(os.path.dirname(cache_filename))
    with Locks(None, cache_filename, failonexist=True) as l:
        if l.locked:
            with open(cache_filename, 'wb') as fw:
                copyfile(f, fw, *dest)
    with Locks(cache_filename):
        return cache_filename
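# Minimal sketch of the create-once caching pattern used above, with a
# plain O_EXCL lockfile approximating Locks(..., failonexist=True).
# This illustrates the idea only; it is not the project's Locks API.
def _cache_once(cache_filename, write_fn):
    lockname = cache_filename + '.lock'
    try:
        fd = os.open(lockname, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    except OSError as e:
        if e.errno == errno.EEXIST:
            return None  # another process is writing; caller waits/retries
        raise
    try:
        write_fn(cache_filename)  # we won the race: populate the cache
    finally:
        os.close(fd)
        os.remove(lockname)
    return cache_filename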
def stores(config):
    samples = config.store.samples_url
    inputs = config.store.input_dir
    results = config.store.results_dir
    _mkdir(results)
    files = []
    for name in [x.strip() for x in config.store.files.split()]:
        print "Fetching", name
        files.append(LocalFile(name, fetch_file(name, samples, inputs)))
    return Bunch(samples=samples, inputs=inputs, results=results, files=files)
def format(self, token, args):
    """ converts table to HDF5 """
    # GobsTable = np.dtype([
    #     ('gobject', tables.StringCol(10)),
    #     ('type', tables.StringCol(50)),
    #     ('vertices', tables.VLArray()),  # Col.from_atom(atom, pos=None)
    #     ('accuracy', tables.Float32Col()),
    #     ('goodness', tables.Float32Col()),
    #     ('confidence', tables.Float32Col()),
    # ])
    data = {
        'gobject': [],
        'type': [],
        'vertices': [],
        'accuracy': [],
        'goodness': [],
        'confidence': [],
        #'color': [],
    }
    for i, r in enumerate(token.data):
        m_g, m_a, m_c = compute_measures(r)
        data['gobject'].append(r['gob'])
        data['type'].append(r['label'])
        data['vertices'].append([(v[1], v[0]) for v in r['vertex']])
        data['accuracy'].append(m_a)
        data['goodness'].append(m_g)
        data['confidence'].append(m_c)
        #data['color'].append(get_color_html(r['id']))
    df = pd.DataFrame(data)

    workdir = args['_workdir']
    _mkdir(workdir)
    filename = '%s_%s.h5' % (token.name, args['_filename'])
    output_file = os.path.join(workdir, filename)

    with Locks(None, output_file, failonexist=True) as l:
        if l.locked:  # the file is not currently being written by another process
            df.to_hdf(output_file, 'table', append=False)

    # return results
    if os.path.exists(output_file):
        with Locks(output_file):
            pass
    return token.setFile(path=output_file, mime=self.mime_type, filename=filename)
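# Minimal standalone pandas-to-HDF5 sketch mirroring the export above
# (requires pandas with PyTables installed; data and path are illustrative).
def _hdf5_export_example():
    import pandas as pd
    df = pd.DataFrame({'goodness': [0.9], 'confidence': [0.8]})
    df.to_hdf('/tmp/example.h5', 'table', append=False)  # key 'table', overwrite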
def fetch_file(filename, url, dir):
    """
    @param filename: name of the file fetching from the store
    @param url: url of the store
    @param dir: the directory the file will be placed in
    @return the local path to the file
    """
    _mkdir(url)  # the store may itself be a local directory
    _mkdir(dir)
    url = posixpath.join(url, filename)
    path = os.path.join(dir, filename)
    if not os.path.exists(path):
        urllib.urlretrieve(url, path)
    return path
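# Hedged usage sketch for fetch_file() above. In Python 2,
# urllib.urlretrieve also accepts a plain local path, so a local
# directory can stand in for the store; all names here are illustrative.
def _fetch_file_example():
    store_dir = '/tmp/demo_store'
    _mkdir(store_dir)
    with open(os.path.join(store_dir, 'sample.txt'), 'w') as f:
        f.write('hello')
    return fetch_file('sample.txt', store_dir, '/tmp/demo_cache')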
def __init__(self, server_url):
    super(ImageServiceController, self).__init__(server_url)
    workdir = config.get('bisque.image_service.work_dir', data_path('workdir'))
    rundir = config.get('bisque.paths.run', os.getcwd())
    _mkdir(workdir)
    log.info('ROOT=%s work=%s run=%s', config.get('bisque.root'), workdir, rundir)
    self.user_map = {}
    # users = data_service.query('user', wpublic=1)
    # for u in users.xpath('user'):
    #     self.user_map[u.get('uri')] = u.get('name')
    self.srv = ImageServer(work_dir=workdir, run_dir=rundir)
def irods_fetch_file_IGET(url, **kw):
    chk_cache()
    ic = IrodsConnection(url, **kw)
    log.debug("irods-path %s", ic.path)
    localname = irods_cache_fetch(ic.path)
    if localname is None:
        with ic:
            log.debug("irods_fetching %s -> %s", url, ic.path)
            localname = irods_cache_name(ic.path)
            _mkdir(os.path.dirname(localname))
            log.info('irods %s', ['iget', ic.path, localname])
            retcode = subprocess.call(['iget', ic.path, localname])
            if retcode:
                raise IrodsError("can't read from %s %s %s error (%s)" % (url, ic.path, localname, retcode))
    return localname
def ensureWorkPath(self, path, image_id, user_name, series=0):
    """path may be a workdir path OR an original image path to be
    transformed into a workdir path
    """
    # change ./imagedir to ./workdir if needed
    path = os.path.realpath(path)
    workpath = os.path.realpath(self.workdir)
    if image_id and not path.startswith(workpath):
        path = self.initialWorkPath(image_id, user_name, series=series)
    # keep paths relative to the running dir to reduce file name size
    try:
        path = os.path.relpath(path, self.rundir)
    except ValueError:
        pass
    # make sure that the path directory exists
    _mkdir(os.path.dirname(path))
    return path
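# Sketch of the relpath fallback above: os.path.relpath raises
# ValueError (e.g. on Windows when the path and start directory live on
# different drives), in which case the original path is kept.
def _safe_relpath(path, start):
    try:
        return os.path.relpath(path, start)
    except ValueError:
        return path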
def savefile(self, **kw):
    log.info("savefile request " + str(tg.request))
    username = get_username()
    # check the user identity here and return 401 if it fails
    if anonymous():
        response.status_int = 401
        log.debug('Access denied')
        return 'Access denied'

    # if requested, test which of the given hashes are already uploaded
    hashes_str = kw.pop('hashes', None)
    if hashes_str is not None:
        all_hashes = [fhash.strip() for fhash in hashes_str.split(',')]
        #found_hashes = blob_service.files_exist(all_hashes) TODO
        found_hashes = []
        found_html = ",".join([str(h) for h in found_hashes])
        return "Found: " + found_html

    # here the user is authenticated - upload
    if 'upload' not in kw:
        response.status_int = 501
        return "No file to be uploaded..."
    upload = kw['upload']
    uploadroot = config.get('bisque.image_service.upload_dir', data_path('uploads'))
    upload_dir = uploadroot + '/' + str(username)
    _mkdir(upload_dir)
    if not upload.filename:
        return 'No file sent...'
    # patch for no-copy file uploads - check for regular file or file-like object
    uploadpath = upload_dir + '/' + upload.filename
    # KGK: note upload.file.name is not available for some uploads (attached files)
    #abs_path_src = os.path.abspath(upload.file.name)
    #if os.path.isfile(abs_path_src):
    #    shutil.move(abs_path_src, uploadpath)
    #else:
    with open(uploadpath, 'wb') as trg:
        shutil.copyfileobj(upload.file, trg)
    return 'Upload done for: ' + upload.filename
def __init__(self, server_url):
    super(ConnoisseurController, self).__init__(server_url)
    self.basepath = os.path.dirname(inspect.getfile(inspect.currentframe()))
    self.workdir = config.get('bisque.connoisseur.models', data_path('connoisseur'))
    #_mkdir(self.workdir)
    self.path_models = os.path.join(self.workdir, 'models')
    self.path_templates = os.path.join(self.workdir, 'templates')
    self.path_images = os.path.join(self.workdir, 'images')
    _mkdir(self.path_models)
    _mkdir(self.path_templates)
    _mkdir(self.path_images)

    self.adapters_gobs = PluginManager('adapters_gobs', os.path.join(self.basepath, 'adapters_gobs'), AdapterGobjectsBase)
    self.adapters_pixels = PluginManager('adapters_pixels', os.path.join(self.basepath, 'adapters_pixels'), AdapterPixelsBase)

    # frameworks available in the system
    self.frameworks = PluginManager('frameworks', os.path.join(self.basepath, 'frameworks'), FrameworkBase)

    # classifiers available to the system, requested with the method argument
    self.classifiers = PluginManager('classifiers', os.path.join(self.basepath, 'classifiers'), ClassifierBase)

    # importers are matched by their input mime and output mime
    self.importers = PluginManager('importers', os.path.join(self.basepath, 'importers'), ImporterBase)
    self.importers_by_mime = {'%s;%s' % (e.mime_input, e.mime_type): e for k, e in self.importers.plugins.iteritems()}
    #log.debug('Importers: %s', self.importers_by_mime)

    # exporters are matched by their input mime and output mime
    self.exporters = PluginManager('exporters', os.path.join(self.basepath, 'exporters'), ExporterBase)
    self.exporters_by_mime = {'%s;%s' % (e.mime_input, e.mime_type): e for k, e in self.exporters.plugins.iteritems()}
    #log.debug('Exporters: %s', self.exporters_by_mime)

    # loaded models hashed by their ID
    self.models_loaded_max = 10
    self.models = {}
    log.info('Connoisseur service started, using temp space at: %s', self.workdir)
def s3_cache_save(f, bucket, key, cache, creds):
    cache_filename = os.path.join(cache, key)
    _mkdir(os.path.dirname(cache_filename))

    # patch for no-copy file uploads - check for regular file or file-like object
    abs_path_src = os.path.abspath(f.name)
    if os.path.isfile(abs_path_src):
        #f.close()  # patch to make file move possible on windows
        #shutil.move(abs_path_src, cache_filename)
        copy_link(abs_path_src, cache_filename)
    else:
        with open(cache_filename, 'wb') as fw:
            shutil.copyfileobj(f, fw)

    if s3q:
        s3q.enqueue(s3_upload, bucket, key, cache_filename, creds, timeout="6h")
    else:
        s3_upload(bucket, key, cache_filename, creds)
    # file_size = os.path.getsize(cache_filename)
    # if file_size < 60 * 1e6:
    #     log.debug("PUSH normal")
    #     k = Key(bucket)
    #     k.key = key
    #     k.set_contents_from_filename(cache_filename)
    # else:
    #     log.debug("PUSH multi")
    #     chunk_size = 52428800  # 50MB
    #     chunk_count = int(math.ceil(file_size / float(chunk_size)))
    #     mp = bucket.initiate_multipart_upload(key)
    #     for i in range(chunk_count):
    #         offset = chunk_size * i
    #         bytes = min(chunk_size, file_size - offset)
    #         with FileChunkIO(cache_filename, 'r', offset=offset, bytes=bytes) as fp:
    #             mp.upload_part_from_file(fp, part_num=i + 1)
    #     mp.complete_upload()
    return cache_filename
def s3_cache_fetch(bucket, key, cache, creds, blocking):
    cache_filename = os.path.join(cache, key)
    if os.path.exists(cache_filename):
        return cache_filename
    _mkdir(os.path.dirname(cache_filename))
    if s3q and not blocking:
        log.debug("Queuing download of %s", cache_filename)
        job = s3q.enqueue(s3_download, bucket, key, cache_filename, creds, blocking, timeout="6h")
        for ix in range(5):
            if job.result is None:
                time.sleep(1)
            else:
                return job.result  # cache_filename
        # the wait on the job has timed out
        log.debug("download timed out")
        raise FileLocked
    else:
        cache_filename = s3_download(bucket, key, cache_filename, creds, blocking)
        if cache_filename is None and not blocking:
            raise FileLocked
    return cache_filename
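# Sketch of the poll-then-timeout pattern above, with a generic job
# object standing in for the queued download (the 5 x 1 second window
# mirrors the code; FileLocked is this module's timeout signal).
def _wait_for_result(job, attempts=5, delay=1):
    for _ in range(attempts):
        if job.result is None:
            time.sleep(delay)
        else:
            return job.result
    raise FileLocked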
def start_nounicode_win(ifnm, command):
    if isascii(ifnm):
        return command, None
    ext = os.path.splitext(ifnm)[1]
    uniq = hashlib.md5('%s%s' % (ifnm.encode('ascii', 'xmlcharrefreplace'), datetime.datetime.now())).hexdigest()
    # preserve drive letter to create hard link on the same drive
    # dima: os.path.join does not join drive letters correctly
    tmp_path = os.path.splitdrive(ifnm)[0]
    if tmp_path != '':
        tmp_path = '%s\\temp' % tmp_path
        _mkdir(tmp_path)
    tmp = str(os.path.join(tmp_path, 'bq_temp_%s%s' % (uniq, ext)))
    log.debug('start_nounicode_win hardlink: [%s] -> [%s]', ifnm, tmp)
    try:
        hardlink(ifnm, tmp)
    except OSError:
        log.debug('Failed creating a hard link: %s', tmp)
        return command, None
    command = [tmp if x == ifnm else x for x in command]
    log.debug('Created a new command: %s', command)
    return command, tmp
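# Hedged usage sketch for start_nounicode_win(): run an external command
# on a non-ASCII path through its ASCII hard-link alias, then discard the
# temporary link. The path and command below are made up for the example.
def _nounicode_example():
    ifnm = u'C:\\data\\imag\u00e9.tif'
    command, tmp = start_nounicode_win(ifnm, ['imgcnv', '-i', ifnm])
    try:
        return subprocess.call(command)
    finally:
        if tmp is not None:
            os.remove(tmp)  # drop the temporary hard link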
def create(self, args):
    ''' GET /connoisseur/create/template:CaffeNet[/name:SeaCreatures][/dataset:UUID] '''
    template = args.get('template')
    dataset = args.get('dataset')
    name = args.get('name')
    log.debug('Create model from: %s', template)

    tmpls = self.get_templates()
    if template not in tmpls:
        raise ConnoisseurException(responses.NOT_FOUND, 'Requested template "%s" not found' % template)
    path_template = tmpls[template]

    model = etree.fromstring(self.get_template(template))
    if name is not None:
        model.set('name', name)
    if dataset is not None:
        t = model.find('tag[@name="training_set"]')
        if t is not None:
            t.set('value', '%s' % dataset)

    # next, post the model document and get its UUID
    model = data_service.new_resource(resource=model)

    # finally, copy all of the template files over to model storage
    uniq = model.get('resource_uniq')
    path_model = os.path.join(self.path_models, uniq)
    _mkdir(path_model)
    log.debug('Creating model in: %s', path_model)
    self.copy_dir(path_template, path_model)

    response.headers['Content-Type'] = 'text/xml'
    return etree.tostring(model)
def image2numpy(uri, **kw):
    """
    Converts an image url to a numpy array. For bisque image_service urls
    the format is changed to OME-TIFF and the tiff is read with pylibtiff.
    If the uri does not return a tiff file, the pillow reader is used instead.
    @param uri: an image url
    @param kw: query parameters, added only to image service urls
    @return numpy image
    """
    o = urlparse.urlsplit(uri)
    if 'image_service' in o.path:
        # find the image resource through the local image service
        if kw:
            uri = BQServer().prepare_url(uri, **kw)
        uri = BQServer().prepare_url(uri, format='OME-BigTIFF')
        log.debug("Image Service uri: %s", uri)
        image_path = image_service.local_file(uri)
        log.debug("Image Service path: %s", image_path)
        if image_path is None:
            log.debug('Not found in image_service internally: %s', uri)
        else:
            return convert_image2numpy(image_path)
    _mkdir(FEATURES_TEMP_DIR)
    with tempfile.NamedTemporaryFile(dir=FEATURES_TEMP_DIR, prefix='image', delete=False) as f:
        content = fetch_resource(uri)
        f.write(content)
    im = convert_image2numpy(f.name)
    os.remove(f.name)
    return im
def setup_simple_feature_test(ns):
    """ Setup feature requests test """
    config = ConfigParser.ConfigParser()
    config.read(CONFIG_FILE)
    root = config.get('Host', 'root') or DEFAULT_ROOT
    user = config.get('Host', 'user') or DEFAULT_USER
    pwd = config.get('Host', 'password') or DEFAULT_PASSWORD
    results_location = config.get('Store', 'results_dir') or DEFAULT_RESULTS_DIR
    store_location = config.get('Store', 'location') or None
    store_local_location = config.get('Store', 'local_dir') or DEFAULT_LOCAL_DIR
    temp_store = config.get('Store', 'temp_dir') or DEFAULT_TEMPORARY_DIR
    test_image = config.get('SimpleTest', 'test_image') or None
    feature_response_results = config.get('SimpleTest', 'feature_response') or DEFAULT_FEATURE_RESPONSE_HDF5
    feature_past_response_results = config.get('SimpleTest', 'feature_sample') or DEFAULT_FEATURE_SAMPLE_HDF5

    if store_location is None:
        raise NameError('Require a store location to run test properly')
    if test_image is None:
        raise NameError('Require an image to run test properly')

    _mkdir(store_local_location)
    _mkdir(results_location)
    _mkdir(temp_store)

    results_table_path = os.path.join(results_location, feature_response_results)
    if os.path.exists(results_table_path):
        os.remove(results_table_path)

    test_image_location = fetch_file(test_image, store_location, store_local_location)

    # initialize session
    session = BQSession().init_local(user, pwd, bisque_root=root)

    # set to namespace
    ns.root = root
    ns.store_location = store_location
    ns.session = session
    ns.results_location = results_location
    ns.store_local_location = store_local_location
    ns.test_image_location = test_image_location
    ns.feature_response_results = feature_response_results
    ns.feature_past_response_results = feature_past_response_results
    ns.test_image = test_image
    ns.temp_store = temp_store
def chk_cache(cache):
    if not os.path.exists(cache):
        _mkdir(cache)
def action(self, token, arg):
    '''arg = l,tnx,tny,tsz'''
    if not token.isFile():
        raise ImageServiceException(400, 'Tile: input is not an image...')
    level = 0
    tnx = 0
    tny = 0
    tsz = 512
    vs = arg.split(',', 4)
    if len(vs) > 0 and vs[0].isdigit():
        level = int(vs[0])
    if len(vs) > 1 and vs[1].isdigit():
        tnx = int(vs[1])
    if len(vs) > 2 and vs[2].isdigit():
        tny = int(vs[2])
    if len(vs) > 3 and vs[3].isdigit():
        tsz = int(vs[3])
    log.debug('Tile: l:%d, tnx:%d, tny:%d, tsz:%d', level, tnx, tny, tsz)

    # if the input image is smaller than the requested tile size
    dims = token.dims or {}
    width = dims.get('image_num_x', 0)
    height = dims.get('image_num_y', 0)
    if width <= tsz and height <= tsz:
        log.debug('Image is smaller than requested tile size, passing the whole image...')
        return token

    # construct a sliced filename
    ifname = token.first_input_file()
    base_name = '%s.tiles' % (token.data)
    _mkdir(base_name)
    ofname = os.path.join(base_name, '%s_%.3d_%.3d_%.3d' % (tsz, level, tnx, tny))
    hist_name = os.path.join(base_name, '%s_histogram' % (tsz))

    # if the input image does not contain a tile pyramid, create one and pass it along
    if dims.get('image_num_resolution_levels', 0) < 2 or dims.get('tile_num_x', 0) < 1:
        pyramid = '%s.pyramid.tif' % (token.data)
        command = token.drainQueue()
        if not os.path.exists(pyramid):
            #command.extend(['-ohst', hist_name])
            command.extend(['-options', 'compression lzw tiles %s pyramid subdirs' % default_tile_size])
            log.debug('Generate tiled pyramid %s: from %s to %s with %s', token.resource_id, ifname, pyramid, command)
            r = self.server.imageconvert(token, ifname, pyramid, fmt=default_format, extra=command)
            if r is None:
                raise ImageServiceException(500, 'Tile: could not generate pyramidal file')
        # ensure the file was created
        with Locks(pyramid, failonread=(not block_tile_reads)) as l:
            if l.locked is False:  # dima: never wait, respond immediately
                fff = (width * height) / (10000 * 10000)
                raise ImageServiceFuture((15 * fff, 30 * fff))
        # compute the number of pyramidal levels
        # sz = max(width, height)
        # num_levels = math.ceil(math.log(sz, 2)) - math.ceil(math.log(min_level_size, 2)) + 1
        # scales = [1/float(pow(2,i)) for i in range(0, num_levels)]
        # info = {
        #     'image_num_resolution_levels': num_levels,
        #     'image_resolution_level_scales': ','.join([str(i) for i in scales]),
        #     'tile_num_x': default_tile_size,
        #     'tile_num_y': default_tile_size,
        #     'converter': ConverterImgcnv.name,
        # }
        # load the number of pyramidal levels from the file
        info2 = self.server.getImageInfo(filename=pyramid)
        info = {
            'image_num_resolution_levels': info2.get('image_num_resolution_levels'),
            'image_resolution_level_scales': info2.get('image_resolution_level_scales'),
            'tile_num_x': info2.get('tile_num_x'),
            'tile_num_y': info2.get('tile_num_y'),
            'converter': info2.get('converter'),
        }
        log.debug('Updating original input to pyramidal version %s: %s -> %s', token.resource_id, ifname, pyramid)
        token.setImage(ofname, fmt=default_format, dims=info, input=pyramid)
        ifname = pyramid

    # compute output tile size
    dims = token.dims or {}
    x = tnx * tsz
    y = tny * tsz
    if x >= width or y >= height:
        raise ImageServiceException(400, 'Tile: tile position outside of the image: %s,%s' % (tnx, tny))

    # the new tile service does not change the number of z points in the image
    # and, if the image contains all z, will perform the operation
    info = {
        'image_num_x': tsz if width - x >= tsz else width - x,
        'image_num_y': tsz if height - y >= tsz else height - y,
        #'image_num_z': 1,
        #'image_num_t': 1,
    }
    #log.debug('Inside pyramid dims: %s', dims)
    #log.debug('Inside pyramid input: %s', token.first_input_file())
    #log.debug('Inside pyramid data: %s', token.data)

    # extract an individual tile from the pyramidal tiled image
    if dims.get('image_num_resolution_levels', 0) > 1 and dims.get('tile_num_x', 0) > 0:
        # dima: maybe better to test the converter; if imgcnv then enqueue, otherwise proceed with the converter path
        if dims.get('converter', '') == ConverterImgcnv.name:
            c = self.server.converters[ConverterImgcnv.name]
            r = c.tile(token, ofname, level, tnx, tny, tsz)
            if r is not None:
                if not os.path.exists(hist_name):
                    # write the histogram file if missing
                    c.writeHistogram(token, ofnm=hist_name)
                # if the decoder returned a list of operations for imgcnv to enqueue
                if isinstance(r, list):
                    #r.extend(['-ihst', hist_name])
                    token.histogram = hist_name
                    return self.server.enqueue(token, 'tile', ofname, fmt=default_format, command=r, dims=info)
        # try other decoders to read tiles
        ofname = '%s.tif' % ofname
        if os.path.exists(ofname):
            return token.setImage(ofname, fmt=default_format, dims=info, hist=hist_name, input=ofname)
        else:
            r = None
            for n, c in self.server.converters.iteritems():
                if n == ConverterImgcnv.name:
                    continue
                if callable(getattr(c, "tile", None)):
                    r = c.tile(token, ofname, level, tnx, tny, tsz)
                    if r is not None:
                        if not os.path.exists(hist_name):
                            # write the histogram file if missing
                            c.writeHistogram(token, ofnm=hist_name)
                        return token.setImage(ofname, fmt=default_format, dims=info, hist=hist_name, input=ofname)
    raise ImageServiceException(500, 'Tile could not be extracted')
def s3_fetch_file(bucket, key, cache, creds, blocking):
    if not os.path.exists(cache):
        _mkdir(cache)
    localname = s3_cache_fetch(bucket, key, cache=cache, creds=creds, blocking=blocking)
    return localname
def irods_cache_name(path):
    cache_filename = os.path.join(IRODS_CACHE, path[1:])
    _mkdir(os.path.dirname(cache_filename))
    return cache_filename
def set_path(self, path):
    self.path = path
    self.filename = os.path.basename(self.path)
    _mkdir(os.path.dirname(self.path))
    return self
def chk_cache():
    if not os.path.exists(IRODS_CACHE):
        _mkdir(IRODS_CACHE)