def event_handler():
    """Handle POST requests from GitHub webhooks."""
    if request.headers.get('X-GitHub-Event') == 'release':
        repository = request.json.get('repository')
        release = request.json.get('release')
        m = Metadata(repository, release)
        entry = m.getMetadata()
        if entry:
            db = get_db()
            print(entry)
            db.execute(
                '''
                INSERT INTO plugins
                    (name, version, version_major, version_minor, version_revision,
                     description, qgis_minimum_version, qgis_maximum_version,
                     homepage, file_name, author_name, download_url, uploaded_by,
                     create_date, update_date, experimental)
                VALUES
                    (:name, :version, :version_major, :version_minor, :version_revision,
                     :description, :qgis_minimum_version, :qgis_maximum_version,
                     :homepage, :file_name, :author_name, :download_url, :uploaded_by,
                     :create_date, :update_date, :experimental)
                ''', entry)
            db.commit()
        else:
            return "metadata invalid"
        return "received valid request"
    else:
        return "invalid request"
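For context, event_handler reads Flask's request proxy and returns plain strings, so it presumably runs as a registered view. A minimal wiring sketch, assuming Flask; the '/webhook' route path is illustrative, not from the source:

# Hypothetical wiring for event_handler; assumes a Flask app object.
from flask import Flask, request

app = Flask(__name__)
app.add_url_rule('/webhook', view_func=event_handler, methods=['POST'])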
def pseudo_maxproject_positions_and_tform(posname, md_path, tforms_xy, tforms_z,
                                          bitmap, zstart=6, k=2, reg_ref='hybe1'):
    """
    Wrapper for a multiple-Z codestack where each plane is a max projection
    of a few frames above and below.
    """
    md = Metadata(md_path)
    xy = tforms_xy
    # Round the per-hybe z-shifts to integers; the comprehension variable is
    # named `key` so it does not shadow the projection half-width `k`.
    z = {key: int(np.round(np.mean(val))) for key, val in tforms_z.items()}
    z[reg_ref] = 0
    xy[reg_ref] = (0, 0)
    cstk = []
    for seq, hybe, chan in bitmap:
        t = xy[hybe]
        zindexes = list(range(zstart - z[hybe] - k, zstart - z[hybe] + k + 1))
        print(zindexes)
        zstk = md.stkread(Channel=chan, hybe=hybe, Position=posname,
                          Zindex=zindexes)
        zstk = zstk.max(axis=2)
        zstk = tform_image(zstk, chan, t)
        cstk.append(zstk)
        del zstk
    cstk = np.stack(cstk, axis=2)
    nf = np.percentile(cstk, 90, axis=(0, 1))
    return cstk, nf
def create_metadata(args):
    fullpath = os.path.join(args.outputdir, args.metadata)
    if os.path.exists(fullpath):
        return
    from importlib import import_module
    d = import_module('data_formats.' + args.data_format)
    md = Metadata(inputdir=args.inputdir,
                  input_filter=d.input_filter,
                  treename=d.treename,
                  reweight_events=d.reweight_events,
                  reweight_bins=d.reweight_bins,
                  metadata_events=d.metadata_events,
                  selection=d.selection,
                  var_groups=d.var_groups,
                  var_blacklist=d.var_blacklist,
                  var_no_transform_branches=d.var_no_transform_branches,
                  label_list=d.label_list,
                  reweight_var=d.reweight_var,
                  reweight_classes=d.reweight_classes,
                  reweight_method=d.reweight_method,
                  var_img=d.var_img,
                  var_pos=d.var_pos,
                  n_pixels=d.n_pixels,
                  img_ranges=d.img_ranges,
                  )
    md.produceMetadata(fullpath)
def main(folder):
    if folder.endswith('.mp3'):
        files = [folder]
    else:
        # Iterate over all .mp3 files in this directory and identify them.
        files = [fullpath(folder, f) for f in os.listdir(folder)
                 if f.endswith('.mp3')]
    updated = 0
    for path in files:
        print(os.path.basename(path))
        songs = None
        try:
            songs = song.identify(path, buckets=['audio_summary'])
        except EchoNestAPIError as e:
            print("API Error: %s" % e)
            continue
        s = songs and songs.pop()
        if not s:
            print("Couldn't resolve %s" % path)
            continue
        m = Metadata(s)
        m.write_id3(path, create_only=False, replace=False)
        updated += 1
def FromFile(text, min_period, max_period, logger, vocab=None):
    print("Warning! This method of loading a Reader from file (Reader.FromFile(...))",
          "is deprecated, and will be removed in the next update. Use FromCard instead.")
    # Load a Reader from a file's text string
    lines = text.splitlines()
    version = parse_card_line(lines[0]).strip()
    version = version if len(version.strip()) > 1 else lines[4]
    logger.info("Dictionary version: {} ({} lines)".format(version, len(lines)))
    if version == "v4" or version == "v5":
        return Reader.FromCard(text, vocab, min_period, max_period, logger)
    # I stopped saving the chat metadata and the cache together
    elif version == "v3":
        meta = Metadata.loadl(lines[0:8])
        cache = '\n'.join(lines[9:])
        vocab = Generator.loads(cache)
    elif version == "v2":
        meta = Metadata.loadl(lines[0:7])
        cache = '\n'.join(lines[8:])
        vocab = Generator.loads(cache)
    elif version == "dict:":
        meta = Metadata.loadl(lines[0:6])
        cache = '\n'.join(lines[6:])
        vocab = Generator.loads(cache)
    else:
        meta = Metadata.loadl(lines[0:4])
        cache = lines[4:]
        vocab = Generator(load=cache, mode=Generator.MODE_LIST)
        # raise SyntaxError("Reader: Metadata format unrecognized.")
    r = Reader(meta, vocab, min_period, max_period, logger)
    return r
def update_metadata(args):
    create_metadata(args)
    from importlib import import_module
    d = import_module('data_formats.' + args.data_format)
    md = Metadata(args.inputdir,
                  treename=d.treename,
                  reweight_events=d.reweight_events,
                  reweight_bins=d.reweight_bins,
                  metadata_events=d.metadata_events,
                  selection=d.selection,
                  var_groups=d.var_groups,
                  var_blacklist=d.var_blacklist,
                  var_no_transform_branches=d.var_no_transform_branches,
                  label_list=d.label_list,
                  reweight_var=d.reweight_var,
                  reweight_classes=d.reweight_classes,
                  var_img=d.var_img,
                  var_pos=d.var_pos,
                  n_pixels=d.n_pixels,
                  img_ranges=d.img_ranges,
                  )
    md.loadMetadata(os.path.join(args.outputdir, args.metadata))
    if args.remake_filelist:
        md.updateFilelist(args.test_sample)
    if args.remake_weights:
        md.updateWeights(args.test_sample)
    md.writeMetadata(os.path.join(args.jobdir, args.metadata))
    njobs = int(math.ceil(float(sum(md.num_events)) / args.events_per_file))
    return md, njobs
def test(self):
    meta = Metadata()
    meta.setMetadataPair('dateTime', '20021023')
    meta.setMetadataPair('browseType', 'jpeg')
    mess = self.buildMessage(meta, "rep.browseReport")
    print("message:%s" % mess)
    return mess
def check_imaging(self):
    if self.verbose:
        self.update_user('Checking Imaging')
    # self.metadata = Metadata(self.metadata_path)
    self.acqs = [i for i in os.listdir(self.metadata_path) if 'hybe' in i]
    self.metadata = Metadata(os.path.join(self.metadata_path, self.acqs[0]))
    self.posnames = self.metadata.image_table[
        self.metadata.image_table.acq.isin(self.acqs)].Position.unique()
def main():
    # Create instances
    config = Config()
    epublius = Epublius(config.get_config('epublius'))
    metadata = Metadata(config.get_config('metadata'), epublius.args)
    pandoc = Pandoc(config.get_config('pandoc'))

    # Program execution
    epublius.unzip_epub()

    ## Get ebook contents
    contents = epublius.get_contents()
    for index, content in enumerate(contents):
        ## Gather metadata and convert files to HTML5
        metadata_path = metadata.get_metadata(contents, index)
        pandoc.convert_files(content, metadata_path)
        metadata.cleanup(metadata_path)

        ## Fix links in content (as the file extension is now .html)
        epublius.manipulate_links(content)

    ## Copy media to output folder
    epublius.copy_files('epub_media')
    epublius.copy_files('epublius_media')
    epublius.cleanup()
def photobleach_qc(md, path=True, pos=False):
    import matplotlib.pyplot as plt
    if path:
        from metadata import Metadata
        md = Metadata(md)
    if pos is False:
        pos = md.image_table.Position.iloc[0]
    # Plot the per-acquisition z-profiles for each imaging channel.
    for channel in ('FarRed', 'Orange'):
        for acq in md.image_table[md.image_table.Position == pos].acq.unique():
            if 'hybe' in acq:
                stk = md.stkread(Position=pos, Channel=channel, acq=acq)
                plt.plot(range(stk.shape[2]),
                         np.mean(np.mean(stk, axis=0), axis=0), label=acq)
        plt.title(channel)
        plt.xlabel('Z index')
        plt.ylabel('Average Intensity')
        plt.legend()
        plt.show()
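A minimal call sketch with an illustrative metadata directory; with path=True (the default) the function builds the Metadata object itself and plots z-profiles for the first imaged position:

photobleach_qc('/path/to/dataset/metadata')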
def print_wellcome():
    meta = Metadata()
    print(" ___ __ __ ___ ")
    print(" / | __ __/ /_____ / |/ /___ _ _____ ")
    print(" / /| |/ / / / __/ __ \ / /|_/ / __ \ | / / _ \ ")
    print(" / ___ / /_/ / /_/ /_/ / / / / / /_/ / |/ / __/ ")
    print("/_/ |_\__,_/\__/\____/ /_/ /_/\____/|___/\___/ ")
    print(" ______________________________________________________")
    print("/_____/_____/_____/_____/_____/_____/_____/_____/_____/ v" + meta.get_version())
    print(" __ ___ ______ ")
    print(" / |/ /___ __ __________ / ____/_ ________________ _____")
    print(" / /|_/ / __ \/ / / / ___/ _ \ / / / / / / ___/ ___/ __ \/ ___/")
    print(" / / / / /_/ / /_/ (__ ) __/ / /___/ /_/ / / (__ ) /_/ / / ")
    print("/_/ /_/\____/\__,_/____/\___/ \____/\__,_/_/ /____/\____/_/ ")
    print(" __________________________________________________________________")
    print("/_____/_____/_____/_____/_____/_____/_____/_____/_____/_____/_____/")
    print("\n *Keep open during your game session \n\n")
def load_stack(self):
    self.metadata = Metadata(self.metadata_path)
    try:
        self.stk = self.metadata.stkread(Position=self.posname,
                                         Channel=self.channel,
                                         hybe=self.hybe).astype(float)
    except:
        """ Issue with imaging """
        self.stk = None
        self.completed = True
        self.utilities.save_data('Imaging Failed', Dataset=self.dataset,
                                 Position=self.posname, Hybe=self.hybe,
                                 Channel=self.channel, Type='log')
        self.utilities.save_data('Failed', Dataset=self.dataset,
                                 Position=self.posname, Hybe=self.hybe,
                                 Channel=self.channel, Type='flag')
        self.utilities.save_data('Registration Failed', Dataset=self.dataset,
                                 Position=self.posname, Hybe=self.hybe,
                                 Type='log')
        self.utilities.save_data('Failed', Dataset=self.dataset,
                                 Position=self.posname, Hybe=self.hybe,
                                 Type='flag')
        self.utilities.save_data(str(self.hybe) + ' Failed',
                                 Dataset=self.dataset, Position=self.posname,
                                 Type='log')
        self.utilities.save_data('Failed', Dataset=self.dataset,
                                 Position=self.posname, Type='flag')
        # self.stk is None at this point, so there is nothing to process
        return
    if self.verbose:
        self.update_user('Loading Stack')
    # Should add a way to exclude cells from bead find
    # Filter Out Low Frequency Background
    # Filter Out High Frequency Noise
    self.create_hotpixel_kernel()
    if self.verbose:
        iterable = tqdm(range(self.stk.shape[2]), desc='Processing Stack')
    else:
        iterable = range(self.stk.shape[2])
    for i in iterable:
        self.stk[:, :, i] = self.process_image(self.stk[:, :, i])
    # Threshold to prevent False Positive Bead Calls
    # thresh = np.percentile(self.stk.ravel(), 99.9)
    # self.stk = self.stk - thresh
    # self.stk[self.stk < 0] = 0
    # # Blur to Ensure Clean Center
    # for i in range(self.stk.shape[2]):
    #     self.stk[:, :, i] = gaussian_filter(self.stk[:, :, i], 2)
    if self.two_dimensional:
        self.stk = self.stk.mean(axis=2)
def load_data(self):
    if self.overwrite:
        if self.verbose:
            self.update_user('Overwriting Processing')
        self.proceed = True
    if self.proceed:
        """ Load Metadata """
        if self.verbose:
            self.update_user('Loading Metadata')
        self.metadata = Metadata(os.path.join(self.metadata_path, self.acq))
        """ Load Transformations """
        if self.verbose:
            self.update_user('Loading Transformation')
        self.translation = self.fishdata.load_data('tforms',
                                                   dataset=self.dataset,
                                                   posname=self.posname,
                                                   hybe=self.hybe)
        self.translation_x = self.translation['x']
        self.translation_y = self.translation['y']
        self.translation_z = 0  # int(round(self.translation['z']))
        """ Calculate Zindexes """
        self.k = self.parameters['projection_k']
        if self.two_dimensional:
            zindexes = [0]
        else:
            # Might be issues if the z transformation is too large
            zindexes = list(range(self.zindex - self.k + self.translation_z,
                                  self.zindex + self.k + self.translation_z + 1))
        """ Loading Images """
        if self.verbose:
            self.update_user('Loading Sub Stack')
        try:
            if self.two_dimensional:
                """ Use all zindexes """
                self.sub_stk = self.metadata.stkread(
                    Position=self.posname, Channel=self.channel,
                    verbose=self.verbose).astype(self.dtype)
            else:
                """ Use some zindexes """
                self.sub_stk = self.metadata.stkread(
                    Position=self.posname, Channel=self.channel,
                    Zindex=zindexes, verbose=self.verbose).astype(self.dtype)
        except:
            # Translation in z too large for this z index
            # Just use an average image for this position
            # Zeros may be an issue here
            """ Use the minimum of all zindexes """
            if self.verbose:
                self.update_user('Using min of image')
            try:
                self.sub_stk = self.metadata.stkread(
                    Position=self.posname, hybe=self.hybe,
                    Channel=self.channel,
                    verbose=self.verbose).astype(self.dtype)
                self.sub_stk = np.min(self.sub_stk, axis=2)
            except:
                print("Likely this channel wasn't imaged")
class UnzipAndMergeStreamCatFiles():
    def __init__(self):
        self.metadata = Metadata()

    def unzip_files(self):
        file_base = self.metadata.get_file_format()
        file_types = self.metadata.get_file_types()
        # regions = ["01", "02", "03N", "03S", "03W", "04", "05", "06", "07",
        #            "08", "09", "10L", "10U", "11", "12", "13", "14", "15",
        #            "16", "17", "18"]
        regions = self.metadata.get_regions()
        for file_type in file_types:
            for reg in regions:
                file_name = file_base.format(file_type, reg, "zip")
                zip_file = os.path.join(file_type, file_name)
                with zipfile.ZipFile(zip_file, 'r') as zip_ref:
                    zip_ref.extractall(file_type + "/")

    def merge_divided_files(self):
        # Only region 3 and region 10 have multiple files that need to be merged
        file_base = self.metadata.get_file_format()
        file_types = self.metadata.get_file_types()
        for file_type in file_types:
            file_names = [
                os.path.join(file_type, file_base.format(file_type, reg, "csv"))
                for reg in ("03N", "03S", "03W")
            ]
            merged_csv = pd.concat([pd.read_csv(f) for f in file_names])
            merged_csv.to_csv(os.path.join(file_type, file_type + "_Region03.csv"),
                              index=False)

            file_names = [
                os.path.join(file_type, file_base.format(file_type, reg, "csv"))
                for reg in ("10L", "10U")
            ]
            merged_csv = pd.concat([pd.read_csv(f) for f in file_names])
            merged_csv.to_csv(os.path.join(file_type, file_type + "_Region10.csv"),
                              index=False)
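A hypothetical driver for the class above; the unzip-then-merge call order follows from the method semantics and is an assumption:

job = UnzipAndMergeStreamCatFiles()
job.unzip_files()          # extract every regional zip per file type
job.merge_divided_files()  # then stitch regions 03 and 10 back together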
def signal_handler(signal, frame):
    Metadata.save_state()
    Metadata.stop()
    Database.stop()
    Share.stop()
    if os.path.exists("tmp"):
        shutil.rmtree("tmp")
    API.stop()
    sys.exit(0)
def is_preprocessed(self):
    """Returns whether the raw data has been preprocessed.

    Returns:
        A boolean indicating if the raw file is preprocessed.
    """
    metadata = Metadata(self._preprocess_dir, bucket=self._preprocess_bucket)
    return metadata.load()
def retrieveMeta(self, name):
    """
    Return the metadata for the given player instance
    """
    try:
        proxy = self.bus.get_object(name, "/org/mpris/MediaPlayer2")
        device_prop = dbus.Interface(proxy, "org.freedesktop.DBus.Properties")
        prop = device_prop.Get("org.mpris.MediaPlayer2.Player", "Metadata")

        def get_prop(key, conv=str):
            # Fetch one MPRIS property; any lookup/conversion failure maps
            # to None, matching the original per-field try/except blocks.
            try:
                return conv(prop.get(key))
            except:
                return None

        artist = get_prop("xesam:artist", array_to_string)
        title = get_prop("xesam:title")
        albumArtist = get_prop("xesam:albumArtist", array_to_string)
        albumTitle = get_prop("xesam:album")
        artURL = get_prop("mpris:artUrl")
        discNumber = get_prop("xesam:discNumber")
        trackNumber = get_prop("xesam:trackNumber")

        md = Metadata(artist, title, albumArtist, albumTitle,
                      artURL, discNumber, trackNumber)
        md.playerName = self.playername(name)
        md.fixProblems()
        return md
    except dbus.exceptions.DBusException as e:
        logging.debug(e)
def batch_write(args):
    from metadata import Metadata
    md = Metadata(None)
    md.loadMetadata(args.metadata)
    writeData(md, outputdir=args.outputdir, jobid=args.jobid, batch_mode=True,
              test_sample=args.test_sample, events=args.events_per_file)
def __init__(self, repository):
    self.repository = repository
    self.metadata = Metadata(
        html_url=repository.html_url,
        description=repository.description,
        stars=repository.stargazers_count,
        forks=repository.forks,
        watchers=repository.watchers
    )
    logging.basicConfig(level=logging.DEBUG)
def init_db(self):
    # Fetch location
    location = bn.interaction.get_open_filename_input("Load SimHash database",
                                                      ".simhash")
    if not location:
        bn.log_info("[*] Using default location for SimHash database: {}".format(
            default_sim_hash_location))
        location = default_sim_hash_location
    # Set up the metadata class
    self.sim_hash_location = location
    self.metadata = Metadata(location + '.meta')
def __init__(self, root, mountpoint):
    self.root = root
    # Retrieve FreyaFS metadata
    self.metadata = Metadata(os.path.join(root, ".freyafs"))
    # Keep track of open files
    self.cache = Cache()
    print("[*] FreyaFS mounted")
    print(f"Now, through the FreyaFS mountpoint ({mountpoint}), you can use "
          f"a Mix&Slice encrypted filesystem seamlessly.")
    print(f"FreyaFS will persist your encrypted data at {root}.")
def __init__(self):
    self.metadata = Metadata()
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(name)s - %(levelname)s - %(message)s',
        datefmt='%d-%b-%y %H:%M:%S',
        filename='streamcat_csv_data.log',
        filemode='w')
    self.log_msg("Starting")
def pseudo_maxproject_positions_and_tform(posname, md_path, tforms_xy, tforms_z,
                                          bitmap, zstart=6, k=2, reg_ref='hybe1',
                                          ndecon_iter=20, nf_init_qtile=95):
    """
    Wrapper for a multiple-Z codestack where each plane is a max projection
    of a few frames above and below.
    """
    global flatfield_dict, use_gpu
    md = Metadata(md_path)
    xy = tforms_xy
    # Round the per-hybe z-shifts to integers (`key` avoids shadowing `k`).
    z = {key: int(np.round(np.mean(val))) for key, val in tforms_z.items()}
    z[reg_ref] = 0
    xy[reg_ref] = (0, 0)
    seqs, hybes, channels = zip(*bitmap)
    psf_map = {'Orange': orange_psf, 'FarRed': farred_psf, 'Green': green_psf}
    cstk = np.stack([
        md.stkread(Channel=chan, hybe=hybe, Position=posname,
                   Zindex=list(range(zstart - z[hybe] - k,
                                     zstart - z[hybe] + k + 1))).max(axis=2)
        for seq, hybe, chan in bitmap
    ], axis=2)
    if use_gpu:
        cstk = [
            dogonvole(cstk[:, :, i], psf_map[chan], niter=ndecon_iter)
            for i, chan in enumerate(channels)
        ]
        inputs = [(cstk[i], channels[i], hybes[i], xy[hybes[i]],
                   flatfield_dict[channels[i]]) for i in range(len(cstk))]
        with multiprocessing.Pool(8) as ppool:
            cstk = ppool.starmap(pfunc_img_process, inputs)
    else:
        print('did not use GPU')
        inputs = [(cstk[:, :, i], channels[i], hybes[i], xy[hybes[i]],
                   flatfield_dict[channels[i]], ndecon_iter)
                  for i in range(cstk.shape[2])]
        with multiprocessing.Pool(ncpu) as ppool:
            cstk = ppool.starmap(pfunc_img_process, inputs)
    cstk = np.stack(cstk, axis=2)
    nf = np.percentile(cstk, nf_init_qtile, axis=(0, 1))
    return cstk, nf
def _raw_preprocess(self, number_per_slice):
    """Splits raw data into slices and keeps the start time of each slice
    in a JSON file.

    Args:
        number_per_slice: An int of records to keep for each slice.

    Returns:
        Error string if an error occurs, None if complete.
    """
    raw_slice_metadata = Metadata(
        self._preprocess_dir,
        strategy=None,
        level=RAW_LEVEL_DIR,
        bucket=self._preprocess_bucket)
    raw_data = RawDataProcessor(
        self._metadata['raw_file'], number_per_slice, self._raw_bucket)
    slice_index = 0
    raw_start_times = list()
    record_count = 0
    timespan_start = timespan_end = -1
    while raw_data.readable():
        slice_name = utils.get_slice_path(
            self._preprocess_dir, RAW_LEVEL_DIR,
            utils.get_slice_name(slice_index))
        print("Slice name: " + slice_name)
        level_slice = LevelSlice(slice_name, bucket=self._preprocess_bucket)
        raw_slice = raw_data.read_next_slice()
        print(raw_slice)
        if isinstance(raw_slice, str):
            return raw_slice
        level_slice.save(raw_slice)
        raw_start_times.append(raw_slice[0][0])
        slice_index += 1
        record_count += len(raw_slice)
        if timespan_start == -1:
            timespan_start = raw_slice[0][0]
        timespan_end = raw_slice[-1][0]
    self._metadata['raw_number'] = record_count
    self._metadata['start'] = timespan_start
    self._metadata['end'] = timespan_end
    levels, level_names = self._get_levels_metadata(
        record_count, timespan_end - timespan_start)
    self._metadata['levels']['names'] = level_names
    for name, level in zip(level_names, levels):
        self._metadata['levels'][name] = level
    for index, raw_slice_start in enumerate(raw_start_times):
        raw_slice_metadata[
            self._metadata['levels'][RAW_LEVEL_DIR]['names'][index]] = raw_slice_start
    raw_slice_metadata.save()
    return None
def main_loop(self):
    """
    Main loop:
    - monitors state of all players
    - pauses players if a new player starts playback
    """
    finished = False
    md = Metadata()
    active_players = set()
    while not finished:
        new_player_started = None
        for p in self.retrievePlayers():
            if p not in self.state_table:
                self.state_table[p] = PlayerState()
            try:
                state = self.retrieveState(p).lower()
            except:
                logging.info("Got no state from " + p)
                state = "unknown"
            self.state_table[p].state = state
            # Check if playback started on a player that wasn't
            # playing before
            if state == PLAYING:
                if p not in active_players:
                    new_player_started = p
                    active_players.add(p)
                md_old = self.state_table[p].metadata
                md = self.retrieveMeta(p)
                self.state_table[p].metadata = md
                if md is not None:
                    if not md.sameSong(md_old):
                        self.metadata_notify(md)
            else:
                if p in active_players:
                    active_players.remove(p)
        if new_player_started is not None:
            if self.auto_pause:
                logging.info("new player started, pausing other active players")
                self.pause_inactive(new_player_started)
            else:
                logging.debug("auto-pause disabled")
        time.sleep(0.2)
def generate(iq, first_frame):
    log.info("Info generator waiting on first frame...")
    first_frame.acquire()
    stime = time.time()
    log.info("Info generator got first frame! Start time: %2.2f", stime)
    samples = 0
    while True:
        action = iq.get()
        action['time'] = stime + (samples / 44100.0)
        samples += action['samples']
        if len(action['tracks']) == 2:
            m1 = Metadata(action['tracks'][0]['metadata'])
            s1 = action['tracks'][0]['start']
            e1 = action['tracks'][0]['end']
            m2 = Metadata(action['tracks'][1]['metadata'])
            s2 = action['tracks'][1]['start']
            e2 = action['tracks'][1]['end']
            log.info("Processing metadata for %d -> %d, (%2.2fs %2.2fs) -> (%2.2fs, %2.2fs).",
                     m1.id, m2.id, s1, s2, e1, e2, uid=m1.id)
            a = scwaveform.generate([s1, s2], [e1, e2],
                                    [m1.color, m2.color],
                                    [m1.waveform_url, m2.waveform_url],
                                    [m1.duration, m2.duration],
                                    action['duration'])
        else:
            for track in action['tracks']:
                metadata = Metadata(track['metadata'])
                start = track['start']
                end = track['end']
                log.info("Processing metadata, %2.2fs -> %2.2fs.",
                         start, end, uid=metadata.id)
                a = scwaveform.generate(start, end, metadata.color,
                                        metadata.waveform_url,
                                        metadata.duration, action['duration'])
        # Embed the generated PNG as a base64 data URI (assumes `a` is raw
        # PNG bytes).
        action['waveform'] = "data:image/png;base64,%s" % \
            base64.encodebytes(a).decode('ascii')
        action['width'] = int(action['duration'] * scwaveform.DEFAULT_SPEED)
        action['unicode'] = u"\x96\x54"
        yield action
def generate(self, md: metadata.Metadata, artifact: metadata.Artifact,
             tpl: templates.Templates, args: argparse.Namespace):
    output = md.path(root='output_web').joinpath('project_index.html')
    print(f'Generating project index at {output}')
    promos = md.path(root='output_meta').joinpath('tracked_promotions.json')
    tracked_promos = json.loads(promos.read_text('utf-8')) if promos.exists() else {}
    tpl.env.filters['maventopath'] = lambda p: metadata.mvn_to_path(
        md, p, root='empty_root')
    template = tpl.env.get_template('project_index.html')
    output.write_text(template.render(md=md, promos=tracked_promos), 'utf-8')
def md_update(self, msg):
    self.logger.info("Updating metadata for: '%s'" % msg["MDATA_KEY_FILEPATH"])
    md_path = msg["MDATA_KEY_FILEPATH"]
    try:
        Metadata.write_unsafe(path=md_path, md=msg)
    except BadSongFile as e:
        self.logger.info("Cannot find metadata file: '%s'" % e.path)
    except InvalidMetadataElement as e:
        self.logger.info("Metadata instance not supported for this file '%s'"
                         % e.path)
        self.logger.info(str(e))
    except Exception as e:
        # TODO : add md_path to problem path or something?
        self.fatal_exception("Unknown error when writing metadata to: '%s'"
                             % md_path, e)
def load_image(self, hybe, channel):
    """ Load image for image-based registration """
    # TODO: move away from the hardcoded acquisition lookup
    acq = [i for i in os.listdir(self.metadata_path) if hybe + '_' in i][0]
    temp_metadata = Metadata(os.path.join(self.metadata_path, acq))
    try:
        stk = temp_metadata.stkread(Position=self.posname, Channel=channel,
                                    hybe=hybe)
    except:
        """ Issue with imaging """
        stk = None
        self.completed = True
        self.utilities.save_data('Imaging Failed', Dataset=self.dataset,
                                 Position=self.posname, Hybe=self.hybe,
                                 Channel=self.channel, Type='log')
        self.utilities.save_data('Failed', Dataset=self.dataset,
                                 Position=self.posname, Hybe=self.hybe,
                                 Channel=self.channel, Type='flag')
        self.utilities.save_data('Registration Failed', Dataset=self.dataset,
                                 Position=self.posname, Hybe=self.hybe,
                                 Type='log')
        self.utilities.save_data('Failed', Dataset=self.dataset,
                                 Position=self.posname, Hybe=self.hybe,
                                 Type='flag')
        self.utilities.save_data(str(self.hybe) + ' Failed',
                                 Dataset=self.dataset, Position=self.posname,
                                 Type='log')
        self.utilities.save_data('Failed', Dataset=self.dataset,
                                 Position=self.posname, Type='flag')
        # No stack to register against
        return None
    image = stk.mean(axis=2)
    denoised = gaussian_filter(image, self.image_blur_kernel)
    background = gaussian_filter(denoised, self.image_background_kernel)
    image = image.astype(float) - background.astype(float)
    # Convert to a z-score: center on the median, then scale by the std.
    zscore = image - np.median(image)
    zscore = zscore / np.std(image)
    return zscore
def __init__(self, strings):
    """
    Breaks metadata off of strings, checks the metadata to make sure they
    match, then decodes.

    Arguments:
    strings -- List of strings
    """
    symbols = [Metadata.fromstring(s) for s in strings]
    if not len(symbols):
        raise Exception("No symbols were provided to decode")
    # Make sure we have padding and k agreement between all symbols
    self.k = symbols[0][0].k
    self.padding = symbols[0][0].padding
    for meta, symbol in symbols:
        if not (meta.k == self.k):
            raise Exception("Provided symbols do not have k agreement")
        if not (meta.padding == self.padding):
            raise Exception("Provided symbols do not have padding agreement")
    # Invoke parent's init to set up raptor coding parameters
    super(StringDecoder, self).__init__(self.k)
    # Actually add symbols until decoding is possible
    # (numpy.fromstring is deprecated; numpy.frombuffer is the modern
    # replacement when the symbols are byte strings)
    for meta, symbol in symbols:
        self.symbols.append((meta.esi,
                             numpy.fromstring(symbol, dtype=config.dtype)))
    if not self.can_decode():
        raise Exception("Unable to decode with the symbols provided.")
    # Calculate i symbols
    self.calculate_i_symbols()
def movieInfo(guessData, tmdbid=None, imdbid=None):
    if not tmdbid and not imdbid:
        tmdb.API_KEY = tmdb_api_key
        search = tmdb.Search()
        title = guessData['title']
        if 'year' in guessData:
            response = search.movie(query=title, year=guessData["year"])
            if len(search.results) < 1:
                # Fall back to a year-less query when the year-constrained
                # search comes back empty.
                response = search.movie(query=title)
        else:
            response = search.movie(query=title)
        if len(search.results) < 1:
            return None
        result = search.results[0]
        release = result['release_date']
        tmdbid = result['id']
        log.debug("Guessed filename resulted in TMDB ID %s" % tmdbid)
    metadata = Metadata(MediaType.Movie, tmdbid=tmdbid, imdbid=imdbid,
                        language=settings.taglanguage, logger=log)
    log.info("Matched movie title as: %s %s (TMDB ID: %s)"
             % (metadata.title, metadata.date, tmdbid))
    return metadata
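A hedged usage sketch: the guess dict plausibly comes from guessit, whose guessit() returns a dict with the 'title' and 'year' keys this function reads; the filename is illustrative:

from guessit import guessit

meta = movieInfo(guessit('Some.Movie.2012.1080p.mkv'))
if meta:
    print(meta.title, meta.date)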
def main():
    # Create Metadata
    metadata = Metadata()
    # Create Replay Buffer
    g_buffer = ReplayMem(metadata)
    with Game(metadata) as game:
        # Create Agent
        agent = DDQLAgent(game.env.action_space.n)
        # Load files
        file_path = Path(Constants.MODEL_PATH)
        metadata_file = file_path / "metadata.p"
        if all([x.validate_load() for x in [g_buffer, agent]]) and metadata_file.is_file():
            print("Loading state...")
            with metadata_file.open("rb") as f:
                metadata = pickle.load(f)
            # Metadata
            game.load_metadata(metadata)
            g_buffer.load(metadata)
            agent.load()
        print("Running Eval...")
        evaluate(game, metadata, agent, 0)
def check_projection(self):
    if self.verbose:
        print('Checking Projection Zindexes')
    self.metadata = Metadata(self.metadata_path)
    self.len_z = len(self.metadata.image_table[
        (self.metadata.image_table.Position == self.posname)].Zindex.unique())
    if self.projection_function == 'None':
        self.projection_k = 0
    if self.projection_zstart == -1:
        self.projection_zstart = 0 + self.projection_k
    elif self.projection_zstart > self.len_z:
        print('zstart of ', self.projection_zstart,
              ' is larger than stk range of', self.len_z)
        raise ValueError('Projection Error')
    if self.projection_zend == -1:
        self.projection_zend = self.len_z - self.projection_k
    elif self.projection_zend > self.len_z:
        print('zend of ', self.projection_zend,
              ' is larger than stk range of', self.len_z)
        raise ValueError('Projection Error')
    elif self.projection_zend < self.projection_zstart:
        print('zstart of ', self.projection_zstart,
              ' is larger than zend of', self.projection_zend)
        raise ValueError('Projection Error')
    self.zindexes = np.array(range(self.projection_zstart,
                                   self.projection_zend,
                                   self.projection_zskip))
def __init__(self, book_id, activity):
    self.book_id = book_id
    self.activity = activity
    self.connection = getDictConnection()
    self.conn = getConnection()
    self.htmlUtils = HTMLutils()
    # bring in yaml metadata
    metadata = Metadata()
    self.columns = metadata.loadYaml("columns")
    self.pages = metadata.loadYaml("pages")
    if activity == "edit":
        self.report = Report("edit")
        self.header = "Edit Record"
        self.page = "edit"
        self.new_activity = "update"
        self.button_text = "Submit"
        self.show_blank = ""
        self.cancel_button_text = "Cancel"
        self.cancel_button_address = "detail.py?book_id=%s&activity=view" % book_id
    elif activity == "view":
        self.report = Report("record")
        self.header = "Book Record"
        self.page = "record"
        self.new_activity = "edit"
        self.button_text = "Edit"
        self.show_blank = "-"
        self.cancel_button_address = "main.py"
        self.cancel_button_text = "Back to Catalog"
    elif activity == "add":
        self.header = "Enter New Record"
        self.page = "edit"
        self.new_activity = "submit_new"
        self.button_text = "Save"
        self.show_blank = ""
        self.cancel_button_address = "main.py"
        self.cancel_button_text = "Cancel"
    else:
        raise Exception("Unrecognized activity: %s" % activity)
    # build the dictionary of autocomplete lists
    self.autoCompleteList = self._getAutoCList()
def _load_data(self): """Loads data for SRL""" # load boundary identification network and reader md_boundary = Metadata.load_from_file('srl_boundary') self.boundary_nn = load_network(md_boundary) self.boundary_reader = create_reader(md_boundary) self.boundary_itd = self.boundary_reader.get_inverse_tag_dictionary() # same for arg classification md_classify = Metadata.load_from_file('srl_classify') self.classify_nn = load_network(md_classify) self.classify_reader = create_reader(md_classify) self.classify_itd = self.classify_reader.get_inverse_tag_dictionary() # predicate detection md_pred = Metadata.load_from_file('srl_predicates') self.pred_nn = load_network(md_pred) self.pred_reader = create_reader(md_pred)
def __init__(self, book_id, activity):
    self.book_id = book_id
    self.activity = activity
    self.connection = getDictConnection()
    self.conn = getConnection()
    # bring in yaml metadata
    metadata = Metadata()
    self.columns = metadata.loadYaml('columns')
    self.pages = metadata.loadYaml('pages')
    self.list = []
    if activity == 'edit':
        self.header = 'Edit Record'
        self.page = 'edit'
        self.new_activity = 'update'
        self.button_text = 'Submit'
        self.show_blank = ''
        self.cancel_button_text = 'Cancel'
        self.cancel_button_address = 'detail.py?book_id=%s&activity=view' \
            % book_id
    elif activity == 'view':
        self.header = 'Book Record'
        self.page = 'record'
        self.new_activity = 'edit'
        self.button_text = 'Edit'
        self.show_blank = '-'
        self.cancel_button_address = 'main.py'
        self.cancel_button_text = 'Back to Catalog'
    elif activity == 'add':
        self.header = 'Enter New Record'
        self.page = 'edit'
        self.new_activity = 'submit_new'
        self.button_text = 'Save'
        self.show_blank = ''
        self.cancel_button_address = 'main.py'
        self.cancel_button_text = 'Cancel'
    # build the right query for the page and bring in the data
    if activity != 'add':
        self.query = Query()
        where = 'book.book_id =' + str(self.book_id)
        self.recordData = self.query.getData(self.page, where)
def Search(self, metadata):
    ticket = self._search_count
    self._search_count = self._search_count + 1
    thread = BaseTaskThread(
        onfinish=lambda result: self.do_searchsuccess(self._app, ticket, result),
        onerror=lambda e: self.do_searchfailure(self._app, ticket, e),
        target=self.do_search,
        kwargs={'metadata': Metadata.from_dict(metadata)})
    self._search_tasks[ticket] = thread
    thread.start()
    return ticket
def run(self):
    RDthread = None
    EDthread = None
    if self.do_revocation_detection:
        RDthread = RevocationDetector(self.dbname, self.dbuser, self.dbhost)
        RDthread.start()
    if self.do_expiration_detection:
        EDthread = ExpirationDetector(self.dbname, self.dbuser, self.dbhost)
        EDthread.start()
    if self.do_revocation_detection:
        RDthread.join()
    if self.do_expiration_detection:
        EDthread.join()
    if self.do_metadata:
        MDthread = Metadata(self.dbname, self.dbuser, self.dbhost)
        MDthread.start()
        MDthread.join()
def build_report(self):
    metadata = Metadata()
    display_data = metadata.interrogateMetadata(self.page, 'display')
    display_names = display_data['col_attributes']
    table = HtmlTable(border=1, cellpadding=3)
    table.addHeader(['Field', 'Entry'])
    for column, display in display_names:
        if self.activity == 'view':
            # make a simple table, not a form
            for rec in self.recordData:
                if rec[column]:
                    data = rec[column]
                else:
                    data = self.show_blank
                table.addRow([display, data])
        else:
            # use methods to build the form
            form = self.columns[column][0]['form_type']
            # type_method = {'text': 'self.getTextField(column)',
            #                'drop_down': 'self.getDropDown(column)',
            #                'radio_static': 'self.getStaticRadio(column)',
            #                'autocomplete': 'self.getAutocomplete(column)'}
            if form == 'text':
                form_field = self.getTextField(column)
            elif form == 'drop_down':
                form_field = self.getDropDown(column)
            elif form == 'radio_static':
                form_field = self.getStaticRadio(column)
            elif form == 'autocomplete':
                form_field = self.getAutocomplete(column)
            table.addRow([display, form_field])
    # push final product
    report = table.getTable()
    return report
def analyse_packages(self):
    """
    Generate information about all the installed packages.
    """
    section = 'PACKAGES'
    self.payload['PACKAGES'] = dict()
    for cpv in Packages.get_installed_CPVs():
        metadata = Metadata(cpv)
        package_info = dict()
        self.set_data(package_info, section, 'REPO', metadata.get_repo_name)
        self.set_data(package_info, section, 'SIZE', metadata.get_size)
        self.set_data(package_info, section, 'KEYWORD', metadata.get_keyword)
        self.set_data(package_info, section, 'BUILD_TIME',
                      metadata.get_build_time)
        if self.any_one_is_enabled(section, USE_FLAG_TYPES):
            # TODO: make this lazier
            use_flags = metadata.get_use_flag_information()
            for key in USE_FLAG_TYPES:
                # Bind `key` now; a bare closure would see only the final
                # loop value if it were called after the loop finishes.
                self.set_data(package_info, section, key,
                              lambda key=key: use_flags[key])
        self.payload['PACKAGES'][cpv] = package_info
def _load_data(self): """Loads data for NER""" md = Metadata.load_from_file('ner') self.nn = load_network(md) self.reader = create_reader(md, tagging=True) self.itd = self.reader.get_inverse_tag_dictionary()
class MetadataTest(unittest.TestCase):
    def setUp(self):
        self.meta = Metadata(100)
        self.root = self.meta.root
        self.madedir = self.meta.make_node(self.root, "made_dir", True)
        self.f = self.meta.make_node(self.madedir, "file", False)

    def test_root(self):
        self.assertTrue(self.meta.exists("/"))
        root = self.meta.get_element("/")
        self.assertEqual(root, self.root)
        self.assertTrue(root.is_dir())
        self.assertIsNone(root.parent)
        self.assertListEqual(root.blocks, [])

    def test_not_existance(self):
        self.assertFalse(self.meta.exists("/not_existing"))

    def test_node_making(self):
        self.assertTrue(self.madedir.is_dir())
        self.assertEqual(self.root, self.madedir.parent)
        self.assertEqual(self.madedir, self.meta.get_element("/made_dir"))
        self.assertEqual(self.madedir, self.meta.get_element("/made_dir/"))
        self.assertFalse(self.f.is_dir())
        self.assertTrue(self.meta.exists("/made_dir/file"))
        self.assertEqual(self.f, self.meta.get_element("/made_dir/file"))

    def test_block_reservation(self):
        reserved = []
        for i in range(0, 7):
            k = self.meta.reserve_block_for(self.f)
            reserved.append(k)
            self.assertNotIn(k, self.meta.free_blocks)
        self.assertListEqual(reserved, self.f.blocks)
        self.assertEqual(len(set(reserved)), 7)

    def test_removal(self):
        directory = self.meta.make_node(self.root, "rm_dir", True)
        file_one = self.meta.make_node(directory, "file_one", False)
        file_two = self.meta.make_node(directory, "file_two", False)
        reserved = []
        reserved.append(self.meta.reserve_block_for(file_one))
        reserved.append(self.meta.reserve_block_for(file_one))
        reserved.append(self.meta.reserve_block_for(file_two))
        self.meta.remove(directory)
        self.assertFalse(self.meta.exists("/rm_dir/file_one"))
        self.assertFalse(self.meta.exists("/rm_dir"))
        for i in reserved:
            self.assertIn(i, self.meta.free_blocks)

    def test_move(self):
        move_dir = self.meta.make_node(self.root, "move_dir", True)
        self.meta.move_node(self.f, move_dir, "blah")
        self.assertFalse(self.meta.exists("/made_dir/file"))
        self.assertTrue(self.meta.exists("/move_dir/blah"))
        self.meta.move_node(self.f, self.madedir, "file")
        self.assertTrue(self.meta.exists("/made_dir/file"))
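The test case above can be run directly with the stock unittest runner:

if __name__ == '__main__':
    unittest.main()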
def _load_data(self): """Loads data for POS""" md = Metadata.load_from_file('pos') self.nn = load_network(md) self.reader = create_reader(md) self.itd = self.reader.get_inverse_tag_dictionary()
def test(self):
    meta = Metadata()
    meta.setMetadataPair(meta.METADATA_START_DATE, '20021023')
    mess = self.buildMessage(meta, "rep.metadataReport")
    print("message:%s" % mess)
    return mess
def entry_exists(date):
    """Check if an entry for the given date exists."""
    from metadata import Metadata
    data = Metadata.get(date.year, date.month).get_data_for_day(date.day)
    return data is not None
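A minimal usage sketch: entry_exists only needs an object exposing year, month, and day, such as a datetime.date:

import datetime

if entry_exists(datetime.date.today()):
    print("An entry for today already exists")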
class Book(object):
    """Represents an ebook in html format."""

    def __init__(self, title):
        self.meta = Metadata(title)
        self.url = self.meta['url']
        print('%(title)s from %(url)s' % self.meta)
        self.content = ''

    def make(self):  # , filename):
        """Retrieve a book from the given url."""
        # Get all the html content of the book
        request = Request(self.url)
        pages = request.retrieve(self.meta)
        # Remove headers, footers, fix relative links
        content = ''
        images = []
        progress = ProgressBar(len(pages), message='Removing non content:\t')
        for i, page in enumerate(pages):
            progress.update(i + 1)
            section = Section(page)
            section.removeHeader(**self.meta['header-attrs'])
            section.removeFooter(self.meta['footer-tag'],
                                 **self.meta['footer-attrs'])
            section.fixRelativeLinks()
            # section.getImages(self.meta)
            images += section.getImages(self.meta)
            content += section.soup.prettify()  # .append(section)
        # Get all the images in the book.
        request.retrieveImages(self.meta, removeDup(images))
        # Make a local copy of the html book.
        self.content = '<html><head><title>%s</title></head><body>' % self.meta['title']
        self.content += content
        self.content += '</body></html>'
        filename = self.meta.filename(ext='.html')
        with open(filename, 'w') as f:
            f.write(self.content)

    def convert(self, format_):
        """Convert the book from html to another format."""
        print('Converting from html to %s' % format_)
        command = ['ebook-convert',
                   self.meta.filename(ext='.html'),
                   self.meta.filename(ext=format_),
                   ' --authors \"%(author)s\"' % self.meta,
                   ' --level1-toc //h:h1',
                   ' --level2-toc //h:h2']
        output_dir = self.meta.filename(ext='')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        with open(os.path.join(output_dir, 'ebook-convert.log'), 'w') as log:
            subprocess.call(command, stdout=log)
def test(self):
    meta = Metadata()
    meta.setMetadataPair(meta.METADATA_START_DATE, '20021023')
    mess = self.buildMessage(meta, "eop.EarthObservation")
    print("message:%s" % mess)
    return mess
if Config._config is not None:
    if "api" in Config._config:
        run_api = Config._config["api"]
    if "webui" in Config._config:
        API.ui = Config._config["webui"]
        if API.ui:
            run_api = True
    if "scrape" in Config._config:
        run_metadata = Config._config["scrape"]
    if "host" in Config._config:
        API.host = Config._config["host"]
    if "port" in Config._config:
        API.port = Config._config["port"]
if os.path.exists("tmp"):
    shutil.rmtree("tmp")
Database.start()
Share.start()
if run_metadata:
    Metadata.start()
if run_api:
    print("Serving Web UI/RESTful API on http://%s:%i" % (API.host, API.port))
    API.start()
else:
    while True:
        time.sleep(100)
def run_modules(self):
    ag = int(self.gv.settings.args['--aggression'])
    self.debug.print_debug(self, u'Running at aggression level {0} {1}'.format(
        ag, "[grrr!]" if ag == 10 else ""))
    if ag > 10:
        self.debug.print_debug(self, "WARNING: safety bail-out features are "
                                     "disabled at aggression level 11")
    if self.args['bibscan']:
        BibliographyDatabase(self.gv).scan()
    else:
        # check for stylesheets
        self.gv.check_file_exists(self.gv.docx_style_sheet_dir)
        # metadata file
        self.gv.metadata_file = self.set_metadata_file()
        self.gv.mk_dir(self.gv.output_folder_path)
        if self.args['doc']:
            # run doc to docx conversion, then run docx to tei
            UnoconvToDocx(self.gv).run('doc')
            DocxToTei(self.gv).run(True, self.args['--proprietary'])
        elif self.args['odt']:
            # run odt to docx conversion, then run docx to tei
            UnoconvToDocx(self.gv).run('odt')
            DocxToTei(self.gv).run(True, self.args['--proprietary'])
        elif self.args['other']:
            # run other unoconv-supported format to docx conversion,
            # then run docx to tei
            UnoconvToDocx(self.gv).run('unoconv')
            DocxToTei(self.gv).run(True, self.args['--proprietary'])
        elif self.args['docx']:
            # run docx to tei conversion;
            # includes hooks for proprietary transforms if enabled
            DocxToTei(self.gv).run(True, self.args['--proprietary'])
        elif self.args['docxextracted']:
            self.debug.print_debug(self, u'Skipping docx extraction')
            DocxToTei(self.gv).run(False, self.args['--proprietary'])
        elif self.args['tei']:
            self.debug.print_debug(self, u'Skipping docx extraction; '
                                         u'processing TEI file')
            DocxToTei(self.gv).run(False, self.args['--proprietary'], tei=True)
        if self.args['--puretei']:
            self.debug.print_debug(self, u'Exiting as TEI transform complete')
            return
        metadata = Metadata(self.gv)
        metadata.pre_clean()
        # run size classifier (aggression 5)
        SizeClassifier(self.gv).run()
        # run bibliographic addins handler (aggression 4)
        found_bibliography = BibliographyAddins(self.gv).run()
        # run list classifier (aggression 4)
        ListClassifier(self.gv).run()
        bibliography_classifier = BibliographyClassifier(self.gv)
        if not found_bibliography:
            # run bibliographic classifier (aggression 4)
            bibliography_classifier.run()
        # tei (aggression 3)
        TeiManipulate(self.gv).run()
        # run tei to nlm conversion
        TeiToNlm(self.gv).run(not found_bibliography)
        if self.gv.settings.args['--purenlm']:
            self.debug.print_debug(self, u'Exiting as NLM transform complete')
            return
        manipulate = NlmManipulate(self.gv)
        if not self.gv.used_list_method:
            manipulate.fuse_references()
        # run reference linker
        if not self.args['--nolink']:
            rl = ReferenceLinker(self.gv)
            rl.run(self.args['--interactive'])
            rl.cleanup()
        # run table classifier
        cc = CaptionClassifier(self.gv)
        if int(self.args['--aggression']) > int(self.gv.settings.get_setting(
                'tablecaptions', self, domain='aggression')):
            cc.run_tables()
        if int(self.args['--aggression']) > int(self.gv.settings.get_setting(
                'graphiccaptions', self, domain='aggression')):
            cc.run_graphics()
        # run metadata merge
        metadata.run()
        if self.args['--interactive']:
            bibliography_classifier.run_prompt(True)
        # process any bibliography entries that are possible
        BibliographyDatabase(self.gv).run()
        # remove stranded titles and cleanup
        manipulate.final_clean()
        if self.args['--identifiers']:
            IdGenerator(self.gv).run()
        if self.args['--chain']:
            # construct and run an XSLT chainer
            XslChain(self.gv).run()
        if self.args['--clean']:
            ComplianceEnforcer(self.gv).run()
class ProbeRunner(object):
    """ Probe runner utility """

    def __init__(self, **kwargs):
        self.is_docker_daemon_running()
        self.test_dir = self._create_temp_dir()
        self.imageutils = ImageUtils()
        self.containerutils = ContainerUtils()
        self.selinux_checks = SELinuxTests()
        self.selinux_denials_test = SELinuxDenials()
        self.image_inspection_test = InspectImage()
        self.container_inspection_test = InspectContainer()
        self.metadata = Metadata()
        self._process_kwargs(**kwargs)

    def setup(self):
        """ Setup before test run """
        self.getimage = GetImage(self.offline, self.test_dir)
        # This method processes the image repository and returns the image
        # name with its tag
        self.cert_container = self.cert_container_name()

    def _process_kwargs(self, **kwargs):
        """ Process the kwargs given to the __init__ method """
        self.image = kwargs["image"]
        self.dockeruser = kwargs.get("user", None)
        self.output_dir = kwargs.get("output_dir", None)
        self.offline = kwargs.get("offline", None)

    def is_docker_daemon_running(self):
        """ Raise if docker daemon is not running """
        if not is_docker_running():
            raise ctsexceptions.CTSDockerServiceNotRunning

    def cert_container_name(self):
        """ Returns a container name """
        random_name = "".join(choice(ascii_lowercase) for _ in range(6))
        return "cert_%s" % random_name

    def is_image_tests(self):
        """ Whether image tests are to be run """
        return True

    def test_scripts(self):
        """ Returns names of test scripts """
        return constants.TEST_SCRIPTS_NAME

    def test_scripts_source_path(self):
        """ Source path of test scripts """
        return constants.TEST_SCRIPTS_PATH

    def test_scripts_dir_in_container(self):
        """ Destination path of test scripts in container """
        return constants.TEST_SCRIPTS_DIR_IN_CONT

    def cert_temp_parent_dir(self):
        """ Parent directory of test dir at host """
        return constants.CERT_TEMP_PARENT_DIR

    def _create_temp_dir(self):
        """ Create a temporary directory at host to be shared as volume """
        return tempfile.mkdtemp(dir=self.cert_temp_parent_dir())

    def cert_shared_dir_at_host(self):
        """ Shared directory path at host to be used as volume """
        return self.test_dir

    def copy_scripts_in_test_dir(self):
        """ Copy test scripts into the cert dir at host """
        [copy(script, self.cert_shared_dir_at_host())
         for script in self.test_scripts_source_path()]

    def change_perm_for_test_dir(self, test_dir, perm):
        """ Change permission of test_dir """
        os.chmod(test_dir, perm)

    def pkg_report_path(self):
        """ Package report path generated in live container test mode """
        return os.path.join(self.cert_shared_dir_at_host(),
                            constants.PACKAGE_REPORT)

    def _add_user_in_params(self, user, params):
        """ Add user in parameters """
        # assumes params start with "run"
        params.insert(1, "--user")
        params.insert(2, user)
        return params

    # ------------------Image-test-utilities------------------

    def _get_params_for_image_tests(self, volumes, entrypoint):
        """ Generate parameters for image tests """
        # self._add_user_in_params() assumes params starts with "run",
        # hence params should start with "run"
        params = ["run", "-v", volumes, "--entrypoint", entrypoint,
                  "--name", self.cert_container, self.image]
        # if container needs to be run as user root
        if self.dockeruser:
            self._add_user_in_params(self.dockeruser, params)
        return params

    def _test_kickstart_path_in_container(self):
        """ Returns entry point """
        return os.path.join(self.test_scripts_dir_in_container(),
                            constants.TEST_KICKSTART_SCRIPT)

    def _get_volumes_mapping(self):
        """ Returns volumes mapping from host to container """
        volumes = "%s:%s" % (self.cert_shared_dir_at_host(),
                             self.test_scripts_dir_in_container())
        return volumes
    def run_image_tests(self):
        """ Run image tests """
        volumes = self._get_volumes_mapping()
        entrypoint = self._test_kickstart_path_in_container()
        params = self._get_params_for_image_tests(volumes, entrypoint)
        try:
            self.containerutils.create_container(params)
        except:
            raise
        else:
            msg = "Successfully ran image tests."
            return self.pkg_report_path()

    # -------------------Test-run-utilities----------------------

    def pre_test_run_setup(self):
        """ Run pre test run setup """
        self.copy_scripts_in_test_dir()
        self.change_perm_for_test_dir(self.cert_shared_dir_at_host(), 0o777)

    def _run(self):
        """ Run all tests with clean up utility """
        self.run_image_tests()
        # image inspection test
        msg = "Inspecting image under test.."
        print(msg)
        inspect_image_report_path = os.path.join(
            self.cert_shared_dir_at_host(),
            "%s.json" % self.image_inspection_test.__class__.__name__
        )
        self.image_inspection_test.run(
            # image=self.cert_image,
            image=self.image,
            export_file=inspect_image_report_path)
        # metadata tests
        metadata_report_path = os.path.join(
            self.cert_shared_dir_at_host(),
            "%s.json" % self.metadata.__class__.__name__
        )
        msg = "Collecting metadata of image under test.."
        print(msg)
        self.metadata.run(
            # image=self.cert_image,
            image=self.image,
            export_file=metadata_report_path)

    def clean_up(self, post_run=True, during_setup=False):
        """ Clean up after test run """
        msg = "Cleaning.."
        try:
            if during_setup:
                self.clean.remove_test_dir(self.test_dir)
                return
            if self.cert_container:
                self.clean.clean_container(self.cert_container)
            if post_run:
                if self.test_dir:
                    self.clean.remove_test_dir(self.test_dir)
                if self.image:
                    self.clean.clean_image(self.image, all_tags=True)
        except Exception as e:
            raise

    def create_testdata_tarball(self):
        """ Create tarball of test data """
        source_dir = self.cert_shared_dir_at_host()
        tempdir = tempfile.mkdtemp(dir=source_dir)
        files = [os.path.join(source_dir, item)
                 for item in os.listdir(source_dir)
                 if os.path.isfile(os.path.join(source_dir, item))]
        [copy(f, tempdir) for f in files]
        return create_tarball(tempdir, "container_cert", "/tmp/")

    def move_result_to_output_dir(self, result, output):
        """ Move resultant test data from `result` to `output` directory;
        data present in `result` is deleted after the move to `output` dir """
        if not os.path.isdir(output):
            raise ctsexceptions.CTSOutputDirectoryDoesNotExist(output)
        if os.path.isdir(result):
            files = [os.path.join(result, item)
                     for item in os.listdir(result)
                     if os.path.isfile(os.path.join(result, item))]
        # if -t option is given for archiving the output, it will generate a
        # tarfile as output and thus it will be a file
        elif os.path.isfile(result):
            files = [result]
        try:
            [copy(src, output) for src in files]
        except IOError as e:
            raise
        else:
            if os.path.isfile(result):
                return os.path.join(output, os.path.basename(result))
            else:
                return output

    def remove_test_scripts_from_result(self):
        """ Remove test scripts from result directory if any """
        for item in os.listdir(self.cert_shared_dir_at_host()):
            if item in self.test_scripts():
                os.unlink(os.path.join(self.cert_shared_dir_at_host(), item))

    def _post_run(self):
        """ Operations to be performed post test run """
        self.remove_test_scripts_from_result()
        result = self.cert_shared_dir_at_host()
        print(result)
        # if test-run data (dir/tarfile) needs to be exported to a
        # particular dir
        if self.output_dir:
            result = self.move_result_to_output_dir(result, self.output_dir)
        return result

    def run(self):
        """ Run all tests """
        self.setup()
        self.pre_test_run_setup()
        self._run()
        self._post_run()
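A hypothetical end-to-end invocation of ProbeRunner; per _process_kwargs, image is the only required keyword, and the name shown is illustrative:

runner = ProbeRunner(image='registry.example.com/app:latest')
runner.run()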
class Test(unittest.TestCase):
    def setUp(self):
        self.md = Metadata()
        self.md.create()
        self.md.set_origins([('GRIB', 1, 2, 3)])
        self.md.set_reference_time_info(datetime(2007, 1, 2, 3, 4, 5))

    def testOriginMatcher(self):
        # Expressions that should match the GRIB,1,2,3 origin, and
        # expressions that should not.
        matching = ['GRIB', 'GRIB,,,', 'GRIB,1,,', 'GRIB,,2,', 'GRIB,,,3',
                    'GRIB,1,2,', 'GRIB,1,,3', 'GRIB,,2,3', 'GRIB,1,2,3']
        non_matching = ['GRIB,2', 'GRIB,,3', 'GRIB,,,1', 'BUFR,1,2,3']
        for expr in matching:
            self.assertTrue(Matcher.MatchOrigin(expr).match(self.md))
        for expr in non_matching:
            self.assertFalse(Matcher.MatchOrigin(expr).match(self.md))

    def testReftimeMatcher(self):
        # Reference time is 2007-01-02 03:04:05; the expressions are grouped
        # by granularity (year, month, day, hour, minute, second).
        matching = [
            ">=2007", "<=2007", ">2006", "<2008", "==2007", "=2007",
            ">=2007-01", "<=2007-01", ">2006-12", "<2007-02",
            "==2007-01", "=2007-01",
            ">=2007-01-02", "<=2007-01-02", ">2007-01-01", "<2007-01-03",
            "==2007-01-02", "=2007-01-02",
            ">=2007-01-02 03", "<=2007-01-02 03", ">2007-01-02 02",
            "<2007-01-02 04", "==2007-01-02 03", "=2007-01-02 03",
            ">=2007-01-02 03:04", "<=2007-01-02 03:04", ">2007-01-02 03:03",
            "<2007-01-03 03:05", "==2007-01-02 03:04", "=2007-01-02 03:04",
            ">=2007-01-02 03:04:05", "<=2007-01-02 03:04:05",
            ">2007-01-02 03:04:04", "<2007-01-03 03:04:06",
            "==2007-01-02 03:04:05", "=2007-01-02 03:04:05",
        ]
        non_matching = [
            ">=2008", "<=2006", ">2007", "<2007", "==2006",
            ">=2007-02", "<=2006-12", ">2007-01", "<2007-01", "==2007-02",
            ">=2007-01-03", "<=2006-01-01", ">2007-01-02", "<2007-01-02",
            "==2007-01-01",
            ">=2007-01-02 04", "<=2006-01-02 02", ">2007-01-02 03",
            "<2007-01-02 03", "==2007-01-02 02",
            ">=2007-01-02 03:05", "<=2006-01-02 03:03", ">2007-01-02 03:04",
            "<2007-01-02 03:04", "==2007-01-02 03:03",
            ">=2007-01-02 03:04:06", "<=2006-01-02 03:04:04",
            ">2007-01-02 03:04:05", "<2007-01-02 03:04:05",
            "==2007-01-02 03:04:04",
        ]
        for expr in matching:
            self.assertTrue(Matcher.MatchReftime(expr).match(self.md))
        for expr in non_matching:
            self.assertFalse(Matcher.MatchReftime(expr).match(self.md))
def __init__(self):
    self.connection = getDictConnection()
    metadata = Metadata()
    self.columns = metadata.loadYaml('columns')
    self.authorData = self.columns['author'][0]