Example 1
def main():
    args = get_arguments()

    log.basicConfig(format='[%(asctime)s] [%(levelname)s] %(message)s',
                    level=LOG_LEVEL)
    log.info("Start of '{}'.".format(__file__))

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
        log.info("Created output directory '{}'.".format(args.output_dir))

    for f in find_files(args.input_dir, '*.mid', path=False):
        midi_old = pretty_midi.PrettyMIDI(os.path.join(args.input_dir, f))
        midi_new = trim_midi_file(args.sec_from, args.sec_to, midi_old)
        midi_new.write(os.path.join(args.output_dir, f))
        log.info("Processed file '{}'".format(f))

    for f in find_files(args.input_dir, '*.wav', path=False):
        audio_old, sr = librosa.load(os.path.join(args.input_dir, f))
        audio_new = trim_audio_file(args.sec_from, args.sec_to, audio_old, sr)
        librosa.output.write_wav(os.path.join(args.output_dir, f), audio_new,
                                 sr)
        log.info("Processed file '{}'".format(f))

    log.info("End of '{}'.".format(__file__))
Example 2
def test_preprocessing():
    # make arguments
    args = make_args()

    # prepare dummy wav files
    wavdir = "data/wav"
    if not os.path.exists(wavdir):
        os.makedirs(wavdir)
    for i in range(5):
        make_dummy_wav(wavdir + "/%d.wav" % i, 8000, args.fs)

    # feature extract
    wav_list = find_files(wavdir, "*.wav")
    if not os.path.exists(args.wavdir):
        os.makedirs(args.wavdir)
    world_feature_extract(wav_list, args)

    # calc_stats
    file_list = find_files(args.hdf5dir, "*.h5")
    calc_stats(file_list, args)

    # noise shaping
    wav_list = find_files(args.wavdir, "*.wav")
    if not os.path.exists(args.writedir):
        os.makedirs(args.writedir)
    noise_shaping(wav_list, args)

    # assert list length
    wav_ns_list = find_files(args.writedir, "*.wav")
    assert len(wav_list) == len(file_list)
    assert len(wav_list) == len(wav_ns_list)
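
make_dummy_wav is not shown above; a minimal sketch of such a helper, assuming it writes n_samples of random noise at the given sampling rate (the soundfile dependency is an assumption, not part of the original test):

import numpy as np
import soundfile as sf

def make_dummy_wav(path, n_samples, fs):
    # Write n_samples of uniform random noise to a wav file at rate fs.
    sf.write(path, np.random.uniform(-1.0, 1.0, n_samples).astype(np.float32), fs)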
Example 3
    def compile(self):
        artifacts_path = os.path.join(self.paths["arch"], "artifacts")

        with open(artifacts_path, "wb") as artifacts_file:
            for path in utils.find_files(self.paths["arch"], ext=".art") + \
                    utils.find_files(self.paths["maps"], ext=".art"):
                utils.file_copy(path, artifacts_file)
Example 4
    def compile(self):
        treasures_path = os.path.join(self.paths["arch"], "treasures")

        with open(treasures_path, "wb") as treasures_file:
            for path in utils.find_files(self.paths["arch"], ext=".trs") + \
                    utils.find_files(self.paths["maps"], ext=".trs"):
                utils.file_copy(path, treasures_file)
Example 5
def load_piece_data(directory, audio_sr, velocity, fac, valid=True):
    '''Loader that reads tune from directory and yields audio
    waveform and encoded piano roll as tuple of 3 arrays:
    (W, T, I). If more audio files represent single midi file,
    one is chosen randomly.
    '''

    midi_files = find_files(directory, '*.mid')
    randomized_midi_files = randomize_files(midi_files)

    for midi_filename in randomized_midi_files if valid else midi_files:
        # load piano roll from midi file
        proll = pretty_midi.PrettyMIDI(midi_filename).get_piano_roll(
            fs=int(audio_sr / fac))
        proll /= 127  # velocity to <0;1>
        if not velocity:
            proll[proll > 0] = 1
        # encode piano roll
        table, indices = roll_encode(proll, fac)
        # add 0-roll if not present (we will need it later for padding)
        if get_roll_index(table, np.zeros(128)).shape[0] == 0:
            table = np.concatenate((table, np.zeros(shape=(1, 128))))
        # get respective audio file names and choose 1 randomly
        base = midi_filename.rsplit('/', 1)[-1]
        base = re.sub(r'(.*)%s$' % re.escape('.mid'), r'\1', base)
        audio_files = find_files(directory, base + '*.wav')
        if not audio_files:
            raise ValueError('No files found for \'{}\'.'.format(base +
                                                                 '*.wav'))
        audio_filename = random.choice(audio_files)
        # load audio waveform
        audio, _ = librosa.load(audio_filename, sr=audio_sr, mono=True)
        yield audio, table, indices
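
A minimal way to consume this generator might look as follows; the directory and parameter values are placeholders, not part of the original example:

for audio, table, indices in load_piece_data('dataset/', audio_sr=16000,
                                             velocity=False, fac=10):
    # audio: waveform samples, table: encoding table, indices: per-frame indices
    print(len(audio), table.shape, len(indices))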
Example 6
def main():
    # Get paths to directories inside the root directory.
    for path in utils.find_files(paths["root"],
                                 rec=False,
                                 ignore_dirs=False,
                                 ignore_files=True):
        paths[os.path.basename(path)] = path

    if "collect_none" not in what_collect:
        # Nothing was set to collect, so by default we'll collect everything.
        if not what_collect:
            for entry in dict(globals()):
                if entry.startswith("collect_"):
                    what_collect.append(entry)

        # Call the collecting functions.
        for collect in what_collect:
            print("Collecting {}...".format(collect.split("_")[-1]))
            globals()[collect]()

    # Copy all files in the arch directory to specified directory.
    if copy_dest:
        files = utils.find_files(paths["arch"], rec=False)

        for path in files:
            shutil.copyfile(path,
                            os.path.join(copy_dest, os.path.basename(path)))
Example 7
def train_model():
    train_files = find_files(train_data_path)
    test_files = find_files(test_data_path)
    train_samples = []
    test_samples = []
    for f in train_files:
        train_samples.extend(random.sample(list(read_file(f)), 93000))
    for f in test_files:
        test_samples.extend(list(read_file(f)))
    print("Total number of train samples: %d" % len(train_samples))
    print("Total number of test samples: %d" % len(test_samples))
    train_loader = DataLoader(dataset=TrainDataset(train_samples),
                              batch_size=8,
                              shuffle=True)
    test_loader = DataLoader(dataset=TestDataset(test_samples),
                             batch_size=1,
                             shuffle=True)
    model = CNN()
    model.load_state_dict(torch.load(os.path.join(save_path,
                                                  "model10-21.pkl")))
    trainer = Trainer(model=model,
                      iter_num=iter_num,
                      save_dir=save_path,
                      save_log=False)
    trainer.push_data(train_loader)
    trainer.push_data(test_loader, is_train=False)
    trainer.train()
Example 8
    def _validate_and_prepare_dir(self, app_dir_path, undo_ops, python_exe):
        results = run_installed_tests_as_subprocess(app_dir_path,
                                                    self.django_settings_module,
                                                    python_exe_path=python_exe,
                                                    read_config_file=False)
        if len(results.error_messages)>0:
            for error in results.error_messages:
                logger.error("ERROR>> %s" % error)
        if len(results.warning_messages)>0:
            for warning in results.warning_messages:
                logger.warning("WARNING>> %s" % warning)
        results.print_final_status_message(logger)
        if results.get_return_code() != SUCCESS_RC:
            return results.get_return_code()
        config = django_config_from_validation_results(self.django_settings_module,
                                                       VERSION,
                                                       results)
        write_json(config.to_json(), os.path.join(app_dir_path, DJANGO_CFG_FILENAME))

        # undo the changes we made
        for (op_fun, args) in undo_ops:
            op_fun(*args)
        # delete all the .pyc files
        find_files(app_dir_path, "\.pyc$", os.remove)
        # (re)create the archive
        if self.archive_type == "zip":
            archive = ZipfileHandler(self.archive_file, "w")
        else:
            archive = TarfileHandler(self.archive_file, "w:gz")
        archive.create_new_from_dir(app_dir_path)
        archive.close()
        return SUCCESS_RC
Example 9
def main():
    args = get_arguments()

    log.basicConfig(format='[%(asctime)s] [%(levelname)s] %(message)s',
                    level=LOG_LEVEL)
    log.info("Start of '{}'.".format(__file__))

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
        log.info("Created output directory '{}'.".format(args.output_dir))

    for f in find_files(args.input_dir, '*.mid', path=False):
        midi_old = pretty_midi.PrettyMIDI(
            os.path.join(args.input_dir, f))
        midi_new = trim_midi_file(args.sec_from, args.sec_to, midi_old)
        midi_new.write(
            os.path.join(args.output_dir, f))
        log.info("Processed file '{}'".format(f))

    for f in find_files(args.input_dir, '*.wav', path=False):
        audio_old, sr = librosa.load(os.path.join(args.input_dir, f))
        audio_new = trim_audio_file(args.sec_from, args.sec_to, audio_old, sr)
        librosa.output.write_wav(
            os.path.join(args.output_dir, f), audio_new, sr)
        log.info("Processed file '{}'".format(f))

    log.info("End of '{}'.".format(__file__))
Example 10
    def compile(self):
        num_images = 0
        dev_dir = os.path.join(self.paths["arch"], "dev")
        bmaps_path = os.path.join(self.paths["arch"], "bmaps")
        images_path = os.path.join(self.paths["arch"], "atrinik.0")

        with open(bmaps_path, "wb") as bmaps_file, \
                open(images_path, "wb") as images_file:
            # 'bug.101' must be the first entry.
            for path in utils.find_files(dev_dir, ext="bug.101.png") + \
                    sorted(utils.find_files(self.paths["arch"], ext=".png",
                                            ignore_paths=(dev_dir,)),
                           key = lambda s: os.path.basename(s)[:-4]):
                name = os.path.basename(path)[:-4]
                # Write it to the bmaps file.
                bmaps_file.write("{}\n".format(name).encode())

                # Get the image's file size.
                size = os.path.getsize(path)
                # Write information about the image to the atrinik.0 file.
                images_file.write("IMAGE {} {} {}\n".format(
                    num_images, size, name).encode())

                with open(path, "rb") as image_file:
                    images_file.write(image_file.read())

                num_images += 1
Example 11
 def __init__(self, speech_dir, noisy_dir, sr=16000, pairs=None):
     super(TestDataset, self).__init__()
     self.speech_dir = speech_dir
     self.noisy_dir = noisy_dir
     self.pairs = pairs
     self.sr = sr
     self.speech_wav_files = utils.find_files(self.speech_dir)
     self.noisy_wav_files = utils.find_files(self.noisy_dir)
Example 12
    def __init__(self, dirloc):
        self.dirloc = dirloc

        # Obtain the dirname
        self.dirname = os.path.basename(self.dirloc)

        # Parse directory name
        regex = re.compile(self.pattern)
        self.attrs = regex.match(self.dirname)

        # Fall back to legacy mode if no match
        if not self.attrs:
            regex = re.compile(self.pattern_legacy)
            self.attrs = regex.match(self.dirname)
            if not self.attrs:
                raise Exception("Error: could not match directory '%s' to known pattern" % self.dirname)

        # Find the ISO file in the driver repo
        iso_files = utils.find_files(self.dirloc, "*.iso")
        if len(iso_files) > 1:
            raise Exception("Error: more than one file found! (%s)" % iso_files)
        if not iso_files:
            raise Exception("Error: could not find any ISOs in directory %s" % self.dirloc)

        self.driver_iso = DriverISO(iso_files[0])

        # Find the repo metadata file
        metadata_files = utils.find_files(self.dirloc, "*.metadata.md5")

        if len(metadata_files) > 1:
            raise Exception("Error: more than one metadata files returned! (%s)" % metadata_files)
        if not metadata_files:
            raise Exception("Error: could not find any metadata files")

        self.metadata_file = FileObject(metadata_files[0])

        # Find RPM info file
        rpminfo_files = utils.find_files(self.dirloc, "*.rpminfo")
        if len(rpminfo_files) > 1:
            raise Exception("Error: more than one rpminfo file returned! (%s)" % rpminfo_files)
        if not rpminfo_files:
            raise Exception("Error: could not find any rpminfo files")

        self.rpminfo_file = FileObject(rpminfo_files[0])

        # Find the ZIP file
        zip_files = utils.find_files(self.dirloc, "*.zip")
        if len(zip_files) > 1:
            raise Exception("Error: more than on zip fil returned! (%s)" % zip_files)
        if not zip_files:
            raise Exception("Error: could not find any zip files")

        self.zip_file = FileObject(zip_files[0])
Example 13
    def __init__(self,
                 speech_dir,
                 noise_dir,
                 noisy_dir=None,
                 mode='train',
                 sr=16000,
                 frame_len=512,
                 frame_shift=256,
                 block_size=32,
                 block_shift=16,
                 snr_level=[-10, -5, 0, 5, 10, 15, 20]):
        super(MyDataset, self).__init__()
        if not os.path.exists(speech_dir):
            raise NotADirectoryError('Speech dir does not exist')
        if not os.path.exists(noise_dir):
            raise NotADirectoryError('Noise dir does not exist')
        self.speech_dir = speech_dir
        self.noise_dir = noise_dir
        self.mode = mode
        self.noisy_dir = noisy_dir
        self.snr_level = snr_level
        self.sr = sr
        self.frame_len = frame_len
        self.frame_shift = frame_shift
        self.block_size = block_size
        self.block_shift = block_shift
        n = np.log2(self.frame_len)
        if int(n) < n:
            n = int(n) + 1
        self.n_fft = int(2**n)
        self.mu = None
        self.sigma = None

        self.speech_wav_files = utils.find_files(self.speech_dir)
        random.shuffle(self.speech_wav_files)
        self.noise_wav_files = utils.find_files(self.noise_dir)
        random.shuffle(self.noise_wav_files)
        if self.mode == 'test':
            if os.path.exists(self.noisy_dir) is False:
                os.mkdir(self.noisy_dir)
            self.noisy_wav_files = utils.find_files(self.noisy_dir)
            self.build_test_pairs()
        else:
            self.data_stats(sample_size=100)  # stats snr information
            self.noise_buffer = np.array([])
            self.speech_buffer = np.array([])
            self.noisy_buffer = np.array([])
            self.wav_idx = 0
Example 14
    def merge(self, batch_size: int = 100, _step: int = 0) -> List[Entry]:
        """Merge blocks into a single final list of entries.

        Blocks are batched in groups and merged recursively.

        Parameters
        ----------
        batch_size : int, optional
            The number of blocks in a merge batch. Defaults to 100.
        _step : int, optional
            Private parameter corresponding to the current processing stage.
        """
        block_paths = [path for _, path in find_files(self.temp_path)]

        if len(block_paths) == 1:
            # Only one block remaining => we're done.
            # Read the entries from it.
            last_block_path = block_paths[0]
            with open(last_block_path) as f:
                return list(_read_block(f))

        # Otherwise, batch blocks together and merge each of them into a
        # new block.
        for idx, batch in enumerate(grouped(batch_size, block_paths)):
            # The last `batch` may be end-padded with nones if the
            # number of items in `block_paths` is not a multiple of
            # `batch_size`.
            batch = filter(None, batch)

            out_path = os.path.join(self.temp_path, f"{_step}-{idx}")
            self._merge(out_path, *batch)

        # Merge the new blocks recursively.
        return self.merge(batch_size=batch_size, _step=_step + 1)
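
The comment above relies on a grouped helper that end-pads the final batch with None when the block count is not a multiple of batch_size; a minimal sketch consistent with that description (the implementation itself is an assumption):

from itertools import zip_longest

def grouped(n, iterable):
    # Yield tuples of n consecutive items; the last tuple is padded with None.
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=None)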
Example 15
def corpus_contents():
    args = get_args()
    ret = {}
    for fullfn, contents in find_files(args.corpusdir, ext='.xd'):
        xdid = parse_xdid(fullfn)
        ret[xdid.lower()] = contents
    return ret
Example 16
def id_list():
    samples = find_files(PREDICTED_BASE, 'npy')
    base = set()
    for sample in samples:
        sp = sample.split('/')[-2]
        base.add(sp)
    return list(sorted(base))
Example 17
    def _populate_database(self, Session, filepath):
        for fq_path, root, basename in find_files(filepath,
                                                  self._create_regex()):
            dbsession = Session()
            try:
                hash = calculate_hash(fq_path)
                contents = self.get_contents(fq_path)
                existing = dbsession.query(Document).filter_by(
                    filepath=fq_path).first()
                if existing is None:
                    doc = Document(filename=basename,
                                   filepath=fq_path,
                                   body=contents,
                                   hash=hash)
                    dbsession.add(doc)

                elif existing.hash != hash:
                    existing.hash = hash
                    existing.body = contents

                dbsession.commit()
            except Exception:
                dbsession.rollback()
            finally:
                dbsession.close()
Example 18
 def thread_loader(self, sess):
     stop = False
     # Count tune data files
     n_midi_files = len(find_files(self.data_dir, '*.mid'))
     if n_midi_files == 0:
         raise ValueError('No files found for \'{}\'.'.format(
             self.data_dir + '/*.mid'))
     one_percent = int(np.ceil(n_midi_files/100))
     print('files length: {}'.format(n_midi_files))
     # Go through the dataset repeatedly until stopped
     while not stop:
         # Randomly iterate over files and fetch tune data
         file_iterator = load_piece_data(self.data_dir,
                                         self.audio_sample_rate,
                                         self.velocity,
                                         self.compress_fac)
         for audio, table, indices in file_iterator:
             sess.run(self.queues['tune']['enQ'],
                      feed_dict={self.audio_placeholder: audio,
                                 self.table_placeholder: table,
                                 self.indices_placeholder: indices})
             # Track and report progress
             sess.run(self.increment_file_counter)
             file_counter = sess.run(self.file_counter)
             if file_counter % one_percent == 0:
                 print('Training progress: {:.02f} epochs '
                       '(file {} of {})'.format(file_counter/n_midi_files,
                                                file_counter, n_midi_files))
             if self.coord.should_stop():
                 stop = True
                 break
Example 19
 def thread_loader(self, sess):
     stop = False
     # Count tune data files
     n_midi_files = len(find_files(self.data_dir, '*.mid'))
     if n_midi_files == 0:
         raise ValueError('No files found for \'{}\'.'.format(
             self.data_dir + '/*.mid'))
     one_percent = int(np.ceil(n_midi_files / 100))
     print('files length: {}'.format(n_midi_files))
     # Go through the dataset repeatedly until stopped
     while not stop:
         # Randomly iterate over files and fetch tune data
         file_iterator = load_piece_data(self.data_dir,
                                         self.audio_sample_rate,
                                         self.velocity, self.compress_fac)
         for audio, table, indices in file_iterator:
             sess.run(self.queues['tune']['enQ'],
                      feed_dict={
                          self.audio_placeholder: audio,
                          self.table_placeholder: table,
                          self.indices_placeholder: indices
                      })
             # Track and report progress
             sess.run(self.increment_file_counter)
             file_counter = sess.run(self.file_counter)
             if file_counter % one_percent == 0:
                 print('Training progress: {:.02f} epochs '
                       '(file {} of {})'.format(file_counter / n_midi_files,
                                                file_counter, n_midi_files))
             if self.coord.should_stop():
                 stop = True
                 break
Example 20
    def compile(self):
        l = []

        for path in utils.find_files(self.paths["arch"], ext=".anim"):
            with open(path) as anim_file:
                for line in anim_file:
                    line = line.strip()

                    # Blank line or comment.
                    if not line or line.startswith("#"):
                        continue

                    if line.startswith("anim "):
                        l.append([line])
                    elif line != "mina":
                        l[len(l) - 1].append(line)

        animations_path = os.path.join(self.paths["arch"], "animations")

        with open(animations_path, "wb") as animations_file:
            for anim in sorted(l, key=lambda node: node[0][5:]):
                for line in anim:
                    animations_file.write("{}\n".format(line).encode())

                animations_file.write("mina\n".encode())
Example 21
 def __init__(self, audio_dir, model_settings, background_noise, silence_label,
              unknown_label, classes, augmentation_ops=[],
              augmentation_percentage=0, validation_percentage=10,
              testing_percentage=10, unknown_percentage=10,
              silence_percentage=10, testing_list={}, validation_list={},
              fingerprint_type='mfcc', mode='test', train=True):
     print('audio_dir = {}'.format(audio_dir))
     self.model_settings = model_settings
     print('sample_rate = {}'.format(self.model_settings['sample_rate']))
     self.background_data = []
     self.data_index = {}
     self.word_to_index = {}
     self.audio_dir = audio_dir
     self.audio_files = find_files(audio_dir)
     self.background_noise = background_noise
     self.words_list = [silence_label, unknown_label] + classes
     self.augmentation_ops = augmentation_ops
     self.augmentation_percentage = augmentation_percentage
     self.validation_percentage = validation_percentage
     self.testing_percentage = testing_percentage
     self.unknown_percentage = unknown_percentage
     self.fingerprint_type = fingerprint_type
     self.silence_percentage = silence_percentage
     self.testing_list = testing_list
     self.validation_list = validation_list
     print('Found {} files in total in {}.'.format(len(self.audio_files), audio_dir))
     assert len(self.audio_files) != 0
     self.mode = mode
     self.train = train
     self.prepare_data_index()
     if self.train:
         self.prepare_background_data()
     self.prepare_processing_graph()
Example 22
def main():
    parser = ArgumentParser()
    parser.add_argument("calibration_folder")
    parser.add_argument("image_folder")
    parser.add_argument("--bm_settings", default="")
    args = parser.parse_args()

    settings = args.bm_settings

    if args.bm_settings:
        with open(settings) as settings_file:
            if settings_file:
                settings = args.bm_settings
            else:
                settings = None

    calibration = StereoCalibration(input_folder=args.calibration_folder)
    input_files = find_files(args.image_folder)
    block_matcher = BlockMatcher(settings=settings)
    image_pair = [cv2.imread(image) for image in input_files[:2]]
        
    tuner = BMTuner(block_matcher, calibration, image_pair)

    if args.bm_settings:
        block_matcher.save_settings(args.bm_settings)
        print "Settings saved."
Example 23
def remove_one_agent_config_files(context):
    dir_path = configs.get("local_orb_path")
    all_files_generated = find_files(context.agent_file_name, ".yaml",
                                     dir_path)
    if len(all_files_generated) > 0:
        for file in all_files_generated:
            os.remove(file)
Example 24
def load_pages(directory, output_path, kernel):
    """
    Find all .text files in the specified directory and return a map of Page objects, keyed by the
    url for the page.

    \param directory
        starting directory to search
    \param output_path
        the directory we'll be writing output to
    \param kernel
        the kernel object whose configs are used to look up the template for each page
    """
    page_map = {}
    length = len(directory)
    for root, name in utils.find_files(directory, False, '.text'):
        path = os.path.join(root, name)
        base_path = root[length:]
        if not base_path.startswith('/'):
            base_path = '/' + base_path
        name = name[0:-5]

        url = utils.url_join(base_path, name)
        config = utils.find_config(kernel.configs, base_path)
        page = Page(kernel, path, output_path, url, config)
        page_map[url] = page

    return page_map
Example 25
    def test_find_files(self):
        """ Test the function find_files. """
        files = [
            os.path.join('folder_0', 'subfolder_0', 'file_0'),
            os.path.join('folder_0', 'subfolder_0', 'file_1'),
            os.path.join('folder_0', 'subfolder_1', 'file_0'),
            os.path.join('folder_0', 'subfolder_1', 'file_1'),
            os.path.join('folder_1', 'subfolder_0', 'file_0'),
            os.path.join('folder_1', 'subfolder_0', 'file_1'),
            os.path.join('folder_1', 'subfolder_1', 'file_0'),
            os.path.join('folder_1', 'subfolder_1', 'file_1'),
        ]

        sources_0 = {'folder_0': files[:4], 'folder_1': files[4:]}

        sources_1 = {
            'folder_0': ['1', '2', '3', '4'],
            'folder_1': files[4:],
        }

        sources_2 = {
            'folder_1': ['1', '2', '3', '4'],
            'folder_0': files[:4],
        }

        sources_3 = {
            'folder_1': ['1', '2', '3', '4'],
            'folder_0': ['1', '2', '3', '4'],
        }

        self.assertEqual(
            find_files(basename_prefix='file', sources=sources_0),
            files[:4],
        )
        self.assertEqual(
            find_files(basename_prefix='file', sources=sources_1),
            files[4:],
        )
        self.assertEqual(
            find_files(basename_prefix='file', sources=sources_2),
            files[:4],
        )
        self.assertEqual(
            find_files(basename_prefix='file', sources=sources_3),
            [],
        )
Example 26
def make_images_list(images_folders, root_folder):
    found_files, unknown_extensions = find_files(images_folders,
                                                 ['.jpg', '.png'], ['.txt'])
    if len(unknown_extensions) > 0:
        print('Unknown extensions: {}'.format(unknown_extensions))
    found_files = list(
        map(lambda x: os.path.relpath(x, root_folder), found_files))
    return found_files
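
Here find_files is assumed to take a list of folders plus the wanted and the known-but-ignored extensions, returning (matched_files, unknown_extensions); a minimal sketch under that assumption:

import os

def find_files(folders, wanted_exts, ignored_exts):
    # Walk each folder, keep files with a wanted extension and collect any
    # extension that is neither wanted nor explicitly ignored.
    matched, unknown = [], set()
    for folder in folders:
        for root, _, names in os.walk(folder):
            for name in names:
                ext = os.path.splitext(name)[1].lower()
                if ext in wanted_exts:
                    matched.append(os.path.join(root, name))
                elif ext not in ignored_exts:
                    unknown.add(ext)
    return matched, sorted(unknown)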
Example 27
def load_data(data_path):
    data_file = find_files(data_path)
    data = []
    for d in data_file:
        samples = list(read_file(d))
        data.extend(samples)
    random.shuffle(data)
    return data
Example 28
def local_info():
    result = {}
    local_files_to_check = utils.find_files()
    for f in local_files_to_check:
        h = hashlib.md5()
        h.update(open(f, "rb").read())
        result[f[2:]] = {"ETag": h.hexdigest()}
    return result
Example 29
def get_sigs_in_dir(directory, viname=None, searchstring='mean.ref', recursivesearch=False):
    signatures = signatureCollection(viName=viname)
    sigFiles = find_files(directory, searchstring, recursive=recursivesearch)

    for f in sigFiles:
        signatures.add(f)

    return signatures
Example 30
def main():
    search_pattern = sys.argv[1]
    search_regex = re.compile(search_pattern)
    shell_file_pattern = sys.argv[2]

    files = find_files('.', shell_pattern=shell_file_pattern)

    for filename in files:
        grep_file(filename, search_regex)
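
grep_file is not shown in this example; a plausible sketch of such a helper (the exact output format is an assumption):

def grep_file(filename, search_regex):
    # Print every matching line, prefixed with the file name and line number.
    with open(filename, errors='replace') as f:
        for lineno, line in enumerate(f, start=1):
            if search_regex.search(line):
                print('{}:{}: {}'.format(filename, lineno, line.rstrip()))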
Example 31
    def compile(self):
        for path in utils.find_files(
                os.path.join(self.paths["maps"], "interfaces"), ".xml"):
            self.path = path
            self.compile_file()

        with open(
                os.path.join(self.paths["maps"], "python",
                             "InterfaceQuests.py"), "wb") as quests_file:
            quests_file.write(self.quests.getvalue().encode())
Example 32
 def __init__(self, data_dir, fps=16000):
     # define the state of the object
     self.data_dir = data_dir
     self.fps = fps
     # setup the files for reading
     self.files = list(find_files(data_dir, '*.wav'))
     self.classes = [f for f in sorted(os.listdir(data_dir))]
     # self.classes.remove('.DS_Store')
     self.classes.sort()
     print(self.classes)
Example 33
def test_preprocessing():
    # make arguments
    args = make_args()

    # prepare dummy wav files
    wavdir = "data/wav"
    if not os.path.exists(wavdir):
        os.makedirs(wavdir)
    for i in range(5):
        make_dummy_wav(wavdir + "/%d.wav" % i, 8000, args.fs)

    # feature extract
    wav_list = find_files(wavdir, "*.wav")
    if not os.path.exists(args.wavdir):
        os.makedirs(args.wavdir)
    args.feature_type = "world"
    world_feature_extract(wav_list, args)
    args.feature_type = "melspc"
    melspectrogram_extract(wav_list, args)
    args.feature_type = "mcep"
    melcepstrum_extract(wav_list, args)

    # calc_stats
    file_list = find_files(args.hdf5dir, "*.h5")
    args.feature_type = "world"
    calc_stats(file_list, args)
    args.feature_type = "melspc"
    calc_stats(file_list, args)
    args.feature_type = "mcep"
    calc_stats(file_list, args)

    # noise shaping
    wav_list = find_files(args.wavdir, "*.wav")
    args.feature_type = "world"
    args.writedir = "data/wav_ns/world"
    if not os.path.exists(args.writedir):
        os.makedirs(args.writedir)
    world_noise_shaping(wav_list, args)
    args.feature_type = "mcep"
    args.writedir = "data/wav_ns/mcep"
    if not os.path.exists(args.writedir):
        os.makedirs(args.writedir)
    melcepstrum_noise_shaping(wav_list, args)
Example 34
def get_pred(csv_path):
    if os.path.isdir(csv_path):
        csv_files = find_files(csv_path, pattern="*.csv")
        csv_files = sorted(csv_files)
    else:
        csv_files = [csv_path]
    print(csv_files)
    preds = np.concatenate([load_pred(fname) for fname in csv_files], axis=0)
    print(preds.shape)
    return preds
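
load_pred is not shown; a plausible sketch that reads one 2-D prediction array per CSV file (the column layout and header row are assumptions):

import numpy as np

def load_pred(fname):
    # Read per-sample prediction rows from a CSV file into a 2-D float array.
    return np.loadtxt(fname, delimiter=',', skiprows=1, ndmin=2)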
Example 35
 def build_cache(self, audio_dir, sample_rate):
     logger.info(f'audio_dir: {audio_dir}.')
     logger.info(f'sample_rate: {sample_rate:,} hz.')
     audio_files = find_files(audio_dir, ext=self.ext)
     audio_files_count = len(audio_files)
     assert audio_files_count != 0, f'Could not find any {self.ext} files in {audio_dir}.'
     logger.info(f'Found {audio_files_count:,} files in {audio_dir}.')
     with tqdm(audio_files) as bar:
         for audio_filename in bar:
             bar.set_description(audio_filename)
             self.cache_audio_file(audio_filename, sample_rate)
Example 36
 def __init__(self, cache_dir: str, audio_dir: str = None, sample_rate: int = SAMPLE_RATE, ext='flac'):
     self.ext = ext
     self.cache_dir = os.path.join(cache_dir, 'audio-fbanks')
     ensures_dir(self.cache_dir)
     if audio_dir is not None:
         self.build_cache(os.path.expanduser(audio_dir), sample_rate)
     self.speakers_to_utterances = defaultdict(dict)
     for cache_file in find_files(self.cache_dir, ext='npy'):
         # /path/to/speaker_utterance.npy
         speaker_id, utterance_id = Path(cache_file).stem.split('_')
         self.speakers_to_utterances[speaker_id][utterance_id] = cache_file
Example 37
def main(argv):
    parser = argparse.ArgumentParser(
        description="Load in a pose and transform superquadrics")

    parser.add_argument(
        "primitives_directory",
        help=
        "Path to the directory containing the superquadrics of the instance")

    parser.add_argument("pose_directory",
                        help="Path to the directory containing the poses file")

    parser.add_argument("pose_index",
                        type=int,
                        default=0,
                        help="Index of the pose to be displayed")

    parser.add_argument("--prob_threshold",
                        type=float,
                        default=0.5,
                        help="Probability threshold")

    args = parser.parse_args(argv)
    primitives = load_all_primitive_parameters(args.primitives_directory,
                                               args.prob_threshold)
    paths = find_files(args.pose_directory + "meta/**meta.txt")

    # Loop over every meta file
    pose_path = sorted(paths)[0]
    _, rt, _ = load_pose(pose_path)
    q_obj = rotation_matrix_to_quaternion(rt[:3, :3])

    M = len(primitives)
    M_poses = np.zeros(shape=(M, 3, 4))

    i = 0
    for prim in primitives:
        prim["obj_pose"] = q_obj

    print(args.pose_directory)
    with open(os.path.join(args.pose_directory, "outputs/labels.txt"),
              'r') as fp:
        for i in np.arange(0, args.pose_index + 1):
            label = fp.readline()
            if i == args.pose_index:
                pose = string_to_pose(label.split("\n", 1)[0])
                pose = np.reshape(pose, (-1, 3, 4))

                for m in np.arange(0, np.shape(pose)[0]):
                    primitives[m]["transform"] = pose[m]

    save_params_as_ply(os.path.join("../results/test/", "primitives.ply"),
                       primitives)
Example 38
def load_piece_data(directory,
                    audio_sr,
                    velocity,
                    fac,
                    valid=True):
    '''Loader that reads tune from directory and yields audio
    waveform and encoded piano roll as tuple of 3 arrays:
    (W, T, I). If more audio files represent single midi file,
    one is chosen randomly.
    '''

    midi_files = find_files(directory, '*.mid')
    randomized_midi_files = randomize_files(midi_files)

    for midi_filename in randomized_midi_files if valid else midi_files:
        # load piano roll from midi file
        proll = pretty_midi.PrettyMIDI(
            midi_filename).get_piano_roll(fs=int(audio_sr/fac))
        proll /= 127 # velocity to <0;1>
        if not velocity:
            proll[proll > 0] = 1
        # encode piano roll
        table, indices = roll_encode(proll, fac)
        # add 0-roll if not present (we will need it later for padding)
        if get_roll_index(table, np.zeros(128)).shape[0] == 0:
            table = np.concatenate((table, np.zeros(shape=(1, 128))))
        # get respective audio file names and choose 1 randomly
        base = midi_filename.rsplit('/', 1)[-1]
        base = re.sub(r'(.*)%s$' % re.escape('.mid'), r'\1', base)
        audio_files = find_files(directory, base+'*.wav')
        if not audio_files:
            raise ValueError('No files found for \'{}\'.'.format(base+'*.wav'))
        audio_filename = random.choice(audio_files)
        # load audio waveform
        audio, _ = librosa.load(audio_filename, sr=audio_sr, mono=True)
        yield audio, table, indices
Example 39
    def _validate_and_prepare_dir_old(self, app_dir_path, undo_ops):
        """Called by the subclass run() method after setting up the directory.
        """
        results = validate_settings(app_dir_path, self.django_settings_module)
        if results.get_return_code() != SUCCESS_RC:
            results.print_final_status_message(logger)
            return results.get_return_code()
        config = django_config_from_validation_results(self.django_settings_module,
                                                       VERSION, results)
        write_json(config.to_json(), os.path.join(app_dir_path, DJANGO_CFG_FILENAME))

        # undo the changes we made
        for (op_fun, args) in undo_ops:
            op_fun(*args)
        # delete all the .pyc files
        find_files(app_dir_path, "\.pyc$", os.remove)
        # (re)create the archive
        if self.archive_type == "zip":
            archive = ZipfileHandler(self.archive_file, "w")
        else:
            archive = TarfileHandler(self.archive_file, "w:gz")
        archive.create_new_from_dir(app_dir_path)
        archive.close()
        return SUCCESS_RC
Example 40
    def __init__(self,
                 data_dir,
                 coord,
                 audio_sample_rate,
                 receptive_field,
                 velocity,
                 sample_size,
                 queues_size,
                 compress_fac=10):
        self.data_dir = data_dir
        self.audio_sample_rate = audio_sample_rate
        self.compress_fac = compress_fac
        self.coord = coord
        self.receptive_field = receptive_field
        self.velocity = velocity
        self.sample_size = sample_size
        self.threads = []

        # Init queues and placeholders.
        self.queues = {'tune': {}, 'batch': {}}
        self.audio_placeholder = tf.placeholder(dtype=tf.float32, shape=(None,))
        self.table_placeholder = tf.placeholder(dtype=tf.float32, shape=(None,
                                                                         128))
        self.indices_placeholder = tf.placeholder(dtype=tf.int32, shape=(None,))
        self.queues['tune']['Q'] = tf.FIFOQueue(queues_size[0],
                                                ['float32', 'float32', 'int32'])
        self.queues['tune']['enQ'] = self.queues['tune']['Q'].enqueue(
            [self.audio_placeholder,
             self.table_placeholder,
             self.indices_placeholder])
        self.wave_placeholder = tf.placeholder(dtype=tf.float32,
                                               shape=(None, 1))
        self.roll_placeholder = tf.placeholder(dtype=tf.float32,
                                               shape=(None, 128))
        self.queues['batch']['Q'] = tf.PaddingFIFOQueue(
            queues_size[1], ['float32', 'float32'],
            shapes=[(None, 1), (None, 128)])
        self.queues['batch']['enQ'] = self.queues['batch']['Q'].enqueue(
            [self.wave_placeholder, self.roll_placeholder])

        self.file_counter = tf.Variable(0, trainable=True)
        self.increment_file_counter = tf.assign(
            self.file_counter, self.file_counter+1)

        files = find_files(data_dir, '*.mid')
        if not files:
            raise ValueError('No midi files found in \'{}\'.'.format(data_dir))
Example 41
def postproc_sos():
    if os.name != 'posix':
        return

    print S_PPF, "postproc_sos (strip, chrpath)"

    res = re.compile(
        "^(.*):.*ELF.*(executable|relocatable|shared object).*, not stripped"
    )
    rec = re.compile('.*\.(so$|so\.)')

    # use 'file' command to find all strippable files
    print PPF, "Creating complete file list..."
    all_files, _ = utils.find_files(BDIPaths.dre_dest, '.*')

    print PPF, "Searching for strippable / chrpathable files"
    for f in all_files:
        status, output = utils.get_status_output('%s %s' % (FILE, f))
        mo = re.match(res, output)
        stripped = chrpathed = False
        if mo:
            sfn = mo.groups()[0]
            ret = os.system('%s %s' % (STRIP, sfn))
            if ret != 0:
                print "Error stripping %s." % (sfn,)
            else:
                stripped = True

        # now check if f can be chrpathed
        if re.match(rec, f):
            # remove rpath information
            ret = os.system('%s --delete %s' % (CHRPATH, f))
            if ret != 0:
                print "Error chrpathing %s." % (f,)
            else:
                chrpathed = True

        if stripped or chrpathed:
            actions = [] 
            if stripped:
                actions.append('STRIPPED')
            if chrpathed:
                actions.append('CHRPATHED')
            
            print "%s: %s" % (f, ','.join(actions))
Example 42
def main():
    args = get_arguments()

    log.basicConfig(format='[%(asctime)s] [%(levelname)s] %(message)s',
                    level=LOG_LEVEL)
    log.info("Start of '{}'.".format(__file__))

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
        log.info("Created output directory '{}'.".format(args.output_dir))

    for fname in find_files(args.input_dir, '*.csv', path=False):
        with open(os.path.join(args.input_dir, fname), 'r') as f:
            csv_data = list(csv.reader(f, delimiter='\t'))
        midi = csv2mid(csv_data[1:], args.audio_sr)
        midi.write(os.path.join(args.output_dir, fname.replace('.csv', '.mid')))
        log.info("Processed file '{}'".format(fname))

    log.info("End of '{}'.".format(__file__))
Example 43
def plot_sigs(signaturedirectory, outputdirectory, name, signaturename):
    """

    """
    import os
    from utils import find_files, unique_name
    from core import signatureCollection
    from plotting import SignaturePlot

    if not outputdirectory:
        outputdirectory = signaturedirectory

    sigs = find_files(signaturedirectory, "mean.ref")

    if not sigs:
        raise click.BadParameter("Did not find any signature files in the specified directory.")

    if signaturename:
        filteredsigs = []
        for searchstring in signaturename:
            for sig in sigs:
                if searchstring.upper() in sig.upper():
                    filteredsigs.append(sig)
        sigs = filteredsigs

    signatures = signatureCollection()

    for sig in sigs:
        try:
            signatures.add(sig)
        except Exception as e:
            print e

        #TODO Fix core temporalSignature to use exceptions so they can be properly handled here

    name, ext = os.path.splitext(name)
    path = unique_name(outputdirectory, name, ext=ext)

    print("Outputting to {0}".format(path))

    plot = SignaturePlot(outputdirectory, os.path.basename(path))
    plot.plot_collection(signatures)
Example 44
 def loop(self,events):
     """Mandatory method.
     This is the main eventloop called by the core 30 times a minute."""
     if self.stopme:
         self.SPG.tellcore_enable_menubuttons()
         self.SPG.tellcore_game_end()
     for event in events:
         if event.type in (MOUSEBUTTONDOWN, MOUSEBUTTONUP,  MOUSEMOTION):
             result = self.actives.update(event)
             if self.button_action:
                 self.button_action = False
                 return
             if event.type == MOUSEBUTTONDOWN:
                 if self.state == 'play':
                     self.stop_slide_show()
                     self.prev_mouse_pos = 0
                     return
                 elif self.state == 'thumbs':
                     if result:
                         self.logger.debug("loop, got a thumb")
                         self.state = 'show'
                         album = result[0][1][0]
                         xml = os.path.join(album, 'text.xml')
                         self.textxml = self.parse_xml(xml)# could return None
                         self.actives.remove(self.thumbslist)
                         self.filelist = [f for f in utils.find_files(album, IMAGE_EXT_PATTERN)]
                         self.filelist.sort()
                         self.show()
                         return
                 elif self.state != 'thumbs':
                     self.show_all_buttons()
                     self.prev_mouse_pos = pygame.mouse.get_pos()[0]
             # gestures
             if event.type == MOUSEBUTTONUP and self.prev_mouse_pos and not self.state == 'thumbs':
                 x = pygame.mouse.get_pos()[0]
                 if self.prev_mouse_pos - x > 20:
                     self.hide_all_buttons()
                     self.next_photo()
                 elif x - self.prev_mouse_pos > 20:
                     self.hide_all_buttons()
                     self.prev_photo()
     return 
Example 45
def rebase_dlls():
    """Rebase all DLLs in the distdevide tree on Windows.
    """

    if os.name == 'nt':
        print S_PPF, "rebase_dlls"

        # sqlite3.dll cannot be rebased; it even gets corrupted in the
        # process!  see this test:
        # C:\TEMP>rebase -b 0x60000000 -e 0x1000000 sqlite3.dll
        # REBASE: *** RelocateImage failed (sqlite3.dll).  
        # Image may be corrupted

        # get list of pyd / dll files, excluding sqlite3
        # this returns full path names
        so_files, excluded_files = utils.find_files(
                BDIPaths.dre_dest, '.*\.(pyd|dll)', ['sqlite3\.dll'])
        # add newline to each and every filename
        so_files = ['%s\n' % (i,) for i in so_files]

        print "Found %d DLL PYD files..." % (len(so_files),)
        print "Excluded %d files..." % (len(excluded_files),)

        # open file in specfile_dir, write the whole list
        dll_list_fn = os.path.join(
                config.working_dir, 'dll_list.txt')
        dll_list = file(dll_list_fn, 'w')
        dll_list.writelines(so_files)
        dll_list.close()

        # now run rebase on the list
        os.chdir(config.working_dir)
        ret = os.system(
                '%s -b 0x60000000 -e 0x1000000 @dll_list.txt -v' %
                (REBASE,))

        # rebase returns 99 after rebasing, no idea why.
        if ret != 99:
            raise RuntimeError('Could not rebase DLLs.')
Example 46
def extract_signatures(image, shapefiledirectory, startdoy, doyinterval, outputdir, filelabel, plotsigs):
    """
    Extracts temporal signatures for a set of point geometry shapefiles in a specified directory and outputs them to a
    set of .ref files in an output directory.
    """
    import os
    from plotting import SignaturePlot
    from utils import find_files, create_output_dir, unique_name
    from signatureFunctions import get_sigs_in_dir, get_reference_curves

    if outputdir is None:
        outputdir = create_output_dir(os.path.dirname(image), "signatures", usetime=True)

    shapefiles = find_files(shapefiledirectory, ".shp", recursive=False)

    #TODO: Need a method to find only valid shapefiles in the directory

    get_reference_curves(image, shapefiles, startdoy, doyinterval, outdir=outputdir, filepostfix=filelabel)

    if plotsigs:
        path = unique_name(outputdir, "signaturePlot", ext=".pdf")
        sigs = get_sigs_in_dir(outputdir)
        plot = SignaturePlot(outputdir, os.path.basename(path))
        plot.plot_collection(sigs)
Example 47
                  #("Wheat", "wheat_" + extrafilter),
                  #("Wheat/Soy Double Crop", "wwheatsoydbl" + extrafilter),
                 ]
# directory to search for sigs
signaturedirectory = "/Users/phoetrymaster/Documents/School/Geography/Thesis/Data/Pellegrini/MODIS_10-13_6-14_resampled/Summer2014/multidate_image_aqua105/interpolatedband10/signatures_2014-08-24_0935/"

# dir for output
outputdir = "/Users/phoetrymaster/Documents/School/Geography/Thesis/ThesisTeX/plots/"
#outputdir = "/Users/phoetrymaster/Documents/TeX/deletetest/plots"


# plot name
name = "othersigsAR.pgf"

# find sigs in directory and add to sig collection
sigs = find_files(signaturedirectory, "mean.ref")

signatures = signatureCollection()

for sig in sigs:
    try:
        signatures.add(sig)
    except Exception as e:
        print e


# fonts and other formatting settings
pgf_with_rc_fonts = {"pgf.texsystem": "/usr/texbin/xelatex",
                     'text.color': 'black',
                     "font.family": "serif",
                     "font.serif": ["/Library/Fonts/Baskerville.ttc"],
Example 48
def get_stats(data_dir):
    '''Calculates statistics from directory contents.

    Args:
        data_dir: Path to directory containing dataset
        to be analyzed.

    Returns:
        A python dictionary `stats` with keys and values described as follows.

        *   `n_mids`: Number of `mid` files found in `data_dir`.
        *   `n_wavs`: Number of `wav` files found in `data_dir`.
        *   `n_fonts`: Number of soundfonts used for generation, assumed from
            and calculated as proportion of `wav` to `mid` files.
        *   `time_tunes`: Duration [s] of tunes based on midi events, with
            subfields for `total` dataset duration, `avg` tune duration,
            `max` tune duration and `min` tune duration.
        *   `notes`: Average note duration [s] `time_avg` and total count of
            notes in data set `n_total`.
        *   `time_total_notes`: Total duration [s] of individual notes. Vector
            of 128 floats.
        *   `n_total_notes`: Total number of occurrences of individual notes.
            Vector of 128 floats.
        *   `time_avg_notes`: Average durations [s] of individual notes.
        *   `n_avg_notes`: Average counts of individual notes in a single tune.
    '''

    mids = find_files(data_dir, '*.mid')
    wavs = find_files(data_dir, '*.wav')
    if not mids or not wavs:
        raise ValueError("No mid/wav files found "
                         "in '{}'.".format(data_dir))

    stats = OrderedDict()
    stats['n_mids'] = len(mids)
    stats['n_wavs'] = len(wavs)
    stats['n_fonts'] = stats['n_wavs'] / stats['n_mids']

    time_tunes = []

    stats['time_tunes'] = OrderedDict()
    stats['notes'] = OrderedDict()
    stats['n_total_notes'] = np.zeros(128) + MIN_FLOAT
    stats['time_total_notes'] = np.zeros(128)

    polyphony = [0] * 128

    # Count totals by iterating over midi files and their notes.
    for f in mids:
        m = pretty_midi.PrettyMIDI(f)
        polyphony = update_poly_stats(polyphony, m)
        time_tunes.append(m.get_end_time())

        for i in m.instruments:
            if not i.is_drum:
                for n in i.notes:
                    stats['time_total_notes'][n.pitch] += n.end - n.start
                    stats['n_total_notes'][n.pitch] += 1

    # Calculate tune duration stats.
    stats['time_tunes']['total'] = units_format(sum(time_tunes))
    stats['time_tunes']['avg'] = units_format(np.mean(time_tunes))
    stats['time_tunes']['max'] = units_format(max(time_tunes))
    stats['time_tunes']['min'] = units_format(min(time_tunes))

    # Calculate additional stats.
    stats['time_avg_notes'] = stats['time_total_notes'] / \
                              stats['n_total_notes']
    stats['n_total_notes'] = stats['n_total_notes'].astype(int)
    stats['notes']['time_avg'] = np.mean(stats['time_avg_notes'])
    stats['notes']['n_total'] = np.sum(stats['n_total_notes'])
    stats['n_avg_notes'] = stats['n_total_notes'] / stats['n_mids']

    # Save polyphony stats.
    stats['polyphony_degree'] = polyphony

    return stats
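
units_format is not shown; a plausible helper that renders a duration given in seconds in a human-readable form (the exact format is an assumption):

def units_format(seconds):
    # Render a duration in seconds as 'Hh MMm SSs'.
    seconds = int(round(seconds))
    hours, rest = divmod(seconds, 3600)
    minutes, secs = divmod(rest, 60)
    return '{}h {:02d}m {:02d}s'.format(hours, minutes, secs)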
Example 49
        answers[posdir[0] + str(posnum)] = answer

    try:
        for number, clue in puzzle.clues.across():
            cluenum = "A" + str(number)
            if cluenum not in answers:
                raise xdfile.IncompletePuzzleParse(xd, "Clue number doesn't match grid: " + cluenum)
            xd.clues.append((("A", number), decode(clue), answers.get(cluenum, "")))

        # xd.append_clue_break()

        for number, clue in puzzle.clues.down():
            cluenum = "D" + str(number)
            if cluenum not in answers:
                raise xdfile.IncompletePuzzleParse(xd, "Clue doesn't match grid: " + cluenum)
            xd.clues.append((("D", number), decode(clue), answers.get(cluenum, "")))
    except KeyError as e:
        raise xdfile.IncompletePuzzleParse(xd, "Clue doesn't match grid: " + str(e))

    return xd

if __name__ == "__main__":
    import sys
    from utils import get_args, find_files

    args = get_args(desc='parse .puz files')
    for fn, contents in find_files(*sys.argv[1:]):
        xd = parse_puz(contents, fn)
        print(xd.to_unicode())

Example 50
import sys
from baseline import BaselineEvaluator
from utils import find_files

dataset_path = sys.argv[1]
key_files = list(find_files(dataset_path, "*.test.key"))
print len(key_files)
evaluator = BaselineEvaluator(key_files)
print "MFS\t{:.3}".format(evaluator.mfs_baseline())
print "RandomShuffle\t{:.3}".format(evaluator.random_shuffled_baseline())
print "Random\t{:.3}".format(evaluator.random_baseline())
Example 51
    def submit_complete(self, machine, build_url, project, revision, build_type,
                        build_abi, build_platform, build_sdk, builder_type, tests=None):
        """Submit test results for the worker's current job to Treeherder.

        :param machine: machine id
        :param build_url: url to build being tested.
        :param project: repository of build.
        :param revision: Either a URL to the changeset or the revision id.
        :param tests: Lists of tests to be reported.
        """
        logger = utils.getLogger()
        logger.debug('AutophoneTreeherder.submit_complete: %s', tests)

        if not self.url or not revision:
            logger.debug('AutophoneTreeherder.submit_complete: no url/revision')
            return

        tjc = TreeherderJobCollection()

        for t in tests:
            logger.debug('AutophoneTreeherder.submit_complete for %s %s', t.name, project)

            t.end_timestamp = timestamp_now()
            # A usercancelled job may not have a start_timestamp
            # since it may have been cancelled before it started.
            if not t.start_timestamp:
                t.start_timestamp = t.end_timestamp

            tj = self._create_job(tjc, machine, build_url, project, revision,
                                  build_type, build_abi, build_platform,
                                  build_sdk, builder_type, t)
            tj.add_state(TestState.COMPLETED)
            tj.add_result(t.status)
            tj.add_submit_timestamp(t.submit_timestamp)
            tj.add_start_timestamp(t.start_timestamp)
            tj.add_end_timestamp(t.end_timestamp)

            t.job_details.append({
                'value': os.path.basename(t.config_file),
                'title': 'Config'})
            t.job_details.append({
                'url': build_url,
                'value': os.path.basename(build_url),
                'title': 'Build'})
            t.job_details.append({
                'value': utils.host(),
                'title': 'Host'})

            if t.passed + t.failed + t.todo > 0:
                if t.failed == 0:
                    failed = '0'
                else:
                    failed = '<em class="testfail">%s</em>' % t.failed

                t.job_details.append({
                    'value': "%s/%s/%s" % (t.passed, failed, t.todo),
                    'title': "%s-%s" % (t.job_name, t.job_symbol)
                })

            if hasattr(t, 'phonedash_url'):
                t.job_details.append({
                    'url': t.phonedash_url,
                    'value': 'graph',
                    'title': 'phonedash'
                    })

            # Attach log, ANRs, tombstones, etc.

            if self.s3_bucket:
                # We must make certain that S3 keys for uploaded files
                # are unique even in the event of retries. The
                # Treeherder logviewer limits the length of the log
                # url to 255 bytes. If the url length exceeds 255
                # characters it is truncated in the Treeherder
                # logviewer url field even though the file is
                # successfully uploaded to s3 with the full url. The
                # logviewer will fail to parse the log since it
                # attempts to retrieve it from a truncated url.

                # We have been creating unique keys through the use of
                # human readable "log_identifiers" combined with the
                # test's job_guid and base filename to create unique
                # keys for s3. Unfortunately, the choice of the aws
                # host name, a path based on the path to the build,
                # test names and config file names has resulted in
                # overly long urls which exceed 255 bytes. Given that
                # the s3 hostname and build url path currently consume
                # 100 bytes and the test's job-guid and filename
                # consume another 51, we only have a maximum of 104
                # bytes for the log_identifier. The safest course of
                # action is to eliminate the test name, test config
                # filename, the chunk and device name and rely solely
                # on the test's job_guid to provide uniqueness.
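                # Worked budget using the rough figures above:
                #   255 bytes total
                #   - ~100 bytes (s3 hostname + build url path)
                #   -  ~51 bytes (job_guid + base filename)
                #   = ~104 bytes of headroom, hence only the job_guid below.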

                log_identifier = t.job_guid

                key_prefix = os.path.dirname(
                    urlparse.urlparse(build_url).path)
                key_prefix = re.sub('/tmp$', '', key_prefix)

                # Upload directory containing ANRs, tombstones and other items
                # to be uploaded.
                if t.upload_dir:
                    for f in utils.find_files(t.upload_dir):
                        try:
                            lname = os.path.relpath(f, t.upload_dir)
                            try:
                                fname = '%s-%s' % (log_identifier, lname)
                            except UnicodeDecodeError, e:
                                logger.exception('Ignoring artifact %s',
                                                 lname.decode('utf-8',
                                                              errors='replace'))
                                continue
                            url = self.s3_bucket.upload(f, "%s/%s" % (
                                key_prefix, fname))
                            t.job_details.append({
                                'url': url,
                                'value': lname,
                                'title': 'artifact uploaded'})
                        except (S3Error, IOError), e:
                            logger.exception('Error uploading artifact %s', fname)
                            t.job_details.append({
                                'value': 'Failed to upload artifact %s: %s' % (fname, e),
                                'title': 'Error'})

                # Autophone Log
                # Since we are submitting results to Treeherder, we flush
                # the worker's log before uploading the log to
                # Treeherder. When we upload the log, it will contain
                # results for a single test run with possibly an error
                # message from the previous test if the previous log
                # upload failed.
                # Name the log up front so the error handler below can
                # reference fname even if flushing or closing the log fails.
                fname = '%s-autophone.log' % log_identifier
                lname = 'Autophone Log'
                try:
                    # Emit the final step marker, flush and close the
                    # log prior to uploading.
                    t.worker_subprocess.log_step('Submitting Log')
                    t.worker_subprocess.close_log()
                    key = "%s/%s" % (key_prefix, fname)
                    url = self.s3_bucket.upload(
                        t.worker_subprocess.logfile, key)
                    # Truncate the log once it has been submitted to S3
                    # but do not close the filehandler as that messes with
                    # the next test's log.
                    t.worker_subprocess.filehandler.stream.truncate(0)
                    t.job_details.append({
                        'url': url,
                        'value': lname,
                        'title': 'artifact uploaded'})
                    tj.add_log_reference('buildbot_text', url,
                                         parse_status='pending')
                except Exception, e:
                    logger.exception('Error %s uploading %s',
                                     e, fname)
                    t.job_details.append({
                        'value': 'Failed to upload Autophone log: %s' % e,
                        'title': 'Error'})
Esempio n. 52
0
def build_multiband_image(rootDIR, outName, newfoldername, find, drivercode, ndvalue, outputdir=None):
    """
    ##Set Args##
    rootdirectory = "/Users/phoetrymaster/Documents/School/Geography/Thesis/Data/MODIS_KANSAS_2012/"
    outputfilename = "test"
    newfoldername = "kansas"
    VItofind = "EVI"
    drivercode = "ENVI"
    nodatavalue = -3000
    #projection = "PROJCS[\"Sinusoidal\",GEOGCS[\"GCS_Undefined\",DATUM[\"D_Undefined\",
                   SPHEROID[\"User_Defined_Spheroid\",6371007.181,0.0]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",
                   0.017453292519943295]],PROJECTION[\"Sinusoidal\"],PARAMETER[\"False_Easting\",0.0],
                   PARAMETER[\"False_Northing\",0.0],PARAMETER[\"Central_Meridian\",0.0],UNIT[\"Meter\",1.0]]"

    sys.exit(build_multiband_image(rootdirectory, outputfilename, newfoldername, VItofind, drivercode, nodatavalue))
    """

    #TODO docstrings

    if outputdir is None:
        outputdir = rootDIR

    outdir = create_output_dir(outputdir, newfoldername)
    print "\nOutputting files to : {0}".format(outdir)

    print "\nFinding HDF files in directory/subfolders: {0}".format(rootDIR)
    hdfs = find_files(rootDIR, ".hdf")
    print "\tFound {0} files.".format(len(hdfs))

    print "\nGetting images to process of type {0}...".format(find)
    toprocess = []

    for hdf in hdfs:
        sds = get_hdf_subdatasets(hdf)
        for ds in sds:
            if find.upper() in ds[1].upper():
                toprocess.append(ds[0])
                print "\t\t{0}".format(ds[0])

    bands = len(toprocess)
    print "\tFound {0} images of type {1}.".format(bands, find)

    #print "\nGetting output parameters..."
    #rows, cols, datatype, geotransform, projection = open_image(toprocess[0])
    #print "\tParameters: rows: {0}, cols: {1}, datatype: {2}, projection: {3}.".format(rows, cols, datatype, projection)

    outfile = os.path.join(outdir, outName)
    print "\nOutput file is: {0}".format(outfile)

    ## Create output file from first file to process ##
    template = openImage(toprocess[0])
    templateproperties = gdalProperties(template)
    outds = copySchemaToNewImage(templateproperties, outfile, numberofbands=bands, drivername=drivercode)
    template = ""
    del template
    print "\tCreated output file."

    print"\nAdding bands to output file..."
    for i in range(0, bands):
        print "\tProcessing band {0} of {1}...".format(i + 1, bands)
        print toprocess[i]
        image = openImage(toprocess[i])
        band = image.GetRasterBand(1)

        outband = outds.GetRasterBand(i + 1)

        print "\t\tReading band data to array..."
        data = band.ReadAsArray(0, 0, templateproperties.cols, templateproperties.rows)

        print "\t\tWriting band data to output band..."
        outband.WriteArray(data, 0, 0)
        outband.SetNoDataValue(ndvalue)
        outband.FlushCache()

        outband = ""
        del data, outband
        band = ""
        image = ""

    print "\tFinished adding bands to output file."

    outds = ""
    del outds

    print "\nProcess completed."
Esempio n. 53
0
    parser = argparse.ArgumentParser(description='Functional tests runner')

    parser.add_argument('--mc',
                        action='store_true',
                        required=False,
                        default=False,
                        help='model check the result (default: not set)')
    parser.add_argument('-a',
                        '--args',
                        required=False,
                        default='',
                        help='arguments to pass to the tool. '
                             'Enclose the arguments in "".')

    parser.add_argument('-v', '--verbose', action='count', default=0)

    args = parser.parse_args()

    tests_dir = os.path.dirname(os.path.abspath(__file__))
    test_files = find_files(tests_dir + '/aag-files/', extension='aag')

    logger = setup_logging(args.verbose)

    failed_tests = run_tests(test_files,
                             lambda test_file, result_file: run_tool(test_file, result_file, args.args),
                             [check_answer, check_answer_with_mc][args.mc],
                             True,
                             logger)

    exit(len(failed_tests))
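
    # Side note (illustrative only): the expression
    #   [check_answer, check_answer_with_mc][args.mc]
    # picks a checker by indexing a two-element list with a boolean, since
    # False == 0 and True == 1. A minimal standalone sketch of the idiom,
    # with placeholder lambdas standing in for the real checkers:
    checkers = [lambda r: 'plain check of %s' % r,    # index 0: --mc absent
                lambda r: 'model-checked %s' % r]     # index 1: --mc given
    print checkers[False]('result')   # -> plain check of result
    print checkers[True]('result')    # -> model-checked result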
Esempio n. 54
0
                  #("Wheat/Soy Double Crop", "wwheatsoydbl_" + extrafilter),
                 ]
# directory to search for sigs
signaturedirectory = "/Users/phoetrymaster/Documents/School/Geography/Thesis/Data/MODIS_KANSAS_2007-2012/reprojected/Refs/2012clip1test2/clip1/"
oldsigdir = "/Users/phoetrymaster/Documents/School/Geography/Thesis/Data/MODIS_KANSAS_2007-2012/reprojected/Refs/2012/clip1/corn_NDVI_mean.ref"

# dir for output
outputdir = "/Users/phoetrymaster/Documents/School/Geography/Thesis/ThesisTeX/plots/"
#outputdir = "/Users/phoetrymaster/Documents/TeX/deletetest/plots"


# plot name
name = "refinedsorghumKS.pgf"

# find sigs in directory and add to sig collection
meansigs = find_files(signaturedirectory, "mean.ref")


meansignatures = signatureCollection()

for sig in meansigs:
    try:
        meansignatures.add(sig)
    except Exception as e:
        print e

pointsigs = find_files(signaturedirectory, "points.ref")

pointsignatures = signatureCollection()
Esempio n. 55
0
    def install(self):

        # copy dreams dir and relevant driver scripts to inst dir
        if os.path.exists(self.dreams_dest_dir):
            utils.output('DREAMs already in inst_dir, not copying.')
        else:
            shutil.copytree(
                    os.path.join( self.dre_src_dir, 'dreams'),
                    self.dreams_dest_dir)
            utils.output('Copied %s.' % (self.dreams_dest_dir,))

        driver_paths = ((self.drepy_src, self.drepy_dest), (self.dresh_src, self.dresh_dest),
                        (self.dreshpy_src, self.dreshpy_dest))

        for d in driver_paths:
            if os.path.exists(d[1]):
                utils.output('%s already present.' % (d[1],))
            else:
                shutil.copy2(d[0], d[1])
                utils.output('Copied %s.' % (d[1],))
        
        vardict = {'python_binary_path' : config.python_binary_path,
                   'python_library_path' : config.python_library_path,
                   'python_scripts_path' : config.python_scripts_path,
                   'devide_inst_dir' : config.DEVIDE_INST_DIR,
                   'wx_lib_path' : config.WX_LIB_PATH,
                   'vtk_sodir' : config.VTK_SODIR,
                   'vtk_python' : config.VTK_PYTHON,
                   'dcmtk_lib' : config.DCMTK_LIB,
                   'gdcm_lib' : config.GDCM_LIB,
                   'gdcm_python' : config.GDCM_PYTHON,
                   'vtkdevide_lib' : config.VTKDEVIDE_LIB,
                   'vtkdevide_python' : config.VTKDEVIDE_PYTHON,
                   'vtktudoss_lib' : config.VTKTUDOSS_LIB,
                   'vtktudoss_python' : config.VTKTUDOSS_PYTHON,
                   'itk_bin' : config.ITK_BIN,
                   'itk_lib' : config.ITK_DIR,
                   'wrapitk_lib' : config.WRAPITK_LIB,
                   'wrapitk_python' : config.WRAPITK_PYTHON}

        # replace all instances of the installation dir with the
        # variable $MYDIR / %MYDIR%
        vardict2 = {}
        idir = config.inst_dir
        if idir.endswith(os.path.sep):
            idir = idir[:-1]
            
        for k,v in vardict.items():
            vardict2[k] = v.replace(idir, '%(dre_top)s')
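        # Illustration with a hypothetical install dir: if idir were '/opt/dre'
        # and v were '/opt/dre/lib/python', vardict2[k] would become
        # '%(dre_top)s/lib/python', keeping the CFG written below relocatable.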

        if os.name == 'nt':
            cfg = nt_cfg
        else:
            cfg = posix_cfg

        # let's write out the CFG file
        fname = os.path.join(config.inst_dir, 'dre.cfg')
        cf = open(fname, 'w')
        cfg2 = cfg % vardict2
        cf.write(cfg2)
        cf.close()
        utils.output('Wrote DRE CFG.')

        # and then we have to fix all of the shebangs that distutils sets as absolute paths!
        if os.name == 'posix':
            pyscripts = utils.find_files(config.python_binary_path, 
                    '.*', exclude_pats=['python$','python[0-9]\.[0-9]$'])[0]
            
            for pyscript in pyscripts:
                utils.re_sub_filter_file([('#!.*', '#!/usr/bin/env python')], pyscript)
Esempio n. 56
0
import pylab
import numpy as np
import utils
import sys

info_text = 'Please give the folder containing the voltage recordings after the script_name'
info_text += '\n USAGE:\n\t python plot_voltages.py [FOLDER_NAME]' 
assert (len(sys.argv) > 1), info_text
folder_name = sys.argv[1]

filenames = utils.find_files(folder_name, 'exc_volt_')
# Since you cannot know in advance the final names of the data files NEST will
# write (they depend on the number of processes, for example), you need to
# browse the folder to retrieve all relevant files to plot.
print 'Found filenames:', filenames


fig = pylab.figure()
ax = fig.add_subplot(111)

for fn in filenames:
    path = folder_name 
    d = np.loadtxt(fn)
    gids = np.unique(d[:, 0])
    for gid in gids:
        time_axis, volt_trace = utils.extract_trace(d, gid)
        ax.plot(time_axis, volt_trace)


pylab.show()
Esempio n. 57
0
    rc, out, err = execute_shell(SPEC_FRAMEWORK_DIR + '/check_model.sh ' + tmp_file_name)

    os.remove(tmp_file_name)

    return rc, out, err


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Functional tests runner')

    parser.add_argument('--mc', action='store_true',
                        required=False, default=False,
                        help='model check the result, default: False')

    args = parser.parse_args()

    TEST_FILES = find_files('./tests/safety/', extension='aag', ignore_mark='notest') + \
                 find_files('./tests/buechi/', extension='aag', ignore_mark='notest') + \
                 find_files('./tests/syntcomp-format/', extension='aag', ignore_mark='notest') + \
                 find_files('./tests/1-streett/', extension='aag', ignore_mark='notest')
    RUN_TOOL = run_tool
    CHECK_RESULT = [check_answer, check_answer_with_mc][args.mc]

    logger = setup_logging()
    exit(run_tests(TEST_FILES,
                   RUN_TOOL,
                   CHECK_RESULT,
                   True,
                   logger))