def __init__(self,
                 sampling,
                 type):
        """Index the CMU pose .mat files for one dataset split.

        Args:
            sampling: forwarded to the parent dataset constructor.
            type: split selector — 0 -> 'train', 1 -> 'test',
                anything else -> 'val'.
                NOTE(review): shadows the builtin ``type``; kept as-is
                for caller compatibility.
        """
        super().__init__(sampling=sampling)
        # Resolve the split directory under `cmu_data`.
        if type==0:
            self.path = os.path.join(cmu_data,'train')
        elif type ==1:
            self.path = os.path.join(cmu_data,'test')
        else:
            self.path = os.path.join(cmu_data, 'val')

        file_paths, file_names = get_files(self.path,"mat")
        self.index_file =[]    # kept "info" .mat file paths
        self.index_number =[]  # running (cumulative) frame counts, one per file
        self.total_length =0   # total frames indexed so far
        self._logger.info("start")
        # Hard cap on how many frames are indexed overall.
        data_used = 10 ** 7
        self._logger.info("Remember amount of data is: %s" % data_used)
        for i in file_paths:
            # Only files with "info" in their path carry pose/shape data.
            if "info" in i:
                data = sio.loadmat(i)
                assert 'pose' in data.keys() and 'shape' in data.keys()
                # 'pose' is 2-D; its second dimension is treated as the
                # per-file frame count here.
                _, N =data['pose'].shape
                self.total_length += N
                self.index_file.append(i)
                self.index_number.append(N)
                # Turn per-file counts into prefix sums so a global frame
                # index can later be mapped to the file that contains it.
                if len(self.index_number)>1:
                    self.index_number[-1] = self.index_number[-1]+self.index_number[-2]
                if self.total_length >= data_used:
                    break
        self._logger.info("dataset size %s" % self.total_length)
Exemplo n.º 2
0
def read_metafile(path: str) -> dict:
    """Read the single LJSpeech-style metadata csv found under `path`.

    Each line is expected to look like ``id|...|text``; the returned
    mapping pairs the first '|'-field with the last one.

    Args:
        path: directory searched (via `get_files`) for exactly one .csv.

    Returns:
        dict mapping utterance id -> text.

    Raises:
        AssertionError: if `path` does not contain exactly one csv file.
    """
    csv_files = get_files(path, extension='.csv')
    assert len(csv_files
               ) == 1, f'Expected a single csv file, found: {len(csv_files)} '
    text_dict = {}
    with open(csv_files[0], encoding='utf-8') as f:
        for line in f:
            # Fix: drop the trailing newline so the stored text does not
            # carry a '\n' (the original kept it in split[-1]).
            split = line.rstrip('\n').split('|')
            text_dict[split[0]] = split[-1]
    return text_dict
Exemplo n.º 3
0
def train_styles():
    """Train a model for every style image and render all test images.

    For each .png in DIRS['STYLES']: train a model via `train_style`,
    locate the produced ``*.model`` file, then run the fast-neural-style
    eval script once per test image, writing results into a 'test'
    subfolder next to the model.
    """
    import subprocess  # local import: only this entry point needs it

    style_images = get_files(settings.DIRS['STYLES'],
                             string=False,
                             recursive=False,
                             file_type='.png')
    test_images = get_files(settings.DIRS['TEST_IMAGES'], string=False)
    script = 'examples/fast_neural_style/neural_style/neural_style.py'
    for style_image in style_images:
        print('training', style_image.name)
        save_model_dir = train_style(style_image)
        model = next(Path(save_model_dir).rglob('*.model'))
        test_dir = os.path.join(save_model_dir, 'test')
        os.mkdir(test_dir)
        for test_image in test_images:
            output_image = os.path.join(test_dir, test_image.name)
            # Fix: pass an argument list instead of building a shell
            # string for os.system — no quoting/injection issues when
            # paths contain spaces or shell metacharacters.
            cmd = ['python', script, 'eval',
                   '--content-image', str(test_image),
                   '--model', str(model),
                   '--output-image', str(output_image),
                   '--cuda', '1']
            print('cmd', ' '.join(cmd))
            # check=False mirrors os.system's ignore-exit-status behavior
            subprocess.run(cmd, check=False)
Exemplo n.º 4
0
    def create_index_file(self, contents, content_lists):
        """Build the dataset index by scanning sub-directories and files.

        Populates ``self.index_file`` with one entry per sampled frame;
        in dict mode it is nested as
        ``self.index_file[s][act][subact][ca][fno] = path``.

        :param contents: content keys to filter on, each one of:
            's', 'act', 'subact', 'ca', 'fno'
        :param content_lists: per-content list of allowed values
            (parallel to `contents`); sequences with other values are
            skipped
        """

        self._logger.info('Indexing dataset...')
        self.all_metadata = {}
        if self.index_as_dict:
            self.index_file = {}
        else:
            self.index_file = []
        # get list of sequences
        names, paths = get_sub_dirs(self.h_36m_loc)
        for name, path in zip(names, paths):
            # check data to load: skip the sequence if any requested
            # content value is not in the corresponding allowed list
            breaking = False
            for i, content in enumerate(contents):
                if self.get_content(name, content) not in content_lists[i]:
                    breaking = True
                    continue
            if breaking:
                continue
            s, act, subact, ca = self.get_all_content_file_name(name,
                                                                file=False)
            self.append_metadata(s, act, subact, ca)
            _, file_names = get_files(path, 'jpg')
            # NOTE(review): the loop below shadows the outer `name`
            # (sequence dir) with a file name; harmless since the outer
            # loop reassigns it, but worth cleaning up.
            for name in file_names:  # add only sequences sampled
                s, act, subact, ca, fno = self.get_all_content_file_name(
                    name, file=True)
                if not self.get_intermediate_frames:
                    #CHANGED FOR LACK OF DATA
                    # keep frames where (fno - 1) is a multiple of the
                    # sampling step (frame numbers start at 1)
                    if (
                            fno - 1
                    ) % self.sampling != 0 and self.sampling != 1:  # starts from 1
                        continue
                else:
                    # CHANGED FOR LACK OF DATA
                    # presumably selects the frames *between* sampled
                    # ones — TODO confirm against get_intermediate_frames
                    if fno % self.sampling != 1:  # starts from 1
                        continue
                if self.index_as_dict:
                    self.index_file=\
                        self.append_index_to_dic(self.index_file, s, act, subact, ca, fno)
                else:
                    self.index_file=\
                        self.append_index_to_list(self.index_file, s, act, subact, ca, fno)
Exemplo n.º 5
0
    def _index_dir(self, path):
        """Recursively collect indexed files under `path`.

        A folder that contains every directory named in
        ``self.ROOT_DIRS`` is treated as a leaf: the files of each root
        dir are utf8-encoded and grouped per root dir. Otherwise the
        sub-folders are indexed recursively and their results merged.

        Arguments:
            path {str} -- folder path

        Returns:
            dict -- indexed files per root dir
        """

        sub_dirs, _ = io.get_subdirs(path)

        # leaf case: this folder holds every required root dir
        if set(self.ROOT_DIRS) <= set(sub_dirs):
            collected = dict()
            expected_frames = -1
            for r_dir in self.ROOT_DIRS:
                d_path = os.path.join(path, r_dir)
                _, paths = io.get_files(d_path)

                # every pass must report the same frame count
                if expected_frames < 0:
                    expected_frames = len(paths)
                elif len(paths) != expected_frames:
                    self.logger.error(
                        'Frames info in {} not matching other passes'.
                        format(d_path))

                collected[r_dir] = [p.encode('utf8') for p in paths]
            return collected

        # recursive case: start empty and merge every sub-folder's index
        merged = {r_dir: [] for r_dir in self.ROOT_DIRS}
        for sub_dir in sub_dirs:
            child = self._index_dir(os.path.join(path, sub_dir))
            for r_dir in self.ROOT_DIRS:
                merged[r_dir].extend(child[r_dir])
        return merged
Exemplo n.º 6
0
def restructrure_dirs():
    """Pair each style image with its trained model and copy both into a
    per-style folder under DIRS['TRAINED_STYLES'].

    Styles and models are paired by zip order; a name check guards
    against a mismatched pairing.

    Raises:
        Exception: if a model path does not contain its style's name.
    """
    style_images = get_files(settings.DIRS['STYLES'],
                             string=False,
                             recursive=False,
                             file_type='.png')
    models = sorted(f
                    for f in Path(settings.DIRS['STYLES']).glob('*/*/*.model'))
    for style_image, style_model in zip(style_images, models):
        name = style_image.name.split('.')[0]
        # sanity check: the model path must embed the style name
        if name not in str(style_model):
            raise Exception('Please reconsider')
        # (fix: the original recomputed `name` redundantly here)
        new_folder = os.path.join(settings.DIRS['TRAINED_STYLES'], name)
        os.mkdir(new_folder)
        copyfile(str(style_image), os.path.join(new_folder, name + '.png'))
        copyfile(str(style_model), os.path.join(new_folder, name + '.model'))
Exemplo n.º 7
0
 def __init__(self, wav):
     """Set up a rendering run for the given wav file.

     Loads the image pools (fractals, photo folders, stab pictures),
     picks a random starting photo for the timelapse, and initialises
     the analysis/path fields to None (filled in by later steps).

     Args:
         wav: wav file name; ``self.name`` is derived by removing
             '.wav'.
     """
     self.wav = wav
     self.states = {}  # presumably per-section state — TODO confirm usage
     self.frame_pattern = 'frame_%05d.png'  # printf-style frame filename
     self.stylist = Stylist()
     # image pools used to build frames
     self.fractals = get_files(
         os.path.join(settings.DIRS['FRACTALS'], 'other'))
     self.photos = get_files(os.path.join(settings.DIRS['PHOTOS'], 'athina')) + \
                   get_files(os.path.join(settings.DIRS['PHOTOS'], 'athina1')) + \
                   get_files(os.path.join(settings.DIRS['PHOTOS'], 'athina_laptop')) + \
                   get_files(os.path.join(settings.DIRS['PHOTOS'], 'pictures'))
     self.stab_pictures = get_files(
         os.path.join(settings.DIRS['FRACTALS'], 'stabs'))
     # random starting photo for the timelapse sequence
     self.timelapse_index = np.random.randint(0, len(self.photos))
     # NOTE(review): replace() drops every '.wav' occurrence, not just
     # the suffix — fine unless the name contains '.wav' twice.
     self.name = self.wav.replace('.wav', '')
     # filled in by later processing steps
     self.transients, self.energy, self.stabs = None, None, None
     self.wip_path, self.wav_path, self.frames_path, self.output_path = None, None, None, None
Exemplo n.º 8
0
    def _index_sub_dir(self, path) -> list:
        """Index given directory

        This is specific to each dataset; each has a different
        structure.

        Args:
            path (str): directory path

        Returns:
            list: indexed file paths ([file_path, internal_index])
        """

        _, dirs = io.get_sub_dirs(path)

        # non-leaf: gather the indices of every sub-directory
        if dirs:
            collected = []
            for child in dirs:
                collected.extend(self._index_sub_dir(child))
            return collected

        # leaf folder: mocap data lives here, with one shared meta file
        # for all images; emit every self.sampling-th frame index
        r_path = io.make_relative(path, self.path)
        meta = '{}/h36m_meta.mat'.format(r_path).encode('utf8')
        _, files = io.get_files(path, 'jpg')
        return [[meta, idx] for idx in range(0, len(files), self.sampling)]
Exemplo n.º 9
0
    parser.add_argument(
        '--path',
        '-p',
        help='Point to the data path, expects LJSpeech-like folder.')
    parser.add_argument('--config',
                        '-c',
                        help='Point to the config.',
                        default='config.yaml')
    args = parser.parse_args()
    cfg = Config.load(args.config)

    audio = Audio(cfg)
    paths = Paths()
    preprocessor = Preprocessor(audio, paths.mel)

    files = get_files(args.path)
    n_workers = min(cpu_count() - 1, cfg.n_workers)
    pool = Pool(processes=n_workers)
    map_func = pool.imap_unordered(preprocessor.process_wav, files)
    dataset = []

    text_dict = read_metafile(args.path)
    display_params([
        ('Num Train', len(files) - cfg.n_val),
        ('Num Val', cfg.n_val),
        ('Num Mels', cfg.n_mels),
        ('Win Length', cfg.win_length),
        ('Hop Length', cfg.hop_length),
        ('Min Frequency', cfg.fmin),
        ('Sample Rate', cfg.sample_rate),
        ('CPU Usage', f'{n_workers}/{cpu_count()}'),
Exemplo n.º 10
0

# class StillMachine:
#     def __init__(self):
#         self.stylist = Stylist()
#         self.stills = get_files(settings.DIRS['STILLS'], string=False)
#         self.wip_path = os.path.join(settings.DIRS['WIP'], 'stills')
#         # os.mkdir(self.wip_path)
#
#     def run(self):
#         for still in self.stills:
#             for style_index in self.stylist.style_indexes:
#                 name = ''.join(still.name.split('.')[:-1]) + '_%s.png' % style_index
#                 output_image = os.path.join(self.wip_path, name)
#                 self.stylist.apply_style(
#                     style_index=style_index,
#                     content_image=str(still),
#                     output_image=output_image,
#                 )
#
#
# still_machine = StillMachine()
# still_machine.run()

# state_machine = StateMachine(wav='Decadent.wav')
# state_machine.run()

# Render every wav currently sitting in the wip folder.
for wav_file in get_files('wip', string=False, file_type='.wav'):
    StateMachine(wav=wav_file.name).run()
Exemplo n.º 11
0
def file_list():
    """Render the file listing page for an authorized user."""
    print("/files")
    abort_if_not_authorized()
    # render the Mako template with the current file list
    return Template(filename='views/files.html').render(files=get_files())
Exemplo n.º 12
0
import settings
from utils.image import convert
from utils.io import get_files

# Batch-convert every asset folder. Order matters: seed pngs first,
# then fractal upscaling, then the video-sized rescales, then styles.
conversion_passes = [
    ('SEED', dict(png=True)),
    ('FRACTALS', dict(super_res=True)),
    ('SEED', dict(size=settings.VIDEO['SIZE'], scale=True)),
    # bef 82783 items, totalling 13,5 GB
    # aft 82783 items, totalling 1,2 GB
    ('DATASET', dict(size=settings.VIDEO['DATASET_SIZE'], scale=True)),
    ('STYLES', dict(super_res=True, png=True, scale=True)),
]
for dir_key, kwargs in conversion_passes:
    for item in get_files(settings.DIRS[dir_key]):
        convert(item, **kwargs)