Example #1
def import_datasources(path: str, sync: str, recursive: bool) -> None:
    """Import datasources from YAML"""
    from superset.utils import dict_import_export

    sync_array = sync.split(",")
    path_object = Path(path)
    files = []
    if path_object.is_file():
        files.append(path_object)
    elif path_object.exists() and not recursive:
        files.extend(path_object.glob("*.yaml"))
        files.extend(path_object.glob("*.yml"))
    elif path_object.exists() and recursive:
        files.extend(path_object.rglob("*.yaml"))
        files.extend(path_object.rglob("*.yml"))
    for file_ in files:
        logger.info("Importing datasources from file %s", file_)
        try:
            with file_.open() as data_stream:
                dict_import_export.import_from_dict(
                    db.session, yaml.safe_load(data_stream), sync=sync_array)
        except Exception as ex:  # pylint: disable=broad-except
            logger.error("Error when importing datasources from file %s",
                         file_)
            logger.error(ex)
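A minimal invocation sketch for the example above, assuming a configured Superset application context; the directory path and sync flags are placeholders:

# Import every YAML datasource export under a directory tree,
# syncing both metrics and columns (hypothetical path and flags).
import_datasources(path="/tmp/datasources", sync="metrics,columns", recursive=True)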
Example #2
def json2sorted(img_path, train_data_path, test_data_path, ds_flag, req_data):
    """
    sort image according to the json file
    :param img_path:user's data-img dir
    :param train_data_path:user's train data dir
    :param test_data_path:user's test data dir
    :param ds_flag:train/test flag 1 -> train 2 -> test
    :param req_data: args of train request
    :return:a path which is user's data sorted (str)
    """
    json_path = Path(img_path)
    all_json_file = list(json_path.glob('**/*.json'))
    class_image_dict = {}
    for json_file in all_json_file:
        with json_file.open() as f:
            json_string = json.load(f)
        class_name = json_string['classification'][0]['category_name']
        img_file = json_string['images']['file_name']
        if class_name not in class_image_dict:
            class_image_dict[class_name] = [os.path.join(img_path, img_file)]
        else:
            class_image_dict[class_name].append(
                os.path.join(img_path, img_file))
    # Re-key the dict with integer class indices; snapshot the keys first,
    # since dict views cannot be indexed (and the dict is mutated below).
    for i, class_key in enumerate(list(class_image_dict)):
        class_image_dict[i] = class_image_dict.pop(class_key)
    if ds_flag == 1:
        make_sorted_dir(class_image_dict, train_data_path)
        move_img_2_sorted_dir(class_image_dict, train_data_path)
        train_task_send(req_data)
    else:
        make_sorted_dir(class_image_dict, test_data_path)
        move_img_2_sorted_dir(class_image_dict, test_data_path)
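The function above assumes one JSON annotation per image; a minimal sketch of the annotation shape it reads (field names taken from the code, values hypothetical):

import json

# Minimal annotation in the shape json2sorted expects.
annotation = {
    "classification": [{"category_name": "cat"}],
    "images": {"file_name": "img_0001.jpg"},
}
with open("img_0001.json", "w") as f:
    json.dump(annotation, f)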
Example #3
def find_files(paths):
    result = []
    basePath = Path('nuxeo-tools-hooks/nxtools/hooks')
    for path in [basePath.glob(path) for path in paths]:
        result += path

    return [str(path.relative_to(basePath)) for path in result if not path.relative_to(basePath).match('tests/**/*')]
Example #4
def FileSummary(txt_file_name='All_selected_suffix_files',
                file_suffix='jpg',
                sort=False):
    '''
    Collect the names of all files under the chosen folder that have the
    suffix file_suffix, and store them in the txt_file text file.
    The txt file stores the absolute paths of the target files.
    '''
    import tkinter as tk
    from tkinter import filedialog
    from pathlib2 import Path, PureWindowsPath
    import numpy as np

    #file_suffix = 'jpg'

    root = tk.Tk()
    folder = Path(
        filedialog.askdirectory(initialdir='D:/',
                                title=file_suffix + 'FOLDER'))
    root.withdraw()

    file_list = [str(file) for file in folder.glob('*.' + file_suffix)]
    if sort:
        # Sort numerically (1, 9, 10, 11) rather than
        # lexicographically (1, 10, 11, 2).
        file_list.sort(key=lambda x: int(x.split('_')[-1].split('.')[0]))
    txt_file = folder / (txt_file_name + '.txt')
    np.savetxt(txt_file, np.array(file_list), '%s')

    return txt_file
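A usage sketch, assuming a desktop session (the function opens a Tk folder picker); 'png' is an arbitrary suffix, and sort=True assumes names ending in '_<number>.<suffix>':

# Collect all *.png paths from a user-chosen folder into a txt file,
# sorted by the trailing frame number in each file name.
txt_path = FileSummary(txt_file_name='png_files', file_suffix='png', sort=True)
print(txt_path)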
Example #5
def patch_files(path=None, replace_list=None):
    """
    Search and replace content in a list of files based on a pattern.
    replace_list = [
        {
            'filename_pattern': '*.css',
            'content_patterns': [
                {
                    'search': '',
                    'replace': '',
                }
            ]
        }
    ]
    """
    if PY3:
        file_open_mode = 'r+'
    else:
        file_open_mode = 'rb+'

    path_object = Path(path)
    for replace_entry in replace_list or []:
        for path_entry in path_object.glob('**/{}'.format(replace_entry['filename_pattern'])):
            if path_entry.is_file():
                for pattern in replace_entry['content_patterns']:
                    with path_entry.open(mode=file_open_mode) as source_file_object:
                        with tempfile.TemporaryFile(mode=file_open_mode) as temporary_file_object:
                            source_position = 0
                            destination_position = 0

                            while True:
                                source_file_object.seek(source_position)
                                letter = source_file_object.read(1)

                                if len(letter) == 0:
                                    break
                                else:
                                    if letter == pattern['search'][0]:
                                        text = '{}{}'.format(letter, source_file_object.read(len(pattern['search']) - 1))

                                        temporary_file_object.seek(destination_position)
                                        if text == pattern['search']:
                                            text = pattern['replace']
                                            source_position = source_position + len(pattern['search'])
                                            destination_position = destination_position + len(pattern['replace'])
                                            temporary_file_object.write(text)

                                        else:
                                            source_position = source_position + 1
                                            destination_position = destination_position + 1
                                            temporary_file_object.write(letter)
                                    else:
                                        source_position = source_position + 1
                                        destination_position = destination_position + 1
                                        temporary_file_object.write(letter)

                            source_file_object.seek(0)
                            source_file_object.truncate()
                            temporary_file_object.seek(0)
                            shutil.copyfileobj(fsrc=temporary_file_object, fdst=source_file_object)
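A call sketch for the function above; the path and patterns are illustrative placeholders:

# Rewrite a color token in every CSS file under ./static.
patch_files(path='./static', replace_list=[
    {
        'filename_pattern': '*.css',
        'content_patterns': [
            {'search': '#fffff0', 'replace': '#ffffff'},
        ],
    },
])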
Example #6
    def test_integration_targets_none(self):
        def read_img(i):
            img = Image.new(mode='RGB',
                            size=(64, 64),
                            color=(i % 255, (i * 2) % 255, (i * 3) % 255))
            return img

        n = 100
        s = 32
        m = 5
        max_n_rows = 5
        n_cols = 10
        de = DatasetExporter(read_img_fn=read_img,
                             img_id_fn=lambda x: str(x),
                             max_output_img_size=(s, s),
                             margins=(m, m),
                             n_cols=n_cols,
                             max_n_rows=max_n_rows)

        indices = [i for i in range(n)]

        with tempfile.TemporaryDirectory() as tmpdir:
            de.export(indices, None, output_folder=tmpdir)
            path = Path(tmpdir)
            out_files = list(path.glob("*.png"))
            self.assertEqual(len(out_files),
                             int(np.ceil(n / (n_cols * max_n_rows))))
            for fp in out_files:
                out_img = Image.open(fp)
                self.assertEqual(out_img.size,
                                 ((s + m) * n_cols, (s + m) * max_n_rows))
Example #7
    def get_one_in_batch(self, index, size):
        # seq name
        seq_name, anno = self.annos[index]
        seq_dir = Path(self.data_dir) / seq_name
        imgs_list = sorted(seq_dir.glob('*.png'))
        seq = []
        for im_path in imgs_list:
            im = cv2.imread(str(im_path))
            seq.append(im)

        real_len = len(seq)
        scores = anno['scores']
        state = anno['labels']
        label = np.int32(anno['is_blinked'])
        # Check consistency
        assert real_len == len(scores)
        # Is augment?
        if self.is_augment:
            seq = ulib.aug(seq, color_rng=[0.8, 1.2])

        # Padding and resize to out
        seq = ulib.resize(seq, size)

        # Padding same dims matrix to seq to make sure it can be run by batch
        seq = xutil.pad_to_max_len(seq,
                                   self.max_seq_len,
                                   pad=np.zeros(seq[0].shape, dtype=np.int32))
        scores = xutil.pad_to_max_len(list(scores), self.max_seq_len, pad=0)
        state = xutil.pad_to_max_len(list(state), self.max_seq_len, pad=0)
        return seq, real_len, scores, state, label, seq_name
Example #8
def findFiles(basedir, patterns):
    """find files relative to a base directory according to a list of patterns.
    Returns a set of file names."""
    files = set()
    path = Path(basedir)
    for pattern in patterns:
        files.update([m.relative_to(basedir).as_posix() for m in path.glob(pattern)])
    return files
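For example (hypothetical base directory and patterns), this collects Python sources and top-level docs as POSIX-style names relative to the base:

names = findFiles('myproject', ['**/*.py', '*.md'])
print(sorted(names))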
Example #9
def get_downloadable_file_path(url):
    file_hash = [i for i in url.split("/") if i != ""][-1]
    downloads_dir = Path(settings.HEV_E["general"]["downloads_dir"])
    try:
        result = list(downloads_dir.glob("*{}*".format(file_hash)))[0]
    except IndexError:
        result = None
    return result
Example #10
    def test_copy(self):
        path = Path('resource/monkey')
        flist = path.glob("*")
        for f in flist:  # type: Path
            if f.is_file():
                continue
            else:
                print('dir')
Example #11
def segment_eval_struct_output(args):
    folder = Path(args.eval_dir)
    for ds in ['icdar', 'iamdb', 'iclef']:
        ev_dir = folder.glob('*%s_*/**/eval' % ds)
        for ev in ev_dir:
            source = ev.parent.parent.stem.split('_')[0]
            target = ev.parent.parent.stem.split('_')[1]
            th = 0.9 if target == 'icdar' else 0.6
            print('[%s -> %s %2.2f] %s' % (source, target, th, str(ev)))
            calculate_results_folder(str(ev), th)
Example #12
def import_datasources(path: str, sync: str, recursive: bool) -> None:
    """Import datasources from YAML"""
    from superset.datasets.commands.importers.v0 import ImportDatasetsCommand

    sync_array = sync.split(",")
    path_object = Path(path)
    files: List[Path] = []
    if path_object.is_file():
        files.append(path_object)
    elif path_object.exists() and not recursive:
        files.extend(path_object.glob("*.yaml"))
        files.extend(path_object.glob("*.yml"))
    elif path_object.exists() and recursive:
        files.extend(path_object.rglob("*.yaml"))
        files.extend(path_object.rglob("*.yml"))
    contents = {path.name: open(path).read() for path in files}
    try:
        ImportDatasetsCommand(contents, sync_array).run()
    except Exception:  # pylint: disable=broad-except
        logger.exception("Error when importing dataset")
Example #13
def get_pkl_iterator(pkl_path, infinite=False, suffix='pkl'):
    pkl_path = Path(pkl_path)
    c = 0
    while True:
        for pk in pkl_path.glob('*.%s' % suffix):
            # Use a context manager so the pickle file is closed promptly.
            with pk.open('rb') as f:
                mi = pklRick.load(f)
            c += 1
            yield mi

        assert c > 0, "Iterator from %s has zero meta images" % str(pkl_path)
        if not infinite:
            break
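A usage sketch, assuming a directory of pickled meta-images; the path is hypothetical:

# An infinite pass suits training loops; a single pass suits evaluation.
it = get_pkl_iterator('/data/meta_images', infinite=True)
first = next(it)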
Example #14
def import_datasources(path, sync, recursive):
    """Import datasources from YAML"""
    sync_array = sync.split(',')
    p = Path(path)
    files = []
    if p.is_file():
        files.append(p)
    elif p.exists() and not recursive:
        files.extend(p.glob('*.yaml'))
        files.extend(p.glob('*.yml'))
    elif p.exists() and recursive:
        files.extend(p.rglob('*.yaml'))
        files.extend(p.rglob('*.yml'))
    for f in files:
        logging.info('Importing datasources from file %s', f)
        try:
            with f.open() as data_stream:
                dict_import_export.import_from_dict(
                    db.session, yaml.safe_load(data_stream), sync=sync_array)
        except Exception as e:
            logging.error('Error when importing datasources from file %s', f)
            logging.error(e)
Example #15
def load_raw_files(werks_dir):
    if werks_dir is None:
        werks_dir = Path(cmk.utils.paths.share_dir) / "werks"
    werks = {}
    for file_name in werks_dir.glob("[0-9]*"):
        werk_id = int(file_name.name)
        try:
            werk = _load_werk(file_name)
            werk["id"] = werk_id
            werks[werk_id] = werk
        except Exception as e:
            raise MKGeneralException(_("Failed to load werk \"%s\": %s") % (werk_id, e))
    return werks
Example #16
def import_datasources(path, sync, recursive=False):
    """Import datasources from YAML"""
    sync_array = sync.split(',')
    p = Path(path)
    files = []
    if p.is_file():
        files.append(p)
    elif p.exists() and not recursive:
        files.extend(p.glob('*.yaml'))
        files.extend(p.glob('*.yml'))
    elif p.exists() and recursive:
        files.extend(p.rglob('*.yaml'))
        files.extend(p.rglob('*.yml'))
    for f in files:
        logging.info('Importing datasources from file %s', f)
        try:
            with f.open() as data_stream:
                dict_import_export_util.import_from_dict(
                    db.session,
                    yaml.safe_load(data_stream),
                    sync=sync_array)
        except Exception as e:
            logging.error('Error when importing datasources from file %s', f)
            logging.error(e)
Example #17
def load_dir(dirpath: pl.Path) -> Catalog:
    """Load from multiple files.

    This is used to read from a temporary directory where
    multiple processes write to concurrently. The files then
    need to be read back and joined together.

    We don't have to care about sorting here, as that is done
    in the final write.
    """
    full_catalog: Catalog = {}
    for fpath in dirpath.glob("*.md"):
        partial_catalog = load(fpath)
        full_catalog.update(partial_catalog)

    return full_catalog
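A driving sketch, assuming load() returns a dict keyed by entry id (as the update() call implies); the temporary directory is hypothetical:

import pathlib as pl

catalog = load_dir(pl.Path("/tmp/partial_catalogs"))
print(len(catalog), "entries merged")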
Example #18
def test_no_exeption(site):
    """
    The execution of a special agent should not lead to an exception
    if the agent is called without any arguments.
    Possible reasons for an exception are e.g. a wrong shebang, import
    errors or a wrong PYTHONPATH.
    """
    special_agent_dir = Path(
        site.root) / 'share' / 'check_mk' / 'agents' / 'special'
    for special_agent_path in special_agent_dir.glob('agent_*'):  # pylint: disable=no-member
        command = [str(special_agent_path)]
        p = site.execute(command,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         stdin=open(os.devnull))
        stderr = p.communicate()[1]
        assert "Traceback (most recent call last):" not in stderr
Example #19
def import_dashboards(path, recursive):
    """Import dashboards from JSON"""
    p = Path(path)
    files = []
    if p.is_file():
        files.append(p)
    elif p.exists() and not recursive:
        files.extend(p.glob("*.json"))
    elif p.exists() and recursive:
        files.extend(p.rglob("*.json"))
    for f in files:
        logging.info("Importing dashboard from file %s", f)
        try:
            with f.open() as data_stream:
                dashboard_import_export.import_dashboards(db.session, data_stream)
        except Exception as e:
            logging.error("Error when importing dashboard from file %s", f)
            logging.error(e)
Example #20
    def patch_files(self, path=None, replace_list=None):
        """
        Search and replace content in a list of files based on a pattern.
        replace_list = [
            {
                'filename_pattern': '*.css',
                'content_patterns': [
                    {
                        'search': '',
                        'replace': '',
                    }
                ]
            }
        ]
        """
        print(_('Patching files... '), end='')

        try:
            sys.stdout.flush()
        except AttributeError:
            pass

        if not path:
            path = self.get_install_path()

        if not replace_list:
            replace_list = self.replace_list

        path_object = Path(path)
        for replace_entry in replace_list or []:
            for path_entry in path_object.glob('**/{}'.format(
                    replace_entry['filename_pattern'])):
                if path_entry.is_file():
                    # PY3
                    # Don't use context processor to allow working on Python 2.7
                    # Update on Mayan EDMS version >= 4.0
                    file_object = fileinput.FileInput(force_text(path_entry),
                                                      inplace=True)
                    for line in file_object:
                        for pattern in replace_entry['content_patterns']:
                            print(line.replace(pattern['search'],
                                               pattern['replace']),
                                  end='')
                    file_object.close()
Example #21
def main(data, output):
    data = Path(data).resolve()
    output = Path(output).resolve()
    
    assert data != output, 'postprocessing would overwrite the original data; use another output path'
    
    if not output.exists():
        output.mkdir(parents=True)
    
    predictions = sorted(data.glob('prediction_*.nii.gz'))
    for pred in tqdm(predictions):
        vol_nii = nib.load(str(pred))
        affine = vol_nii.affine
        vol = vol_nii.get_data()
        vol = post_processing(vol)
        vol_nii = nib.Nifti1Image(vol, affine)
        
        vol_nii_filename = output / pred.name
        vol_nii.to_filename(str(vol_nii_filename))
Example #22
def import_dashboards(path, recursive=False):
    """Import dashboards from JSON"""
    p = Path(path)
    files = []
    if p.is_file():
        files.append(p)
    elif p.exists() and not recursive:
        files.extend(p.glob('*.json'))
    elif p.exists() and recursive:
        files.extend(p.rglob('*.json'))
    for f in files:
        logging.info('Importing dashboard from file %s', f)
        try:
            with f.open() as data_stream:
                dashboard_import_export.import_dashboards(
                    db.session, data_stream)
        except Exception as e:
            logging.error('Error when importing dashboard from file %s', f)
            logging.error(e)
Example #23
def main(args):
    output_dir = Path(__file__).parent.parent / 'output'
    output_dir.mkdir(parents=True, exist_ok=True)

    if args.persist:
        solution_dir = Path(__file__).parent
        persist(args.persist, output_dir, solution_dir)
        return

    # erase previous results
    for f in output_dir.glob('./*.out'):
        f.unlink()

    jobs = [
        delayed(solve)(input_file, output_dir, args.checker)
        for input_file in args.input_files
    ]
    cpus = -1 if args.parallel else 1
    Parallel(n_jobs=cpus)(jobs)
Example #24
    def import_dashboards(path: str, recursive: bool, username: str) -> None:
        """Import dashboards from ZIP file"""
        from superset.dashboards.commands.importers.v0 import ImportDashboardsCommand

        path_object = Path(path)
        files: List[Path] = []
        if path_object.is_file():
            files.append(path_object)
        elif path_object.exists() and not recursive:
            files.extend(path_object.glob("*.json"))
        elif path_object.exists() and recursive:
            files.extend(path_object.rglob("*.json"))
        if username is not None:
            g.user = security_manager.find_user(username=username)
        contents = {path.name: open(path).read() for path in files}
        try:
            ImportDashboardsCommand(contents).run()
        except Exception:  # pylint: disable=broad-except
            logger.exception("Error when importing dashboard")
Example #25
class DS(Dataset):
    def __init__(self, images, labels):
        self.images = Path(images)
        self.labels = open(labels).readlines()
        self.n_images = len(list(self.images.glob('*.jpg')))

    def __len__(self):
        return self.n_images

    def __getitem__(self, idx):
        'Return the frame pair (idx, idx + 1) and the speed label of the second frame'
        if idx != 0: idx -= 1
        f1 = f'{self.images}\\frame_{str(idx)}.jpg'
        f2 = f'{self.images}\\frame_{str(idx + 1)}.jpg'
        image1 = Image.open(f1)
        image2 = Image.open(f2)
        img1 = transform(image1)
        img2 = transform(image2)
        x = torch.cat((img1, img2))
        y = float(self.labels[idx + 1].split()[0])
        return x, torch.Tensor([y])
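A usage sketch for the dataset above; note that it assumes a module-level torchvision transform named `transform`, and the Windows-style paths below are placeholders:

from torch.utils.data import DataLoader

# Each item is a pair of consecutive frames plus the speed label.
ds = DS(images=r'data\frames', labels='train.txt')
loader = DataLoader(ds, batch_size=16, shuffle=True)
x, y = ds[0]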
Example #26
def import_dashboards(path, recursive, username):
    """Import dashboards from JSON"""
    p = Path(path)
    files = []
    if p.is_file():
        files.append(p)
    elif p.exists() and not recursive:
        files.extend(p.glob('*.json'))
    elif p.exists() and recursive:
        files.extend(p.rglob('*.json'))
    if username is not None:
        g.user = security_manager.find_user(username=username)
    for f in files:
        logging.info('Importing dashboard from file %s', f)
        try:
            with f.open() as data_stream:
                dashboard_import_export.import_dashboards(
                    db.session, data_stream)
        except Exception as e:
            logging.error('Error when importing dashboard from file %s', f)
            logging.error(e)
Example #27
def import_dashboards(path: str, recursive: bool, username: str) -> None:
    """Import dashboards from JSON"""
    from superset.utils import dashboard_import_export

    path_object = Path(path)
    files = []
    if path_object.is_file():
        files.append(path_object)
    elif path_object.exists() and not recursive:
        files.extend(path_object.glob("*.json"))
    elif path_object.exists() and recursive:
        files.extend(path_object.rglob("*.json"))
    if username is not None:
        g.user = security_manager.find_user(username=username)
    for file_ in files:
        logger.info("Importing dashboard from file %s", file_)
        try:
            with file_.open() as data_stream:
                dashboard_import_export.import_dashboards(data_stream)
        except Exception as ex:  # pylint: disable=broad-except
            logger.error("Error when importing dashboard from file %s", file_)
            logger.error(ex)
Example #28
    def refresh(self):
        card_objs = []
        cc_path = Path(settings.CONTENT_CARDS_PATH, 'content')
        with transaction.atomic(using=self.db):
            self.all().delete()
            cc_files = cc_path.glob('*/*.json')
            for ccf in cc_files:
                path_data = get_data_from_file_path(ccf)
                with ccf.open(encoding='utf-8') as ccfo:
                    data = json.load(ccfo)

                card_objs.append(ContentCard(
                    id=path_data['page_id'],
                    card_name=path_data['card_name'],
                    page_name=path_data['page_name'],
                    locale=path_data['locale'],
                    content=data.pop('html_content', ''),
                    data=data,
                ))
            self.bulk_create(card_objs)

        return len(card_objs)
Example #29
class FolderLoader(object):
    """ Loads images (resizes if needed) and their names from folder"""

    # Path.suffix includes the leading dot, so the entries must too.
    _supported_image_formats = ['.jpg', '.jpeg', '.png', '.tif']

    def __init__(self, folder, target_size=None):
        if target_size is not None:
            assert all([isinstance(target_size, (list, tuple)),
                        len(target_size) == 2]), \
                "target size must be a list or tuple of the format (x, y)"
        # Always set the attribute so _load can test it safely.
        self._target_size = target_size

        self._p = Path(folder)

    def _resize(self, image):
        """Resize the loaded image to a target size"""
        return image_resize(image=image,
                            target_x=self._target_size[0],
                            target_y=self._target_size[1])

    def _load(self, address):
        """Load an image from disk; return None on failure."""
        try:
            img = cv2.imread(address)
        except Exception:
            return None

        maybe_resized_image = self._resize(
            image=img) if self._target_size is not None else img

        return maybe_resized_image

    def generator(self):
        for f in self._p.glob('*.*'):
            if f.is_file() and f.suffix in self._supported_image_formats:
                img = self._load(address=str(f))
                name = f.stem
                if img is not None:
                    yield img, name
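Driving the generator might look like this (folder path and target size are placeholders):

loader = FolderLoader('images/', target_size=(224, 224))
for img, name in loader.generator():
    print(name, img.shape)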
Example #30
def list_files(input_dir, input_patt):
    """List all files in a directory with a specified extension.

    Parameters
        input_dir: string
            Full path of the directory of which the files are to be listed.
        input_patt: string or list of strings
            Extension(s) of the files to be listed.
    """
    input_dir = Path(input_dir)
    files_list = []

    if isinstance(input_patt, str):
        patterns = ['**/*' + input_patt]

    elif isinstance(input_patt, list):
        patterns = ['**/*' + i for i in input_patt]

    else:
        raise TypeError('input_patt must be a string or a list of strings')

    for patt in patterns:
        files_list.extend(input_dir.glob(pattern=patt))

    return [str(i) for i in files_list]
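For instance (hypothetical directory), this recursively lists all PNG and JPG files as strings:

paths = list_files('/data/raw', ['.png', '.jpg'])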
Example #31
def list_files(parent_dir, ext):
    """List all files in a directory with a specified extension.

    Parameters
        parent_dir: string
            Full path of the directory of which the files are to be listed.
        ext: string or list of strings
            Extension(s) of the files to be listed.
    """
    parent_dir = Path(parent_dir)
    files_list = []

    if isinstance(ext, str):
        patterns = ['**/*' + ext]

    elif isinstance(ext, list):
        patterns = ['**/*' + i for i in ext]

    else:
        raise TypeError('ext must be a string or a list of strings')

    for patt in patterns:
        files_list.extend(parent_dir.glob(pattern=patt))

    return files_list
Example #32
    def compare_dirs(self, path_a, path_b, skip_missing=False, timestep=None):
        """
        :param path_a
        :param path_b
        :param skip_missing (bool, default False). If True, ignore files that are in path_a and not in path_b
        :param timestep (default None). If passed, comparison happens only at the defined timestep(s)
        :type timestep int, datetime.datetime, list[datetime.datetime]
        """
        if timestep and isinstance(timestep, datetime.datetime):
            timestep = [timestep]
        if timestep and not isinstance(timestep,
                                       (int, datetime.datetime, Iterable)):
            raise ValueError(
                'timestep must be of type int for TSS, datetime.datetime or a range of dates for netCDF, but type {} was found'
                .format(str(type(timestep))))
        logger.info(
            'Comparing %s and %s %s [skip missing: %s]', path_a, path_b,
            '(from %s to %s)' % (min(timestep), max(timestep))
            if timestep and isinstance(timestep, Iterable) else timestep or '',
            skip_missing)
        path_a = Path(path_a)
        path_b = Path(path_b)
        for fa in itertools.chain(*(path_a.glob(e) for e in self.glob_expr)):
            fb = path_b.joinpath(fa.name)
            if not fb.exists():
                if skip_missing:
                    logger.info('skipping %s as it is not in %s', fb.name,
                                path_b.as_posix())
                    continue
                else:
                    message = '{} is missing in {}'.format(
                        fb.name, path_b.as_posix())
                    if self.for_testing:
                        assert False, message
                    else:
                        self.errors.append(message)
                        continue

            self.compare_files(fa.as_posix(), fb.as_posix(), timestep)
Example #33
def gettags(paths):
    # Collect tag files into a separate list; appending to `paths` while
    # iterating over it would re-scan entries and later mix the original
    # directory paths in with the tag files.
    tagfiles = []
    for fpath in paths:
        p = Path(fpath)
        for filepath in p.glob('**/*.tag'):
            filepath = str(filepath)
            if os.path.isfile(filepath):
                print("[ INFO ] tag file path:", filepath)
                tagfiles.append(filepath)
    labels = []
    for tagfile in tagfiles:
        config = configparser.ConfigParser()
        defaultencoding = locale.getpreferredencoding(False)
        try:
            config.read(tagfile, encoding=defaultencoding)
        except UnicodeDecodeError:
            # Fall back to UTF-8 when the platform default encoding fails.
            config.read(tagfile, encoding='utf-8')
        for i in config.sections():
            label = {}
            print("[ INFO ] Label:", i)
            label["Label"] = i
            tags = []
            for j in config.items(i):
                tagitem = {}
                tagitem["Key"] = j[0]
                tagitem["Value"] = j[1]
                print("[ INFO ] tag: ", j[0] + " = " + j[1])
                tags.append(tagitem)
            label["Tags"] = tags
            labels.append(label)
    print("[ INFO ] Json Dump data is:", json.dumps(labels))
    return json.dumps(labels)
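The INI-style layout the parser above expects, shown as a minimal hypothetical tag file; each section becomes a Label and each key/value pair a Tag:

sample = "[Production]\nowner = alice\ncost-center = 42\n"
with open("example.tag", "w", encoding="utf-8") as f:
    f.write(sample)
print(gettags(["."]))  # scans **/*.tag under "." and dumps the labels as JSON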
Example #34
    def refresh(self):
        card_objs = []
        cc_path = Path(settings.CONTENT_CARDS_PATH, 'content')
        with transaction.atomic(using=self.db):
            self.all().delete()
            cc_files = cc_path.glob('*/*.json')
            for ccf in cc_files:
                path_data = get_data_from_file_path(ccf)
                with ccf.open(encoding='utf-8') as ccfo:
                    data = json.load(ccfo)

                card_objs.append(
                    ContentCard(
                        id=path_data['page_id'],
                        card_name=path_data['card_name'],
                        page_name=path_data['page_name'],
                        locale=path_data['locale'],
                        content=data.pop('html_content', ''),
                        data=data,
                    ))
            self.bulk_create(card_objs)

        return len(card_objs)
def Deepzoom(image_or_directory_path, create_static_cache=False, **kwargs):
    """
    Returns a Deepzoom interface corresponding to the given image. Can accept
    either a filepath (and will read tiles on the fly) or a directory path (that
    contains an image with the same name as the directory) that contains/will
    contain a static DeepZoom image directory.
    :param image_or_directory_path: String or pathlib object
    :param create_static_cache: If True, creates a static DeepZoom image
     directory structure *around* the given image (or in the given directory).
     This is done lazily, saving each tile as it is requested.
    :param kwargs: Same as DeepzoomInterface
    :return: DeepZoom
    """
    p = Path(image_or_directory_path).resolve()
    img = None
    if p.is_file():
        img = _ImageFactory(p)
    elif p.is_dir():
        fList = list(p.glob('%s.*' % p.name))
        if len(fList) == 0:
            raise IOError('Invalid Deepzoom directory (%s). '
                          'Must contain an image named (%s) to be valid.'
                          '' % (p, '%s.<EXT>' % p.name))
        for f in fList:
            try:
                img = _ImageFactory(f)
            except IOError:
                pass

    if img is None:
        raise IOError('Invalid Deepzoom target (%s). '
                      'Not a supported image format.' % p)

    if create_static_cache:
        # Wrap the generator so that it saves each tile on get_tile().
        dzGen = _CachedInterface(img, **kwargs)
    else:
        dzGen = _DeepzoomInterface(img, **kwargs)
    return dzGen
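A call sketch, with hypothetical file names; keyword arguments pass through to the interface classes:

dz = Deepzoom('slides/sample.tif')                    # read tiles on the fly
dz_cached = Deepzoom('slides/sample', create_static_cache=True)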