Example #1
File: setup.py Project: jmartign/clufter
 def _pkg_prepare_install(self):
     build_lib = self.get_finalized_command('build_binary').build_lib
     # two-staged copy in case of built_files
     for filedef in self.built_files:
         src = src_orig = self.pkg_params[filedef['src']]
         src = path_basename(src_orig)
         assert src == src_orig, "built_files contains dirs"
         src = path_join(build_lib, src)
         dst = self.pkg_params[filedef['dst']]
         dst = path_join(build_lib, path_basename(dst))
         if src != dst:
             self._pkg_prepare_file(src, dst)
         self.pkg_params[filedef['src']] = dst
     icmd = self.distribution.get_command_obj('install', create=False)
     for filedef in (self.data_files + self.built_files):
         src = self.pkg_params[filedef['src']]
         dst = self.pkg_params[filedef['dst']]
         no_glob = all(c not in path_basename(src) for c in '?*')
         if dst.startswith(sys_prefix):
             dst = path_join(icmd.install_base,
                             dst[len(sys_prefix) + 1:])
         self.distribution.data_files.append((path_dirname(dst), [
             path_join(path_dirname(src), path_basename(dst)),
         ]) if no_glob else (dst, glob(src)))
         if DEBUG:
             print(
                 DBGPFX + "\tinstall data_files: %s" %
                 self.distribution.data_files)
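Note: these snippets rely on aliased standard-library imports that the excerpts do not show. A likely preamble (an assumption, since each original file's import block is omitted) would be:

# Assumed import aliases used throughout these examples.
from os.path import basename as path_basename, dirname as path_dirname, join as path_join
from sys import prefix as sys_prefix
from glob import glob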
Example #2
File: setup.py Project: jnpkrn/clufter
 def _pkg_prepare_install(self):
     build_lib = self.get_finalized_command('build_binary').build_lib
     # two-staged copy in case of built_files
     for filedef in self.built_files:
         src = src_orig = self.pkg_params[filedef['src']]
         src = path_basename(src_orig)
         assert src == src_orig, "built_files contains dirs"
         src = path_join(build_lib, src)
         dst = self.pkg_params[filedef['dst']]
         dst = path_join(build_lib, path_basename(dst))
         if src != dst:
             self._pkg_prepare_file(src, dst)
         self.pkg_params[filedef['src']] = dst
     icmd = self.distribution.get_command_obj('install', create=False)
     for filedef in (self.data_files + self.built_files):
         src = self.pkg_params[filedef['src']]
         dst = self.pkg_params[filedef['dst']]
         no_glob = all(c not in path_basename(src) for c in '?*')
         if dst.startswith(sys_prefix):
             dst = path_join(icmd.install_base, dst[len(sys_prefix)+1:])
         self.distribution.data_files.append((
             path_dirname(dst), [
                 path_join(
                     path_dirname(src),
                     path_basename(dst)
                 ),
             ]
         ) if no_glob else (
             dst,
             glob(src)
         ))
         if DEBUG:
             print(DBGPFX + "\tinstall data_files: %s"
                   % self.distribution.data_files)
Example #3
def _win_xbuild_path_x86():
    if not is_windows():
        return None

    try:
        with win_open_reg_key_hklm(r'SOFTWARE\WOW6432Node\Mono') as key:

            values = win_values_dict_reg_key(key)
            install_root = values.get('SdkInstallRoot', None)
            if install_root is None: return None

            dirs = path_join(install_root.strip(), 'lib', 'mono', 'xbuild',
                             '*') + path_sep
            results = []

            for dir in (i for i in glob(dirs)
                        if MATCH_2VER_REGEX.match(path_basename(i.rstrip(path_sep)))):
                bin_dir = path_join(dir, 'bin')
                if path_isdir(bin_dir):
                    results += parse_xbuild_ver_output(
                        path_join(dir, 'bin', 'xbuild.exe'), arch=ARCH32)

            return results
    except OSError:
        return None
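The trailing path_sep appended to the glob pattern restricts the matches to directories. A minimal sketch, assuming a hypothetical install root:

# glob() only returns names ending in the separator when the pattern does,
# so plain files under the xbuild directory are skipped.
from glob import glob
from os.path import join as path_join, sep as path_sep

pattern = path_join('/usr/lib/mono/xbuild', '*') + path_sep  # hypothetical root
print(glob(pattern))  # e.g. ['/usr/lib/mono/xbuild/14.0/'] - directories only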
Example #4
def test_all_imports_pyx():
   """ Tests: test_all_imports_pyx: for rebuild, syntax correctness and internal imports
   """
   print('::: TEST: test_all_imports_pyx()')
   remove_files = []
   remove_dirs = []
   all_modules_path = []
   for root, dirnames, filenames in walk(ROOT_PACKAGE_PATH):
      all_modules_path.extend(glob(root + '/*.pyx'))
   for pyx_module_file_path in all_modules_path:
      module_filename = path_basename(pyx_module_file_path)
      module_filename_no_ext = path_splitext(module_filename)[0]

      cython_extension_module_path, cython_module_c_file_path, cython_build_dir_path = build_cython_extension(
         pyx_module_file_path,
         cython_force_rebuild=True
      )

      so_loader = ExtensionFileLoader(module_filename_no_ext, cython_extension_module_path)
      so_loader.load_module(module_filename_no_ext)
      # add for cleanup
      remove_files.append(cython_module_c_file_path)
      remove_dirs.append(cython_build_dir_path)

   # Cleanup
   try:
      for file_ in remove_files:
         if path_exists(file_):
            os_remove(file_)
      for dir_ in remove_dirs:
         if path_exists(dir_):
            rmtree(dir_)
   except Exception as err:
      raise Exception('test_all_imports_pyx', 'Could not cython_clean_up: Exception: <{}>'.format(err))
Example #5
File: app.py Project: Gallardot/blog
    def _find_handlers(self):
        handlers = []

        # handlers directory
        paths = [BASE_PATH] + \
                [p for (b, p) in subdirs(BASE_PATH) if b not in IGNORE_PATHS]

        # search & load handler
        for path in paths:
            pys = [f.split(".")[0] for f in listdir(path) if f.endswith(".py") and "__init__" not in f]
            package_prefix = path_basename(path) + "." if path != BASE_PATH else ""

            for py in pys:
                module = __import__(package_prefix + py)

                for n, o in module.__dict__.items():
                    if isclass(o) and issubclass(o, BaseHandler) and hasattr(o, "url"):
                        # support multi urls
                        urls = o.url if hasattr(o.url, "__iter__") else [o.url]
                        for url in urls: handlers.append((url, o))

                        log_info("Load {0} from module {1} ...".format(o.__name__, o.__module__))

        # sort handlers by their declared order
        handlers.sort(key=lambda x: x[1].order)
        return handlers
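For context, a hypothetical handler that this discovery loop would pick up: any BaseHandler subclass exposing a class-level url (a single string or an iterable of strings) and an order attribute consumed by the final sort. A minimal sketch, with BaseHandler stubbed in:

# Hypothetical handler module; BaseHandler stands in for the app's real base class.
class BaseHandler(object):
    pass

class IndexHandler(BaseHandler):
    url = ("/", "/index")  # multiple URLs are supported via the iterable check
    order = 0              # consumed by the final sort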
Example #6
def standard_output_version(version, dependencies, output_file=None):
    main_module_name = path_basename(sys.argv[0])
    version_string = None
    if dependencies:
        deps = { }

        def get_dependencies_set(this_module_name, deps_list):
            for module_name in deps_list:
                if module_name not in deps:
                    m = None
                    try:
                        m = __import__(module_name, globals(), locals(),
                                       ['__version__', '__dependencies__'])
                    except ImportError:
                        print "Failed to import %s, listed in dependencies " \
                            "for %s" % (module_name, this_module_name)
                        exit(1)
                    else:
                        # Test if the module actually has a version attribute
                        try:
                            version_ = m.__version__
                        except AttributeError as e:
                            print 'No __version__ attribute for tool %s' \
                                % m.__name__
                            print ' >> %s' % str(e)
                        else:
                            deps[module_name] = m

                    if m is not None:
                        try:
                            get_dependencies_set(module_name,
                                                 m.__dependencies__)
                        except AttributeError:
                            pass

        get_dependencies_set(main_module_name, dependencies)

        module_names = deps.keys()
        module_names.sort()

        module_list = ', '.join(['%s %s' % (deps[m].__name__, deps[m].__version__) for m in module_names])
        version_string = '%s %s (%s)' % (main_module_name, version, module_list)
    else:
        version_string = '%s %s' % (main_module_name, version)

    # If we are given an output file, write the versions info there if
    # either:
    #   the file doesn't exist already, or
    #   the file contains different data
    # If we are given no output file, just write to stdout.

    print version_string
    if output_file is not None:
        if path_exists(output_file):
            with open(output_file, "rb") as f:
                old_version = f.read()
            if old_version == version_string:
                return
        with open(output_file, "wb") as f:
            f.write(version_string)
Example #7
def versions_from_parentdir(_parentdir_prefix, root, verbose=False):
   # Source tarballs conventionally unpack into a directory that includes both the project name and a version string.
   dirname = path_basename(root)
   if not dirname.startswith(_parentdir_prefix):
      if verbose:
         print('guessing rootdir is <{}>, but <{}> does not start with prefix <{}>'.format(root, dirname, _parentdir_prefix))
      return None
   return {'version': dirname[len(_parentdir_prefix):], 'full': ''}
Example #8
def versions_from_parentdir(parentdir_prefix_, root, verbose=False):
   # Source tarballs conventionally unpack into a directory that includes both the project name and a version string.
   dirname = path_basename(root)
   if not dirname.startswith(parentdir_prefix_):
      if verbose:
         print('guessing rootdir is <{}>, but <{}> does not start with prefix <{}>'.format(root, dirname, parentdir_prefix_))
      return None
   return {'version': dirname[len(parentdir_prefix_):], 'full': ''}
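A worked example of the prefix stripping done by the two near-identical helpers above (all values hypothetical): unpacking myproject-1.2.3.tar.gz produces a directory whose name minus the prefix is the version.

# Hypothetical values demonstrating the parentdir-based version guess.
from os.path import basename as path_basename

parentdir_prefix_ = 'myproject-'
root = '/tmp/myproject-1.2.3'
dirname = path_basename(root)            # 'myproject-1.2.3'
print(dirname[len(parentdir_prefix_):])  # -> '1.2.3'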
Example #9
File: setup.py Project: jnpkrn/clufter
 def _pkg_prepare_build(self):
     package_data = self.package_data or {}
     for pkg_name in package_data:
         dst_top = self.distribution.package_dir.get('', '')
         dst_pkg = path_join(
                       dst_top,
                       self.distribution.package_dir.get(pkg_name, pkg_name)
         )
         if DEBUG: print(DBGPFX + "\tbuild dst_pkg %s" % dst_pkg)
         for filedef in package_data[pkg_name]:
             self._pkg_prepare_file(
                 self.pkg_params[filedef['src']],
                 path_join(dst_pkg, self.pkg_params[filedef['dst']]),
                 filedef.get('substitute', False)
             )
             self.distribution.package_data[pkg_name].append(
                 self.pkg_params[filedef['dst']]
             )
     for filedef in (self.data_files + self.buildonly_files):
         src = self.pkg_params[filedef['src']]
         src_basename = path_basename(src)
         dst_basename = path_basename(self.pkg_params[filedef['dst']])
         substitute = filedef.get('substitute', False)
         if all(c not in src_basename for c in '?*'):
             if src_basename != dst_basename or substitute:
                 self._pkg_prepare_file(
                     self.pkg_params[filedef['src']],
                     path_join(
                         path_dirname(self.pkg_params[filedef['src']]),
                         dst_basename
                     ),
                     substitute
                 )
             # eliminate sources from which we prepared files so they
             # will not end up at build dir and, in turn, installed;
             # consider only one-level of package at maximum
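              # the lambda splits src at the first path separator: hd is the top-level dir, tl the remainder ('' when src has no separator)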
             hd, tl = (lambda s, r='': (s, r))(*src.split(sep, 1))
             if not tl:
                 hd, tl = '', filedef['src']
             try:
                 self.distribution.package_data.get(hd, []).remove(tl)
             except ValueError:
                 pass
         else:
             assert not substitute
Example #10
File: cozyfs.py Project: petergtz/cozy
    def update_node(self, node, old_path):
        basename = path_basename(node.path)
        if self.__has_current_node(node.node_id):
            self.storage.db_execute("update Nodes set nodename = ?, parent_node_id = ? where node_id = ? and backup_id = ? and version = ?", (basename, node.parent_node_id, node.node_id, self.backup_id, self.versions[0]))
        else:
            self.storage.db_execute("insert into Nodes (backup_id, version, node_id, nodename, parent_node_id, inode_number) values (?,?,?,?,?,?) ", (self.backup_id, self.versions[0], node.node_id, basename, node.parent_node_id, node.inode_number))

        del self.cached_nodes[old_path]
        if node.path != '':
            self.cached_nodes[node.path] = node
Example #11
File: utils.py Project: Python3pkg/LCONF
def build_cython_extension(py_or_pyx_file_path, cython_force_rebuild=True):
    """ Build a cython extension from a `.py` or `.pyx` file

   - build will be done in a sub-folder named `_pyxbld` in the py_or_pyx_file_path

   :param py_or_pyx_file_path: (str) path to a `.py` or `.pyx` file
   :param cython_force_rebuild: (bool) If True the cython extension is rebuild even if it was already build
   :return: (tuple) cython_extension_module_path, cython_module_c_file_path, cython_build_dir_path
   """
    module_dir = path_dirname(py_or_pyx_file_path)
    module__cython_name = path_splitext(path_basename(py_or_pyx_file_path))[0]
    cython_module_c_file_path = path_join(module_dir,
                                          module__cython_name + '.c')
    cython_build_dir_path = path_join(module_dir, '_pyxbld')

    args = ['--quiet', 'build_ext', '--build-lib', module_dir]
    if cython_force_rebuild:
        args.append('--force')
    dist = Distribution({'script_name': None, 'script_args': args})
    dist.ext_modules = [
        Extension(name=module__cython_name, sources=[py_or_pyx_file_path])
    ]
    dist.cmdclass = {'build_ext': cython_build_ext}
    build = dist.get_command_obj('build')
    build.build_base = cython_build_dir_path

    try:
        dist.parse_command_line()
    except DistutilsArgError as err:
        raise Err('utils.build_cython_extension', [
            'py_or_pyx_file_path: <{}>'.format(py_or_pyx_file_path),
            '  DistutilsArgError: <{}>'.format(err)
        ])

    try:
        obj_build_ext = dist.get_command_obj('build_ext')
        dist.run_commands()
        cython_extension_module_path = obj_build_ext.get_outputs()[0]
        if path_dirname(py_or_pyx_file_path) != module_dir:
            raise Err('utils.build_cython_extension', [
                'py_or_pyx_file_path: <{}>'.format(py_or_pyx_file_path),
                '  <module_dir> differs from final <cython_module_dir>',
                '   module_dir: <{}>'.format(module_dir),
                '   cython_module_dir: <{}>'.format(
                    path_dirname(py_or_pyx_file_path))
            ])
    except Exception as err:
        raise Err('utils.build_cython_extension', [
            'py_or_pyx_file_path: <{}>'.format(py_or_pyx_file_path),
            '  Exception: <{}>'.format(err)
        ])

    return cython_extension_module_path, cython_module_c_file_path, cython_build_dir_path
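A usage sketch (the .pyx path is hypothetical), assuming the helper above is importable and Cython is installed; it mirrors how the test functions elsewhere in this collection load the built extension:

# Hypothetical usage of build_cython_extension defined above.
from importlib.machinery import ExtensionFileLoader
from os.path import basename as path_basename, splitext as path_splitext

pyx = 'mypkg/helpers.pyx'  # hypothetical source file
ext_path, c_path, build_dir = build_cython_extension(pyx, cython_force_rebuild=True)
name = path_splitext(path_basename(pyx))[0]
module = ExtensionFileLoader(name, ext_path).load_module(name)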
Example #12
def test_all_imports_py():
   """ Tests: test_all_imports_py: for syntax correctness and internal imports
   """
   print('::: TEST: test_all_imports_py()')
   all_modules_path = []
   for root, dirnames, filenames in os_walk(ROOT_PACKAGE_PATH):
      all_modules_path.extend(glob(root + '/*.py'))
   for py_module_file_path in all_modules_path:
      module_filename = path_basename(py_module_file_path)
      module_filename_no_ext = path_splitext(module_filename)[0]

      py_loader = SourceFileLoader(module_filename_no_ext, py_module_file_path)
      py_loader.load_module(module_filename_no_ext)
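load_module() has been deprecated since Python 3.4; an equivalent sketch of the loop body using importlib.util (reusing the loop variables above):

# Non-deprecated equivalent of SourceFileLoader(...).load_module(...), Python 3.5+.
from importlib.util import spec_from_file_location, module_from_spec

spec = spec_from_file_location(module_filename_no_ext, py_module_file_path)
module = module_from_spec(spec)
spec.loader.exec_module(module)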
Example #13
File: setup.py Project: jmartign/clufter
 def _pkg_prepare_build(self):
     for pkg_name, filedefs in (self.package_data or {}).iteritems():
         dst_top = self.distribution.package_dir.get('', '')
         dst_pkg = path_join(
             dst_top,
             self.distribution.package_dir.get(pkg_name, pkg_name))
         if DEBUG: print(DBGPFX + "\tbuild dst_pkg %s" % dst_pkg)
         for filedef in filedefs:
             self._pkg_prepare_file(
                 self.pkg_params[filedef['src']],
                 path_join(dst_pkg, self.pkg_params[filedef['dst']]),
                 filedef.get('substitute', False))
             self.distribution.package_data[pkg_name].append(
                 self.pkg_params[filedef['dst']])
     for filedef in (self.data_files + self.buildonly_files):
         src = self.pkg_params[filedef['src']]
         src_basename = path_basename(src)
         dst_basename = path_basename(self.pkg_params[filedef['dst']])
         substitute = filedef.get('substitute', False)
         if all(c not in src_basename for c in '?*'):
             if src_basename != dst_basename or substitute:
                 self._pkg_prepare_file(
                     self.pkg_params[filedef['src']],
                     path_join(
                         path_dirname(self.pkg_params[filedef['src']]),
                         dst_basename), substitute)
             # eliminate sources from which we prepared files so they
             # will not end up at build dir and, in turn, installed;
             # consider only one-level of package at maximum
             hd, tl = (lambda s, r='': (s, r))(*src.split(sep, 1))
             if not tl:
                 hd, tl = '', filedef['src']
             try:
                 self.distribution.package_data.get(hd, []).remove(tl)
             except ValueError:
                 pass
         else:
             assert not substitute
Example #14
def test_all_imports_py():
    """ Tests: test_all_imports_py: for syntax correctness and internal imports
   """
    print('::: TEST: test_all_imports_py()')
    all_modules_path = []
    for root, dirnames, filenames in os_walk(ROOT_PACKAGE_PATH):
        all_modules_path.extend(glob(root + '/*.py'))
    for py_module_file_path in all_modules_path:
        module_filename = path_basename(py_module_file_path)
        module_filename_no_ext = path_splitext(module_filename)[0]

        py_loader = SourceFileLoader(module_filename_no_ext,
                                     py_module_file_path)
        py_loader.load_module(module_filename_no_ext)
Example #15
def build_cython_extension(py_or_pyx_file_path, cython_force_rebuild=True):
   """ Build a cython extension from a `.py` or `.pyx` file

   - build will be done in a sub-folder named `_pyxbld` in the py_or_pyx_file_path

   :param py_or_pyx_file_path: (str) path to a `.py` or `.pyx` file
   :param cython_force_rebuild: (bool) If True the cython extension is rebuild even if it was already build
   :return: (tuple) cython_extension_module_path, cython_module_c_file_path, cython_build_dir_path
   """
   module_dir = path_dirname(py_or_pyx_file_path)
   module__cython_name = path_splitext(path_basename(py_or_pyx_file_path))[0]
   cython_module_c_file_path = path_join(module_dir, module__cython_name + '.c')
   cython_build_dir_path = path_join(module_dir, '_pyxbld')

   args = ['--quiet', 'build_ext', '--build-lib', module_dir]
   if cython_force_rebuild:
      args.append('--force')
   dist = Distribution({'script_name': None, 'script_args': args})
   dist.ext_modules = [Extension(name=module__cython_name, sources=[py_or_pyx_file_path])]
   dist.cmdclass = {'build_ext': cython_build_ext}
   build = dist.get_command_obj('build')
   build.build_base = cython_build_dir_path

   try:
      dist.parse_command_line()
   except DistutilsArgError as err:
      raise Err('utils.build_cython_extension', [
         'py_or_pyx_file_path: <{}>'.format(py_or_pyx_file_path),
         '  DistutilsArgError: <{}>'.format(err)
      ])

   try:
      obj_build_ext = dist.get_command_obj('build_ext')
      dist.run_commands()
      cython_extension_module_path = obj_build_ext.get_outputs()[0]
      if path_dirname(py_or_pyx_file_path) != module_dir:
         raise Err('utils.build_cython_extension', [
            'py_or_pyx_file_path: <{}>'.format(py_or_pyx_file_path),
            '  <module_dir> differs from final <cython_module_dir>',
            '   module_dir: <{}>'.format(module_dir),
            '   cython_module_dir: <{}>'.format(path_dirname(py_or_pyx_file_path))
         ])
   except Exception as err:
      raise Err('utils.build_cython_extension', [
         'py_or_pyx_file_path: <{}>'.format(py_or_pyx_file_path),
         '  Exception: <{}>'.format(err)
      ])

   return cython_extension_module_path, cython_module_c_file_path, cython_build_dir_path
Example #16
def _env_msbuild_paths():
    values = []

    msbuild = os_environ.get('MSBUILD_PATH', None)
    xbuild = os_environ.get('XBUILD_PATH', None)

    if msbuild:
        if path_basename(msbuild).lower() == 'dotnet':
            values += finder_util.parse_dotnetcli_msbuild_ver_output(
                msbuild, ARCH32)
        else:
            values += finder_util.parse_msbuild_ver_output(msbuild, ARCH32)

    if xbuild:
        values += finder_util.parse_xbuild_ver_output(xbuild, ARCH32)

    return values
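The basename check distinguishes the dotnet CLI driver (which hosts MSBuild) from a standalone msbuild binary. A minimal sketch with hypothetical paths:

# Dispatch on the executable's basename, as in _env_msbuild_paths above.
from os.path import basename as path_basename

for exe in ('/usr/local/bin/dotnet', '/usr/bin/msbuild'):
    is_dotnet_cli = path_basename(exe).lower() == 'dotnet'
    print(exe, '->', 'dotnet CLI msbuild' if is_dotnet_cli else 'standalone msbuild/xbuild')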
Example #17
    def getSkinData(skinList, skinDir, currentSkinValue):
        SkinFinder.skinList = skinList[:]  # we don't want the passed list to be modified, let's use a copy instead
        SkinFinder.skinDir = skinDir

        if currentSkinValue == "":
            firstRun = True
        else:
            firstRun = False

        # build a list of the filenames from our (default) skin list
        skinListFiles = [x[0] for x in SkinFinder.skinList]

        # try to find additional skins and add them to our list
        path = resolveFilename(SCOPE_CURRENT_PLUGIN,
                               ''.join([skinDir, "*.xml"]))
        for fileName in iglob(path):
            if fileName not in skinListFiles:
                baseName = path_basename(fileName)
                SkinFinder.skinList.append((fileName, baseName))
                if not firstRun:
                    skinListFiles.append(fileName)

        if not firstRun:
            # try to find the config value in our list of files
            if currentSkinValue in skinListFiles:
                skinIndex = skinListFiles.index(currentSkinValue)
            else:
                # fall back to the default skin
                print(
                    '[SkinFinder] unable to find skin file %s, trying to load a default skin'
                    % currentSkinValue)
                skinIndex = SkinFinder.getDefaultSkinEntry()
        else:
            # get the index of the detected skin in our list of default skins
            skinIndex = SkinFinder.getDefaultSkinEntry()

        if skinIndex is not None:
            skinFile = SkinFinder.skinList[skinIndex][0]
            print('[SkinFinder] found skin file', skinFile)
            return skinFile, SkinFinder.skinList
        else:
            print('[SkinFinder] unable to find any skin!')
            return None
Example #18
def add_overlay(plt,
                overlay,
                overlay_scale=None,
                overlay_offset=0.,
                overlay_style='o',
                overlay_label=None):
    """Overlay data points from file over existing plot

    Args:
        plt (matplotlib.pyplot): Pyplot object with target figure/axes active
        overlay (str): Path to overlay data file
        overlay_scale (float): y-axis scale factor for overlay data. If None,
            scale to match maximum and print this value.
        overlay_offset (float): x-axis offset for overlay data
        overlay_style (str): Matplotlib short code for marker/line style
        overlay_label (str): Legend label for data overlay (default: filename)

    """

    if galore.formats.is_csv(overlay):
        xy_data = galore.formats.read_csv(overlay)
    else:
        xy_data = galore.formats.read_txt(overlay)

    ax = plt.gca()
    if overlay_scale is None:
        ymax = np.max(xy_data[:, 1])
        lines = ax.lines
        ymax_plot = max(max(line.get_xydata()[:, 1]) for line in lines)

        overlay_scale = ymax_plot / ymax
        logging.info("Scaling overlay intensity by {0}".format(overlay_scale))

    if overlay_label is None:
        overlay_label = path_basename(overlay)

    plt.plot(xy_data[:, 0] + overlay_offset,
             xy_data[:, 1] * overlay_scale,
             overlay_style,
             label=overlay_label)

    return plt
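A usage sketch (the overlay file name is hypothetical), assuming add_overlay above is importable and a curve has already been plotted on the current axes:

# Hypothetical overlay of experimental data on an existing matplotlib plot.
import matplotlib.pyplot as plt

plt.plot([0, 1, 2], [0.0, 2.0, 0.5], label='calculated')  # pre-existing curve
plt = add_overlay(plt, 'experiment.csv', overlay_offset=0.3)
plt.legend()
plt.show()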
Example #19
def test_all_py_to_cython_compiled():
   """ Tests: test_all_py_to_cython_compiled: for syntax correctness and internal imports: all .py files compiled with
   cython: except '__init__'
   """
   print('::: TEST: test_all_py_to_cython_compiled()')
   remove_files = []
   remove_dirs = []

   all_modules_path = []
   for root, dirnames, filenames in walk(ROOT_PACKAGE_PATH):
      all_modules_path.extend(glob(root + '/*.py'))
   for py_module_file_path in all_modules_path:
      module_filename = path_basename(py_module_file_path)
      module_filename_no_ext = path_splitext(module_filename)[0]
      if '__init__' in module_filename:
         continue

      cython_extension_module_path, cython_module_c_file_path, cython_build_dir_path = build_cython_extension(
         py_module_file_path,
         cython_force_rebuild=True
      )

      # noinspection PyUnusedLocal
      so_loader = ExtensionFileLoader(module_filename_no_ext, cython_extension_module_path)

      # sometimes (if an extension module was built previously) the loading does not work with 'nose tests'
      # so_loader.load_module(module_filename_no_ext)

      # add for cleanup: including the .so extension file
      remove_files.extend([cython_module_c_file_path, cython_extension_module_path])
      remove_dirs.append(cython_build_dir_path)

   # Cleanup
   try:
      for file_ in remove_files:
         if path_exists(file_):
            os_remove(file_)
      for dir_ in remove_dirs:
         if path_exists(dir_):
            rmtree(dir_)
   except Exception as err:
      raise Exception('test_all_py_to_cython_compiled', 'Could not cython_clean_up: Exception: <{}>'.format(err))
Example #20
	def getSkinData(skinList, skinDir, currentSkinValue):
		SkinFinder.skinList = skinList[:] # we don't want the passed list to be modified, let's use a copy instead
		SkinFinder.skinDir = skinDir
		
		if currentSkinValue == "":
			firstRun = True
		else:
			firstRun = False
			
		# build a list of the filenames from our (default) skin list
		skinListFiles = [x[0] for x in SkinFinder.skinList]
		
		# try to find additional skins and add them to our list
		path = resolveFilename(SCOPE_CURRENT_PLUGIN, ''.join([skinDir, "*.xml"]))
		for fileName in iglob(path):
			if fileName not in skinListFiles:
				baseName = path_basename(fileName)
				SkinFinder.skinList.append((fileName, baseName))
				if not firstRun:
					skinListFiles.append(fileName)
					
		if not firstRun:
			# try to find the config value in our list of files
			if currentSkinValue in skinListFiles:
				skinIndex = skinListFiles.index(currentSkinValue)
			else:
				# fall back to the default skin
				print '[SkinFinder] unable to find skin file %s, trying to load a default skin' % currentSkinValue
				skinIndex = SkinFinder.getDefaultSkinEntry()
		else:
			# get the index of the detected skin in our list of default skins
			skinIndex = SkinFinder.getDefaultSkinEntry()
			
		if skinIndex is not None:
			skinFile = SkinFinder.skinList[skinIndex][0]
			print '[SkinFinder] found skin file', skinFile
			return skinFile, SkinFinder.skinList
		else:
			print '[SkinFinder] unable to find any skin!'
			return None
Example #21
def reformat_prog():
    print('\t', '*' * 10, 'Reformat program', '*' * 10)
    master_filename = input(
        "Enter the filepath of master field list to use"
        " (TB import format) or press Enter to use default\n"
        "\t('master/TB_default_fields.csv'):\n>>> ")
    reorder_filename = input(
        "Enter the filepath of list to reorder "
        "according to the TB field format or press Enter to use default\n"
        "(whatever lone file is in the 'input_files' directory):\n>>> ")

    if reorder_filename == "":
        avail_input_files = listdir('./input_data/')
        if len(avail_input_files) == 1:
            reorder_filename = avail_input_files[0]
            input_list_type, df_to_reorder = lr.list_read(
                './input_data/' + avail_input_files[0])
        else:
            raise InputDirectoryError(
                "There must be exactly one file in the 'input_data' dir.")
    elif path_exists(reorder_filename):
        [input_list_type, df_to_reorder] = lr.list_read(reorder_filename)
    else:
        raise InputDirectoryError("Invalid response. Start over.")

    if master_filename:
        df_out = fr.field_reorder(df_to_reorder, master_filename)
    else:
        df_out = fr.field_reorder(df_to_reorder)

    if "master" in path_basename(reorder_filename):
        desc = input_list_type + "_master_TB_format"
    else:
        desc = input_list_type + "_TB_format"
    new_file = lw.list_write(df_out, desc)

    print("Reformatted list (in TB format) %s written to output_data "
          "folder." % new_file)
Example #22
File: cozyfs.py Project: petergtz/cozy
    def __node_from_path_from_database(self, path):
        parent_node_id = '0'
        built_path = '/'
        for nodename in path.split('/')[1:-1]:
            built_path = path_join(built_path, nodename)
            if built_path in self.cached_nodes:
                parent_node_id = str(self.cached_nodes[built_path].node_id)
            else:
                parent_node_id = "select node_id from Nodes where " + \
                                 self.__backup_id_versions_where_statement('Nodes') + \
                                 " group by node_id having nodename='" + nodename + \
                                 "' and parent_node_id=(" + parent_node_id + ") order by version desc"


        query = "select node_id, inode_number from Nodes where " + \
                         self.__backup_id_versions_where_statement('Nodes') + \
                         " group by node_id having nodename='" + path_basename(path) + \
                         "' and parent_node_id=(" + parent_node_id + ") order by version desc"
        row = self.storage.db_execute(query).fetchone()
        if row is None:
            return None
        else:
            return Node(path, row['inode_number'], row['node_id'], parent_node_id)
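The final query interpolates the nodename directly into the SQL string. A parameterized sketch of those query lines (not the project's code; it reuses the two-argument db_execute form seen in update_node above) avoids quoting and injection issues:

# Sketch: bind the basename as a parameter instead of concatenating it into SQL;
# intended as a drop-in for the query lines in the method above.
query = ("select node_id, inode_number from Nodes where "
         + self.__backup_id_versions_where_statement('Nodes')
         + " group by node_id having nodename=? and parent_node_id=("
         + parent_node_id + ") order by version desc")
row = self.storage.db_execute(query, (path_basename(path),)).fetchone()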
Example #23
def parse_censuario(basepath):
    censuario = {}

    basename = path_basename(basepath).upper()
    print basename

    # check basename  # length check extended to also allow 10, as in 'G087618390'
    assert len(basename) in (7, 8, 10, 11, 12, 13, 14), basename

    censuario['CODICE_COMUNE'] = basename[:4]
    if len(basename) == 8:
        censuario['SEZIONE'] = basename[4]
    censuario['IDENTIFICATIVO RICHIESTA'] = basename[-4:]

    censuario['FABBRICATI'] = {}
    censuario['FABBRICATI_TERRENI'] = {}


############################# BEGIN .FAB FILE ANALYSIS ###############################
    try:
        fab = open(basepath + '.FAB')
    except IOError:
        fab = ()

    for i, raw_line in enumerate(fab):
        record = raw_line.strip()
        fields = record.split('|')
        global oggetto
        oggetto = {}
        tipo_record = fields[5]
        assert tipo_record in ['1', '2', '3', '4', '5']
        ### Build "oggetto" according to tipo_record, to handle the multi-record tables
        #oggetto['codice_ammvo'] = censuario['CODICE_COMUNE']
        #oggetto['sezione'] = fields[1]
        #oggetto['identificativo_immobile'], oggetto['tipo_immobile'], oggetto['progressivo'], oggetto['tipo_record'] = fields[2:6]
        #assert tipo_immobile == 'F'
        #assert oggetto['tipo_record'] in ['1', '2', '3', '4', '5']
        #if oggetto['tipo_record'] == '1':
        if tipo_record == '1':
            record_len = 40
            # COMMON FIELDS for any tipo_record:
            oggetto['codice_ammvo'] = censuario['CODICE_COMUNE']
            oggetto['sezione'] = fields[1]
            oggetto['identificativo_immobile'], oggetto['tipo_immobile'], oggetto['progressivo'], oggetto['tipo_record'] = fields[2:6]
            assert oggetto['tipo_immobile'] == 'F'
            assert oggetto['tipo_record'] == '1'
            # FIELDS SPECIFIC to this tipo_record:
            id_immobile = "%s_%s" % (oggetto['identificativo_immobile'], oggetto['tipo_record'])
            oggetto['zona'], oggetto['categoria'], oggetto['classe'] = fields[6:9]
            to_float('consistenza', fields[9])
            to_float('superficie', fields[10])
            to_float('rendita_lire', fields[11])
            to_float('rendita_euro', fields[12])
            # Data on the location of the unit within the building:
            oggetto['lotto'], oggetto['edificio'], oggetto['scala'], oggetto['interno1'], oggetto['interno2'], oggetto['piano1'], oggetto['piano2'], oggetto['piano3'], oggetto['piano4'] = fields[13:22]
            # Data on the deed that created the unit's current state:
            to_date('data_di_efficacia00', fields[22])
            to_date('data_di_registrazione00', fields[23])
            oggetto['tipo_nota00'], oggetto['numero_nota00'], oggetto['progressivo_nota00'], oggetto['anno_nota00'] = fields[24:28]
            oggetto['numero_nota00'] = oggetto['numero_nota00'].lstrip('0')
            # Data on the deed that closed the unit's state:
            to_date('data_di_efficacia99', fields[28])
            to_date('data_di_registrazione99', fields[29])
            oggetto['tipo_nota99'], oggetto['numero_nota99'], oggetto['progressivo_nota99'], oggetto['anno_nota99'] = fields[30:34]
            oggetto['numero_nota99'] = oggetto['numero_nota99'].lstrip('0')
            oggetto['partita'], oggetto['annotazione'], oggetto['identificativo_mutazione_iniziale'], oggetto['identificativo_mutazione_finale'], oggetto['protocollo_notifica'] = fields[34:39]
            oggetto['annotazione'] = remove_accents(oggetto['annotazione'])
            to_date('data_notifica', fields[39])
            # Fields added from 2007 on:
            if len(fields) > 40:
                # Data on the deed that created the unit's current state:
                oggetto['codice_causale_atto_generante'], oggetto['descrizione_atto_generante'] = fields[40:42]
                oggetto['descrizione_atto_generante'] = remove_accents(oggetto['descrizione_atto_generante']).replace(u'\xb0', u'.')  # inelegant workaround for the degree sign \xb0 -- 14-01-2022
                # Data on the deed that closed the unit's state:
                oggetto['codice_causale_atto_conclusivo'], oggetto['descrizione_atto_conclusivo'] = fields[42:44]
                oggetto['flag_classamento'] = fields[44]
                try:
                    esiste_questo_campo = fields[45]
                except IndexError:
                    record_len = 45
                else:
                    record_len = 46
                    if fields[45] is None or fields[45]=='':
                        print "***** WARNING: found an extra unmapped field, but here it is NOT populated, so the warning is negligible *****"
                    else:
                        print "***** WARNING: found an extra field that is POPULATED but NOT MAPPED! Check that it is not important *****"
            #fields = fields[6:]  # would disable this record-length check
            censuario['FABBRICATI'][id_immobile] = {}
            censuario['FABBRICATI'][id_immobile].update(oggetto)
        #elif oggetto['tipo_record'] == '2':
        elif tipo_record == '2':
            record_len = 6
            # COMMON FIELDS for any tipo_record:
            oggetto['codice_ammvo'] = censuario['CODICE_COMUNE']
            oggetto['sezione'] = fields[1]
            oggetto['identificativo_immobile'], oggetto['tipo_immobile'], oggetto['progressivo'], oggetto['tipo_record'] = fields[2:6]
            assert oggetto['tipo_record'] == '2'
            assert oggetto['tipo_immobile'] == 'F'
            # FIELDS SPECIFIC to this tipo_record - recover the multi-records:
            fields = fields[6:]
            n_record = len(fields) / 6 # number of multi-records, 6 fields per record
            for ii in range(int(n_record)):
                #print "n_record: ", n_record, "; valore di ii: ", ii
                mm = ii*6
                id_immobile = "%s_%s_%s" % (oggetto['identificativo_immobile'], oggetto['tipo_record'], ii)
                oggetto['sezione_urbana'], oggetto['foglio'], oggetto['numero'], oggetto['denominatore'], oggetto['subalterno'], oggetto['edificialita'] = fields[mm:(mm+6)]
                oggetto['foglio'] = oggetto['foglio'].lstrip('0')
                oggetto['numero'] = oggetto['numero'].lstrip('0')
                oggetto['subalterno'] = oggetto['subalterno'].lstrip('0')
                oggetto['id_particella'] = censuario['CODICE_COMUNE'] + '_' + oggetto['foglio'] + '_' + oggetto['numero']
                censuario['FABBRICATI'][id_immobile] = {}
                censuario['FABBRICATI'][id_immobile].update(oggetto)
            fields = fields[:6]

            # Original code lines, not yet removed because their purpose is unclear:
            terreno_id = (oggetto['sezione_urbana'], oggetto['foglio'], oggetto['numero'])
            if terreno_id not in censuario['FABBRICATI_TERRENI']:
                censuario['FABBRICATI_TERRENI'][terreno_id] = []
            terreno = censuario['FABBRICATI_TERRENI'][terreno_id]
            terreno.append(oggetto['identificativo_immobile'])
            #print terreno
        #elif oggetto['tipo_record'] == '3':
        elif tipo_record == '3':
            record_len = 6
            # COMMON FIELDS for any tipo_record:
            oggetto['codice_ammvo'] = censuario['CODICE_COMUNE']
            oggetto['sezione'] = fields[1]
            oggetto['identificativo_immobile'], oggetto['tipo_immobile'], oggetto['progressivo'], oggetto['tipo_record'] = fields[2:6]
            assert oggetto['tipo_record'] == '3'
            assert oggetto['tipo_immobile'] == 'F'
            # FIELDS SPECIFIC to this tipo_record - recover the multi-records:
            fields = fields[6:]
            #print fields
            n_record = len(fields) / 6 # number of multi-records, 6 fields per record
            #print n_record
            for ii in range(int(n_record)):
                mm = ii*6
                #print 'mm %s' % mm
                id_immobile = "%s_%s_%s" % (oggetto['identificativo_immobile'], oggetto['tipo_record'], ii)
                oggetto['toponimo'], oggetto['indirizzo'], oggetto['civico1'], oggetto['civico2'], oggetto['civico3'], oggetto['codice_strada'] = fields[mm:(mm+6)]
                oggetto['civico1'] = oggetto['civico1'].lstrip('0')
                oggetto['civico2'] = oggetto['civico2'].lstrip('0')
                oggetto['civico3'] = oggetto['civico3'].lstrip('0')
                censuario['FABBRICATI'][id_immobile] = {}
                censuario['FABBRICATI'][id_immobile].update(oggetto)
            fields = fields[:6]
        elif tipo_record == '4':
            record_len = 6
            # COMMON FIELDS for any tipo_record:
            oggetto['codice_ammvo'] = censuario['CODICE_COMUNE']
            oggetto['sezione'] = fields[1]
            oggetto['identificativo_immobile'], oggetto['tipo_immobile'], oggetto['progressivo'], oggetto['tipo_record'] = fields[2:6]
            assert oggetto['tipo_record'] == '4'
            assert oggetto['tipo_immobile'] == 'F'
            # FIELDS SPECIFIC to this tipo_record - recover the multi-records:
            fields = fields[6:]
            n_record = len(fields) / 5 # number of multi-records, 5 fields per record
            for ii in range(int(n_record)):
                #print "n_record: ", n_record, "; valore di ii: ", ii
                mm = ii*5
                id_immobile = "%s_%s_%s" % (oggetto['identificativo_immobile'], oggetto['tipo_record'], ii)
                oggetto['sezione_urbana'], oggetto['foglio'], oggetto['numero'], oggetto['denominatore'], oggetto['subalterno'] = fields[mm:(mm+5)]
                oggetto['foglio'] = oggetto['foglio'].lstrip('0')
                oggetto['numero'] = oggetto['numero'].lstrip('0')
                oggetto['subalterno'] = oggetto['subalterno'].lstrip('0')
                censuario['FABBRICATI'][id_immobile] = {}
                censuario['FABBRICATI'][id_immobile].update(oggetto)
            fields = fields[:6]
        elif tipo_record == '5':
            record_len = 2
            # COMMON FIELDS for any tipo_record:
            oggetto['codice_ammvo'] = censuario['CODICE_COMUNE']
            oggetto['sezione'] = fields[1]
            oggetto['identificativo_immobile'], oggetto['tipo_immobile'], oggetto['progressivo'], oggetto['tipo_record'] = fields[2:6]
            assert oggetto['tipo_record'] == '5'
            assert oggetto['tipo_immobile'] == 'F'
            # FIELDS SPECIFIC to this tipo_record - recover the multi-records:
            fields = fields[6:]
            n_record = len(fields) / 2 # number of multi-records, 2 fields per record
            for ii in range(int(n_record)):
                #print "n_record: ", n_record, "; valore di ii: ", ii
                mm = ii*2
                id_immobile = "%s_%s_%s" % (oggetto['identificativo_immobile'], oggetto['tipo_record'], ii)
                oggetto['codice_riserva'], oggetto['partita_iscrizione_riserva'] = fields[mm:(mm+2)]
                censuario['FABBRICATI'][id_immobile] = {}
                censuario['FABBRICATI'][id_immobile].update(oggetto)
            fields = fields[:2]
        else:
            continue

        assert len(fields) == record_len, 'Anomalous record schema line: %d, type: %r, len: %d, record: %r' % (i + 1, oggetto['tipo_record'], len(fields),  record)

        # The lines below kept only ONE unit per identifier (they just update)! We want ALL units for every TIPO_RECORD, so this logic was moved into the per-tipo_record branches above to handle the multi-record tables:
        #if oggetto['identificativo_immobile'] not in censuario['FABBRICATI']:
        #    censuario['FABBRICATI'][oggetto['identificativo_immobile']] = {}
        #censuario['FABBRICATI'][oggetto['identificativo_immobile']].update(oggetto)

############################# END .FAB FILE ANALYSIS ###############################


############################# BEGIN .TER FILE ANALYSIS ###############################
    censuario['TERRENI'] = {}
    try:
        ter = open(basepath + '.TER')
    except IOError:
        ter = ()

    for i, raw_line in enumerate(ter):
        record = raw_line.strip()
        fields = record.split('|')
        oggetto = {}
        tipo_record = fields[5]
        assert tipo_record in ['1', '2', '3', '4']
        ### Build "oggetto" according to tipo_record, to handle the multi-record tables
        if tipo_record == '1':
            record_len = 40
            # COMMON FIELDS for any tipo_record:
            oggetto['codice_ammvo'] = censuario['CODICE_COMUNE']
            oggetto['sezione'] = fields[1]
            oggetto['identificativo_immobile'], oggetto['tipo_immobile'], oggetto['progressivo'], oggetto['tipo_record'] = fields[2:6]
            assert oggetto['tipo_record'] == '1'
            assert oggetto['tipo_immobile'] == 'T'
            # FIELDS SPECIFIC to this tipo_record:
            id_immobile = "%s_%s" % (oggetto['identificativo_immobile'], oggetto['tipo_record'])
            oggetto['foglio'], oggetto['numero'], oggetto['denominatore'], oggetto['subalterno'], oggetto['edificialita'] = fields[6:11]
            oggetto['foglio'] = oggetto['foglio'].lstrip('0')
            oggetto['numero'] = oggetto['numero'].lstrip('0')
            oggetto['subalterno'] = oggetto['subalterno'].lstrip('0')
            oggetto['id_particella'] = censuario['CODICE_COMUNE'] + '_' + oggetto['foglio'] + '_' + oggetto['numero']
            # Characteristic data of the parcel (land):
            oggetto['qualita'], oggetto['classe'] = fields[11:13]
            to_float('ettari', fields[13])
            to_float('are', fields[14])
            to_float('centiare', fields[15])
            # Income data:
            oggetto['flag_reddito'], oggetto['flag_porzione'], oggetto['flag_deduzioni'] = fields[16:19]
            to_float('reddito_dominicale_lire', fields[19])
            to_float('reddito_agrario_lire', fields[20])
            to_float('reddito_dominicale_euro', fields[21])
            to_float('reddito_agrario_euro', fields[22])
            # Data on the deed that created the parcel's current state:
            to_date('data_di_efficacia00', fields[23])
            to_date('data_di_registrazione00', fields[24])
            oggetto['tipo_nota00'], oggetto['numero_nota00'], oggetto['progressivo_nota00'], oggetto['anno_nota00'] = fields[25:29]
            oggetto['numero_nota00'] = oggetto['numero_nota00'].lstrip('0')
            # Data on the deed that closed the parcel's state:
            to_date('data_di_efficacia99', fields[29])
            to_date('data_di_registrazione99', fields[30])
            oggetto['tipo_nota99'], oggetto['numero_nota99'], oggetto['progressivo_nota99'], oggetto['anno_nota99'] = fields[31:35]
            oggetto['numero_nota99'] = oggetto['numero_nota99'].lstrip('0')
            oggetto['partita'] = fields[35]
            oggetto['annotazione'] = remove_accents(fields[36])
            oggetto['identificativo_mutazione_iniziale'], oggetto['identificativo_mutazione_finale'] = fields[37:39]
            # Fields added from 2007 on:
            if len(fields) > 40:
                record_len = 44
                # Data on the deed that created the unit's current state:
                oggetto['codice_causale_atto_generante'], oggetto['descrizione_atto_generante'] = fields[39:41]
                oggetto['descrizione_atto_generante'] = remove_accents(oggetto['descrizione_atto_generante']).replace(u'\xb0', u'.')  # inelegant workaround for the degree sign \xb0 -- 14-01-2022
                # Data on the deed that closed the unit's state:
                oggetto['codice_causale_atto_conclusivo'], oggetto['descrizione_atto_conclusivo'] = fields[41:43]
            #fields = fields[6:]  # would disable this record-length check
            censuario['TERRENI'][id_immobile] = {}
            censuario['TERRENI'][id_immobile].update(oggetto)
        elif tipo_record == '2': # DEDUCTIONS
            record_len = 1
            # COMMON FIELDS for any tipo_record:
            oggetto['codice_ammvo'] = censuario['CODICE_COMUNE']
            oggetto['sezione'] = fields[1]
            oggetto['identificativo_immobile'], oggetto['tipo_immobile'], oggetto['progressivo'], oggetto['tipo_record'] = fields[2:6]
            assert oggetto['tipo_record'] == '2'
            assert oggetto['tipo_immobile'] == 'T'
            # FIELDS SPECIFIC to this tipo_record - recover the multi-records:
            fields = fields[6:]
            n_record = len(fields) / 1 # number of multi-records, 1 field per record
            for ii in range(int(n_record)):
                #print "n_record: ", n_record, "; valore di ii: ", ii
                mm = ii*1
                id_immobile = "%s_%s_%s" % (oggetto['identificativo_immobile'], oggetto['tipo_record'], ii)
                oggetto['simbolo_deduzione'] = fields[mm]
                censuario['TERRENI'][id_immobile] = {}
                censuario['TERRENI'][id_immobile].update(oggetto)
            fields = fields[:1]
        elif tipo_record == '3': # RESERVES
            record_len = 2
            # COMMON FIELDS for any tipo_record:
            oggetto['codice_ammvo'] = censuario['CODICE_COMUNE']
            oggetto['sezione'] = fields[1]
            oggetto['identificativo_immobile'], oggetto['tipo_immobile'], oggetto['progressivo'], oggetto['tipo_record'] = fields[2:6]
            assert oggetto['tipo_record'] == '3'
            assert oggetto['tipo_immobile'] == 'T'
            # FIELDS SPECIFIC to this tipo_record - recover the multi-records:
            fields = fields[6:]
            n_record = len(fields) / 2 # number of multi-records, 2 fields per record
            for ii in range(int(n_record)):
                mm = ii*2
                id_immobile = "%s_%s_%s" % (oggetto['identificativo_immobile'], oggetto['tipo_record'], ii)
                oggetto['codice_riserva'], oggetto['partita_iscrizione_riserva'] = fields[mm:(mm+2)]
                censuario['TERRENI'][id_immobile] = {}
                censuario['TERRENI'][id_immobile].update(oggetto)
            fields = fields[:2]
        elif tipo_record == '4': # PORTIONS
            record_len = 6
            # COMMON FIELDS for any tipo_record:
            oggetto['codice_ammvo'] = censuario['CODICE_COMUNE']
            oggetto['sezione'] = fields[1]
            oggetto['identificativo_immobile'], oggetto['tipo_immobile'], oggetto['progressivo'], oggetto['tipo_record'] = fields[2:6]
            assert oggetto['tipo_record'] == '4'
            assert oggetto['tipo_immobile'] == 'T'
            # FIELDS SPECIFIC to this tipo_record - recover the multi-records:
            fields = fields[6:]
            n_record = len(fields) / 6 # number of multi-records, 6 fields per record
            for ii in range(int(n_record)):
                mm = ii*6
                id_immobile = "%s_%s_%s" % (oggetto['identificativo_immobile'], oggetto['tipo_record'], ii)
                oggetto['identificativo_porzione'], oggetto['qualita'], oggetto['classe'], oggetto['ettari'], oggetto['are'], oggetto['centiare'] = fields[mm:(mm+6)]
                censuario['TERRENI'][id_immobile] = {}
                censuario['TERRENI'][id_immobile].update(oggetto)
            fields = fields[:6]
        else:
            continue
    
        assert len(fields) == record_len, 'Anomalous record schema line: %d, type: %r, len: %d, record: %r' % (i + 1, oggetto['tipo_record'], len(fields),  record)
            

############################# END .TER FILE ANALYSIS ###############################


############################# BEGIN .SOG FILE ANALYSIS ###############################
    censuario['SOGGETTI'] = {}    
    try:
        sog = open(basepath + '.SOG')
    except IOError:
        sog = ()
    #sog = open(basepath + '.SOG')

    for i, raw_line in enumerate(sog):
        record = raw_line.strip()
        fields = record.split('|')
        oggetto = {}
        oggetto['codice_ammvo'] = fields[0]
        oggetto['sezione'] = fields[1]
        oggetto['identificativo_soggetto'], oggetto['tipo_soggetto'] = fields[2:4]
        assert oggetto['tipo_soggetto'] in ['P', 'G'], oggetto['tipo_soggetto']
        if oggetto['tipo_soggetto'] == 'P':
            record_len = 12
            oggetto['cognome'], oggetto['nome'], oggetto['sesso'] = fields[4:7]
            to_date('data_di_nascita', fields[7])
            oggetto['luogo_di_nascita'], oggetto['codice_fiscale'], oggetto['info_suppl'] = fields[8:11]
        elif oggetto['tipo_soggetto'] == 'G':
            record_len = 8
            oggetto['denominazione'], oggetto['sede'], oggetto['codice_fiscale'] = fields[4:7]

        assert len(fields) == record_len, 'Anomalous record schema line: %d, type: %r, len: %d, record: %r' % (i + 1, oggetto['tipo_soggetto'], len(fields),  record)

        identificativo_soggetto = oggetto['identificativo_soggetto'], oggetto['tipo_soggetto']
        censuario['SOGGETTI'][identificativo_soggetto] = oggetto
    #print "id_soggetto: ", identificativo_soggetto, "; DATI: ", censuario['SOGGETTI'][identificativo_soggetto]

############################# END .SOG FILE ANALYSIS ###############################


############################# BEGIN .TIT FILE ANALYSIS ###############################
    censuario['TITOLARITA'] = {}    
    try:
        tit = open(basepath + '.TIT')
    except IOError:
        tit = ()
    #tit = open(basepath + '.TIT')

    for i, raw_line in enumerate(tit):
        record = raw_line.strip()
        fields = record.split('|')
        oggetto = {}
        oggetto['codice_ammvo'] = fields[0]
        oggetto['sezione'] = fields[1]
        oggetto['identificativo_soggetto'], oggetto['tipo_soggetto'] = fields[2:4]
        oggetto['identificativo_immobile'], oggetto['tipo_immobile'] = fields[4:6]
        # Ownership data:
        oggetto['codice_diritto'] = fields[6]
        oggetto['titolo_non_codificato'] = remove_accents(fields[7])
        oggetto['quota_numeratore'], oggetto['quota_denominatore'], oggetto['regime'], oggetto['soggetto_di_riferimento'] = fields[8:12]
        oggetto['regime'] = remove_accents(oggetto['regime'])
        oggetto['soggetto_di_riferimento'] = remove_accents(oggetto['soggetto_di_riferimento'])
        # Data on the deed that created the ownership:
        to_date('data_di_validita00', fields[12])
        oggetto['tipo_nota00'], oggetto['numero_nota00'], oggetto['progressivo_nota00'], oggetto['anno_nota00'] = fields[13:17]
        oggetto['numero_nota00'] = oggetto['numero_nota00'].lstrip('0')
        to_date('data_registrazione_atti00', fields[17])
        oggetto['partita'] = fields[18]
        # Data on the deed that closed the unit's state:
        to_date('data_di_validita99', fields[19])
        oggetto['tipo_nota99'], oggetto['numero_nota99'], oggetto['progressivo_nota99'], oggetto['anno_nota99'] = fields[20:24]
        oggetto['numero_nota99'] = oggetto['numero_nota99'].lstrip('0')
        to_date('data_registrazione_atti99', fields[24])
        oggetto['identificativo_mutazione_iniziale'], oggetto['identificativo_mutazione_finale'] = fields[25:27]
        if len(fields) > 28:
            # Field introduced only from 2001 on:
            oggetto['identificativo_titolarita'] = fields[27]
        if len(fields) > 29:
            # Fields introduced from 2007 on:
            oggetto['codice_causale_atto_generante'], oggetto['descrizione_atto_generante'], oggetto['codice_causale_atto_conclusivo'], oggetto['descrizione_atto_conclusivo'] = fields[28:32]
            oggetto['descrizione_atto_generante'] = remove_accents(oggetto['descrizione_atto_generante']).replace(u'\xb0', u'.')  # inelegant workaround for the degree sign \xb0 -- 14-01-2022
        #id_tit = tuple(fields[2:6])
        #censuario['TITOLARITA'].append(id_tit)
        id_titolarita = "%s_%s" % (fields[2], i)
        censuario['TITOLARITA'][id_titolarita] = {}
        censuario['TITOLARITA'][id_titolarita].update(oggetto)

############################# END .TIT FILE ANALYSIS ###############################


############################# BEGIN SC.DAT FLOOR-PLAN FILE ANALYSIS ###############################
    censuario['PLANIMETRIE_SC'] = {}
    try:
        sc_dat = open(basepath + 'SC.DAT')
    except IOError:
        sc_dat = ()
    #sc_dat = open(basepath + 'SC.DAT')

    for i, raw_line in enumerate(sc_dat):
        record = raw_line.strip()
        fields = record.split('|')
        oggetto = {}
        oggetto['codice_ammvo'] = fields[0]
        oggetto['sezione'] = fields[1]
        oggetto['identificativo_immobile'], oggetto['protocollo_registrazione'] = fields[2:4]
        to_date('data_registrazione', fields[4])
        oggetto['nome_file'] = fields[5]
        to_int('n_scheda', fields[6])
        to_int('formato_scheda', fields[7])
        to_int('scala_scheda', fields[8])

        #id_tit = tuple(fields[2:6])
        #censuario['PLANIMETRIE_SC'].append(id_tit)
        id_sc = "%s_%s" % (fields[2], i)
        censuario['PLANIMETRIE_SC'][id_sc] = {}
        censuario['PLANIMETRIE_SC'][id_sc].update(oggetto)

############################# END SC.DAT FILE ANALYSIS ###############################



############################# BEGIN DM.DAT FLOOR-PLAN FILE ANALYSIS ###############################
    censuario['PLANIMETRIE_DM'] = {}    
    try:
        dm_dat = open(basepath + 'DM.DAT')
    except IOError:
        dm_dat = ()
    #dm_dat = open(basepath + 'DM.DAT')

    for i, raw_line in enumerate(dm_dat):
        record = raw_line.strip()
        fields = record.split('|')
        oggetto = {}
        oggetto['codice_ammvo'] = fields[0]
        oggetto['sezione'] = fields[1]
        oggetto['identificativo_immobile'], oggetto['protocollo_registrazione'] = fields[2:4]
        to_date('data_registrazione', fields[4])
        to_int('progressivo_poligono', fields[5])
        to_int('superficie_poligono', fields[6])
        oggetto['ambiente_poligono'] = fields[7]
        to_int('altezza', fields[8])
        to_int('altezza_max', fields[9])

        #id_tit = tuple(fields[2:6])
        #censuario['PLANIMETRIE_DM'].append(id_tit)
        id_dm = "%s_%s" % (fields[2], i)
        censuario['PLANIMETRIE_DM'][id_dm] = {}
        censuario['PLANIMETRIE_DM'][id_dm].update(oggetto)

############################# END OF DM.DAT FILE PARSING ###############################



    #print "fabbricati: %s" % censuario['FABBRICATI']
    #print "soggetti: %s" % censuario['SOGGETTI']
    #print "titolarita: %s" % censuario['TITOLARITA']

    return censuario
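
The loops above call to_date and to_int without passing oggetto, so those helpers are presumably defined in the enclosing function and write into the current record. A minimal sketch of what they might look like, assuming the date fields use a DDMMYYYY layout; the factory name and the date format are assumptions, not taken from the original source:

from datetime import datetime

def make_record_helpers(oggetto):
    # Hypothetical reconstruction: each helper parses a raw field and stores
    # it under `key` in the current record, skipping empty values.
    def to_date(key, raw):
        raw = raw.strip()
        if raw:
            oggetto[key] = datetime.strptime(raw, '%d%m%Y').date()

    def to_int(key, raw):
        raw = raw.strip()
        if raw:
            oggetto[key] = int(raw)

    return to_date, to_int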
Example #24
0
    def import_action(self):
        self.dlg.txtFeedback.setText('Attendi.....')
        chkbox_value = self.dlg.chkActivate.isChecked()  # checkbox for adding the layer to the map
        chkbox_str = '%s' % (chkbox_value)

        # Check whether saving to DB or to SHP was selected:
        shp_value = self.dlg.chkShp.isChecked()
        db_value = self.dlg.chkDB.isChecked()

        input_cxf = self.dlg.fileBrowse_txt.text()
        #input_cxf = r'C:\ARPA-2015\fatti_miei\AndreaMGOS-lavori\QGis_custom_plugins\dati_cxf-orbassano\G087_003800.CXF'
        basename = path_basename(input_cxf).upper()
        foglio = parse_foglio(input_cxf[:-4])

        # Since the two options cannot be made mutually exclusive yet, SHP takes precedence:
        if shp_value:
            output_dir = self.dlg.dirBrowse_txt.text()
            #output_dir = r'C:\Users\riccardo\Desktop'
            dest_dir = '%s\\%s' % (output_dir, basename[:-4])
            #foglio = parse_foglio(input_cxf[:-4])
            # Write dynamically into the directory chosen by the user:
            write_foglio(foglio,
                         dest_dir,
                         point_borders=False,
                         format_name='ESRI Shapefile')

        elif db_value:
            # Experimental: write to a PostgreSQL DB at a fixed location - UNDER DEVELOPMENT:
            userDB = self.dlg.usrDB.text()
            pwdDB = self.dlg.pwdDB.text()
            hostDB = self.dlg.hostDB.text()
            portDB = self.dlg.portDB.text()
            nameDB = self.dlg.nameDB.text()
            schemaDB = self.dlg.schemaDB.text()
            dest_dir = "dbname=%s host=%s port=%s user=%s password=%s active_schema=%s" % (
                nameDB, hostDB, portDB, userDB, pwdDB, schemaDB)
            param_conn = "PG:%s" % (dest_dir)
            #param_conn = PG:"dbname=tucatuca host=localhost port=5432 user=postgres password=ARTURO active_schema=public"
            write_foglio(foglio,
                         param_conn,
                         point_borders=False,
                         format_name='PostgreSQL')
        '''
        Add the layer to the map: NOT fully IMPLEMENTED, because the name of the
        generated layer is not known in advance!! It currently loads the SHP files,
        but loading data that was written to the DB is most likely impossible.
        '''
        if chkbox_value:
            pedice = "%s_%s_%s" % (foglio['CODICE COMUNE'],
                                   foglio['NUMERO FOGLIO'],
                                   foglio['CODICE SVILUPPO'])
            layer_path = dest_dir  # e.g. 'C:\\Users\\riccardo\\Desktop\\G087_003800'

            # loading method using QgsVectorLayer
            layer_bordi_name = 'CATASTO_BORDI_%s' % (pedice)
            layer_testi_name = 'CATASTO_TESTI_%s' % (pedice)
            layer_fiduciali_name = 'CATASTO_FIDUCIALI_%s' % (pedice)
            layer_simboli_name = 'CATASTO_SIMBOLI_%s' % (pedice)
            # load the elements onto the map:
            layer_bordi = QgsVectorLayer(
                layer_path + "\\" + layer_bordi_name + ".shp",
                layer_bordi_name, "ogr")
            layer_testi = QgsVectorLayer(
                layer_path + "\\" + layer_testi_name + ".shp",
                layer_testi_name, "ogr")
            layer_fiduciali = QgsVectorLayer(
                layer_path + "\\" + layer_fiduciali_name + ".shp",
                layer_fiduciali_name, "ogr")
            layer_simboli = QgsVectorLayer(
                layer_path + "\\" + layer_simboli_name + ".shp",
                layer_simboli_name, "ogr")
            lista_layer_to_load = [
                layer_simboli, layer_fiduciali, layer_testi, layer_bordi
            ]
            QgsMapLayerRegistry.instance().addMapLayers(lista_layer_to_load)
            # assign a style - the "inmm" styles are for data loaded in LatLon; otherwise the
            # correct style is the one without that suffix, in map units:
            layer_bordi.loadNamedStyle(
                os.getenv("HOME") +
                '/.qgis2/python/plugins/ImportCatastoCXF/qml_base/catasto_bordi_inmm.qml'
            )
            layer_testi.loadNamedStyle(
                os.getenv("HOME") +
                '/.qgis2/python/plugins/ImportCatastoCXF/qml_base/catasto_testi_inmm.qml'
            )
            layer_fiduciali.loadNamedStyle(
                os.getenv("HOME") +
                '/.qgis2/python/plugins/ImportCatastoCXF/qml_base/catasto_fiduciali_inmm.qml'
            )
            layer_simboli.loadNamedStyle(
                os.getenv("HOME") +
                '/.qgis2/python/plugins/ImportCatastoCXF/qml_base/catasto_simboli_inmm.qml'
            )
            '''
            # This map-loading method is more direct but does not allow applying a style
            layer_name_arr = ['CATASTO_BORDI_', 'CATASTO_TESTI_', 'CATASTO_FIDUCIALI_', 'CATASTO_SIMBOLI_', 'CATASTO_LINEE_']
            for elemento in layer_name_arr:
                layername = '%s%s' % (elemento, pedice)
                layer_full_path = '%s\%s.shp' % (layer_path, layername)
                # load the elements onto the map:
                layer = self.iface.addVectorLayer(layer_full_path, layername, "ogr")
                # assign a qml style - DOES NOT WORK!
                if (elemento == 'CATASTO_BORDI_'):
                    layer.loadNamedStyle('qml_base/catasto_bordi.qml')
                elif (elemento == 'CATASTO_TESTI_'):
                    layer.loadNamedStyle('qml_base/catasto_testi.qml')
                elif (elemento == 'CATASTO_FIDUCIALI_'):
                    layer.loadNamedStyle('qml_base/catasto_fiduciali.qml')
                elif (elemento == 'CATASTO_SIMBOLI_'):
                    layer.loadNamedStyle('qml_base/catasto_simboli.qml')

                if not layer:
                    print "Layer failed to load!"
            '''

        self.dlg.txtFeedback.setText('Conversione avvenuta con successo!!')
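
QgsVectorLayer does not raise when a shapefile is missing; it simply returns an invalid layer, so addMapLayers can silently register broken layers. A small hedged sketch of a validity guard that could run before the addMapLayers call above, using the same QGIS 2.x API as the plugin:

from qgis.core import QgsMapLayerRegistry

def add_valid_layers(layers):
    # Hypothetical guard: only register layers that actually loaded.
    valid_layers = [lyr for lyr in layers if lyr.isValid()]
    for lyr in layers:
        if not lyr.isValid():
            print('Layer failed to load: %s' % lyr.name())
    QgsMapLayerRegistry.instance().addMapLayers(valid_layers)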
Example #25
0
def parse_censuario(basepath):
    censuario = {}

    basename = path_basename(basepath).upper()

    # check basename
    assert len(basename) == 7 or len(basename) == 8, basename

    censuario['CODICE_COMUNE'] = basename[:4]
    if len(basename) == 8:
        censuario['SEZIONE'] = basename[4]
    censuario['IDENTIFICATIVO RICHIESTA'] = basename[-4:]

    censuario['FABBRICATI'] = {}
    censuario['FABBRICATI_TERRENI'] = {}

    try:
        fab = open(basepath + '.FAB')
    except IOError:
        fab = ()

    for i, raw_line in enumerate(fab):
        record = raw_line.strip()
        fields = record.split('|')
        oggetto = {}
        oggetto['IDENTIFICATIVO_IMMOBILE'], tipo_immobile, oggetto['PROGRESSIVO'], oggetto['TIPO_RECORD'] = fields[2:6]
        assert tipo_immobile == 'F'
        assert oggetto['TIPO_RECORD'] in ['1', '2', '3', '4', '5']
        if oggetto['TIPO_RECORD'] == '1':
            record_len = 40
            oggetto['ZONA'], oggetto['CATEGORIA'], oggetto['CLASSE'], oggetto['CONSISTENZA'], oggetto['SUPERFICIE'] = fields[6:11]
            oggetto['RENDITA'] = fields[12]
        elif oggetto['TIPO_RECORD'] == '2':
            record_len = 13
            oggetto['SEZIONE URBANA'], oggetto['FOGLIO'], oggetto['NUMERO'], oggetto['DENOMINATORE'], oggetto['SUBALTERNO'], oggetto['EDIFICIALITA'] = fields[6:12]
            fields = fields[:13]
            terreno_id = (oggetto['SEZIONE URBANA'], oggetto['FOGLIO'], oggetto['NUMERO'])
            if terreno_id not in censuario['FABBRICATI_TERRENI']:
                censuario['FABBRICATI_TERRENI'][terreno_id] = []
            terreno = censuario['FABBRICATI_TERRENI'][terreno_id]
            terreno.append(oggetto['IDENTIFICATIVO_IMMOBILE'])
            print terreno
        else:
            continue

        assert len(fields) == record_len, 'Anomalous record schema line: %d, type: %r, len: %d, record: %r' % (i + 1, oggetto['TIPO_RECORD'], len(fields),  record)

        if oggetto['IDENTIFICATIVO_IMMOBILE'] not in censuario['FABBRICATI']:
            censuario['FABBRICATI'][oggetto['IDENTIFICATIVO_IMMOBILE']] = {}
        censuario['FABBRICATI'][oggetto['IDENTIFICATIVO_IMMOBILE']].update(oggetto)

    censuario['TERRENI'] = {}

    try:
        ter = open(basepath + '.TER')
    except IOError:
        ter = ()

    for i, raw_line in enumerate(ter):
        record = raw_line.strip()
        fields = record.split('|')
        oggetto = {}
        oggetto['IDENTIFICATIVO_IMMOBILE'], tipo_immobile, oggetto['PROGRESSIVO'], oggetto['TIPO_RECORD'] = fields[2:6]
        assert tipo_immobile == 'T'
        assert oggetto['TIPO_RECORD'] in ['1', '2', '3', '4']
        if oggetto['TIPO_RECORD'] == '1':
            record_len = 40
            oggetto['FOGLIO'], oggetto['NUMERO'], oggetto['DENOMINATORE'], oggetto['SUBALTERNO'], oggetto['EDIFICIALITA'] = fields[6:11]
            oggetto['QUALITA'], oggetto['CLASSE'], oggetto['ETTARI'], oggetto['ARE'], oggetto['CENTIARE'] = fields[11:16]
            oggetto['REDDITO_DOMINICALE'], oggetto['REDDITO_AGRICOLO'] = fields[21:23]
        else:
            continue

        assert len(fields) == record_len, 'Anomalous record schema line: %d, type: %r, len: %d, record: %r' % (i + 1, oggetto['TIPO_RECORD'], len(fields), record)

        censuario['TERRENI'][oggetto['IDENTIFICATIVO_IMMOBILE']] = oggetto

    censuario['SOGGETTI'] = {}

    sog = open(basepath + '.SOG')

    for i, raw_line in enumerate(sog):
        record = raw_line.strip()
        fields = record.split('|')
        oggetto = {}
        oggetto['IDENTIFICATIVO_SOGGETTO'], oggetto['TIPO_SOGGETTO'] = fields[2:4]
        assert oggetto['TIPO_SOGGETTO'] in ['P', 'G'], oggetto['TIPO_SOGGETTO']
        if oggetto['TIPO_SOGGETTO'] == 'P':
            record_len = 12
            oggetto['COGNOME'], oggetto['NOME'], oggetto['SESSO'], oggetto['DATA_DI_NASCITA'], oggetto['LUOGO_DI_NASCITA'], oggetto['CODICE_FISCALE'] = fields[6:12]
        elif oggetto['TIPO_SOGGETTO'] == 'G':
            record_len = 8
            oggetto['DENOMINAZIONE'], oggetto['SEDE'], oggetto['CODICE_FISCALE'] = fields[4:7]

        assert len(fields) == record_len, 'Anomalous record schema line: %d, type: %r, len: %d, record: %r' % (i + 1, oggetto['TIPO_SOGGETTO'], len(fields), record)

        identificativo_soggetto = oggetto['IDENTIFICATIVO_SOGGETTO'], oggetto['TIPO_SOGGETTO']
        censuario['SOGGETTI'][identificativo_soggetto] = oggetto

    censuario['TITOLARITA'] = []

    tit = open(basepath + '.TIT')

    for i, raw_line in enumerate(tit):
        record = raw_line.strip()
        fields = record.split('|')
        # Use a distinct name so the .TIT file handle is not shadowed.
        id_tit = tuple(fields[2:6])
        censuario['TITOLARITA'].append(id_tit)

    return censuario
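
A hedged usage sketch for parse_censuario as defined above; the path stem is hypothetical and must sit next to the matching .FAB/.TER/.SOG/.TIT export files:

# Hypothetical invocation: the 8-character stem encodes comune code, sezione
# and request id, e.g. 'G087A042' -> CODICE_COMUNE 'G087', SEZIONE 'A'.
censuario = parse_censuario('/data/catasto/G087A042')
print(censuario['CODICE_COMUNE'])
for immobile_id, fabbricato in censuario['FABBRICATI'].items():
    print('%s -> %s' % (immobile_id, fabbricato.get('CATEGORIA')))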
Example #26
0
        help='Destination directory to save the self-sufficient directory tree '
        'of executables and libraries at.',
        required=True)
    parser.add_argument(
        '--clean-dest',
        help='Remove the destination directory if it already exists. Only works '
        'with directories under /tmp/... for safety.',
        action='store_true')
    add_common_arguments(parser)

    args = parser.parse_args()
    log_level = logging.INFO
    if args.verbose:
        log_level = logging.DEBUG
    logging.basicConfig(level=log_level,
                        format="[" + path_basename(__file__) +
                        "] %(asctime)s %(levelname)s: %(message)s")

    release_manifest_path = os.path.join(YB_SRC_ROOT,
                                         'yb_release_manifest.json')
    release_manifest = json.load(open(release_manifest_path))
    if args.clean_dest and os.path.exists(args.dest_dir):
        if args.dest_dir.startswith('/tmp/'):
            logging.info(("--clean-dest specified and '{}' already exists, "
                          "deleting.").format(args.dest_dir))
            shutil.rmtree(args.dest_dir)
        else:
            raise RuntimeError(
                "For safety, --clean-dest only works with destination directories "
                "under /tmp/...")

DirList = []
FileList = []
for root, dirs, files in os_walk(TEMPLATE_PyPROJECT_DIR_PATH, topdown=False):
   for dir_ in dirs:
      DirList.append((root, dir_))
   for file_ in files:
      FileList.append((root, file_))


# FIRST: replace text in Files
for root, file_ in FileList:
   file_path = path_join(root, file_)
   # check SkipFileNames
   if path_basename(file_path) in SkipFileNames:
      continue
   with open(file_path, 'r') as file_p:
      file_content = file_p.read()

   if OrigTemplateName in file_content:
      file_content = file_content.replace(OrigTemplateName, NEW_PROJECT_NAME)

   if OrigTemplateName__lower_case in file_content:
      file_content = file_content.replace(OrigTemplateName__lower_case, NewProjectName__lower_case)

   if OrigTemplateOneLineDescription in file_content:
      file_content = file_content.replace(OrigTemplateOneLineDescription, NEW_PROJECT_ONE_LINE_DESCRIPTION)

   with open(file_path, 'w') as file_p:
      file_p.write(file_content)
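
The rename script above references several module-level names defined earlier in its source file. A hedged sketch of plausible definitions; every value below is hypothetical:

TEMPLATE_PyPROJECT_DIR_PATH = '/tmp/Template_PyPROJECT'   # tree to rewrite
SkipFileNames = {'.gitignore', 'LICENSE.rst'}             # files left untouched
OrigTemplateName = 'Template_PyPROJECT'
OrigTemplateName__lower_case = OrigTemplateName.lower()
OrigTemplateOneLineDescription = 'One line description of the template.'
NEW_PROJECT_NAME = 'MyNewProject'
NewProjectName__lower_case = NEW_PROJECT_NAME.lower()
NEW_PROJECT_ONE_LINE_DESCRIPTION = 'One line description of the new project.'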
Example #28
0
    def package_binaries(self):
        """
        The main entry point to this class. Arranges binaries (executables and shared libraries),
        starting with the given set of "seed executables", in the destination directory so that
        the executables can find all of their dependencies.
        """

        # Breadth-first search queue.
        queue = deque()

        for seed_executable_glob in self.seed_executable_patterns:
            re_match = re.match(r'^build/latest/(.*)$', seed_executable_glob)
            if re_match:
                updated_glob = path_join(self.build_dir, re_match.group(1))
                logging.info(
                    "Automatically updating seed glob to be relative to build dir: {} -> {}".format(
                        seed_executable_glob, updated_glob))
                seed_executable_glob = updated_glob
            for seed_executable in glob.glob(seed_executable_glob):
                queue.append(self.get_node_by_path(seed_executable, is_executable=True))

        # Also package Linuxbrew's dynamic linker.
        ld_path = realpath(path_join(LINUXBREW_HOME, 'lib', 'ld.so'))
        queue.append(self.get_node_by_path(ld_path))
        queue.append(self.get_node_by_path(PATCHELF_PATH, is_executable=True))

        # This will be a set of GraphNode objects. GraphNode should have proper hashing and equality
        # overrides.
        visited = set()

        while len(queue) > 0:
            node = queue.popleft()
            if node not in visited:
                visited.add(node)
                for new_dep in node.deps:
                    target_node = self.get_node_by_path(new_dep.target)
                    new_dep.target_node = target_node
                    if target_node not in visited:
                        queue.append(target_node)
                        target_node.add_reverse_dependency(node)

        nodes = self.nodes_by_digest.values()

        unique_dir_names = set()
        need_short_digest = False
        for node in nodes:
            name = node.lib_link_dir_name_prefix()
            if name in unique_dir_names:
                logging.warning(
                        "Duplicate library dir name: '{}', will use SHA256 digest prefix to "
                        "ensure directory name uniqueness".format(name))
                need_short_digest = True
            unique_dir_names.add(name)

        dest_lib_dir = path_join(self.dest_dir, 'lib')
        dest_bin_dir = path_join(self.dest_dir, 'bin')
        mkpath(dest_bin_dir)
        for node in nodes:
            target_dir_basename = node.lib_link_dir_name_prefix()
            if need_short_digest:
                # We need to add a digest to the directory name to disambiguate between libraries
                # with the same name/version, e.g. two versions of libz 1.2.8 that come in when
                # building using Clang. Eventually we need to deduplicate libraries and remove the
                # need of doing this.
                target_dir_basename += '_' + node.digest[:8]
            node.target_dir = path_join(dest_lib_dir, target_dir_basename)
            node_basename = path_basename(node.path)
            if node.is_executable:
                node.target_path = path_join(dest_bin_dir, node_basename)
            else:
                node.target_path = path_join(node.target_dir, node_basename)
            mkpath(node.target_dir)
            if os.path.exists(node.target_path):
                os.unlink(node.target_path)
            shutil.copyfile(node.path, node.target_path)
            shutil.copymode(node.path, node.target_path)
            # Make the file writable so we can change the rpath in the file.
            os.chmod(node.target_path, os.stat(node.target_path).st_mode | stat.S_IWUSR)

        # Symlink the dynamic linker into an easy-to-find location inside lib.
        ld_node = self.find_existing_node_by_path(ld_path)
        ld_symlink_path = path_join(dest_lib_dir, 'ld.so')
        create_rel_symlink(ld_node.target_path, ld_symlink_path)

        patchelf_node = self.find_existing_node_by_path(PATCHELF_PATH)

        for node in nodes:
            if node in [ld_node, patchelf_node]:
                continue

            # Create symlinks in each node's directory that point to all of its dependencies.
            for dep in sorted(set(node.deps).union(set(node.indirect_dependencies_via_dlopen()))):
                existing_file_path = dep.target_node.target_path
                link_path = path_join(node.target_dir, os.path.basename(dep.name))
                if os.path.islink(link_path):
                    os.unlink(link_path)
                if realpath(existing_file_path) != realpath(link_path):
                    create_rel_symlink(existing_file_path, link_path)

            # Update RPATH of this binary to point to the directory that has symlinks to all the
            # right versions of the libraries it needs.
            new_rpath = '$ORIGIN'
            # Compute the path of the "target directory" (the directory that RPATH needs to
            # point to) relative to the binary path. If the binary is an executable, it won't be
            # that directory, because it will be in "bin" instead.
            target_dir_rel_path = os.path.relpath(
                    node.target_dir, path_dirname(node.target_path))
            if target_dir_rel_path != '.':
                if target_dir_rel_path.startswith('./'):
                    target_dir_rel_path = target_dir_rel_path[2:]
                new_rpath += '/' + target_dir_rel_path

            logging.debug("Setting RPATH of '{}' to '{}'".format(node.target_path, new_rpath))

            patchelf_result = run_patchelf('--set-rpath', new_rpath, node.target_path)
            if node.is_executable and \
               patchelf_result.stderr == PATCHELF_NOT_AN_ELF_EXECUTABLE:
                logging.info("Not an ELF executable: '{}', removing the directory '{}'.".format(
                    node.path, node.target_dir))
                shutil.rmtree(node.target_dir)
                continue

            node_metadata = dict(
               original_location=normalize_path_for_metadata(node.path),
               dependencies=[
                   d.as_metadata() for d in
                   sorted(list(node.deps), key=lambda dep: dep.target)])

            with open(path_join(node.target_dir, 'metadata.yml'), 'w') as metadata_file:
                yaml.dump(node_metadata, metadata_file)

            # Use the Linuxbrew dynamic linker for all files.
            run_patchelf('--set-interpreter', ld_symlink_path, node.target_path)

        # Create symlinks from the "lib" directory to the "libdb" library. We add the "lib"
        # directory to LD_LIBRARY_PATH, and this makes sure dlopen finds correct library
        # dependencies when it is invoked by libsasl2.
        for node in nodes:
            if node.basename().startswith('libdb-'):
                create_rel_symlink(node.target_path, path_join(dest_lib_dir, node.basename()))
        logging.info("Successfully generated a YB distribution at {}".format(self.dest_dir))
Example #29
0
    def lib_link_dir_name_prefix(self):
        """
        Get a prefix to be used for the directory name in "lib" that would contain symlinks to
        all the libraries this binary needs.
        """
        return remove_so_extension_from_lib_name(path_basename(self.path))
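
remove_so_extension_from_lib_name is not shown here; a hedged guess that strips the '.so' extension together with any trailing version digits (e.g. 'libz.so.1.2.8' -> 'libz'):

import re

def remove_so_extension_from_lib_name(lib_name):
    # Hypothetical reconstruction, not necessarily the project's implementation.
    return re.sub(r'\.so(\.[0-9.]*)?$', '', lib_name)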
Example #30
0
    def basename(self):
        return path_basename(self.path)
Example #31
0
def find_elf_dependencies(elf_file_path):
    """
    Run ldd on the given ELF file and find libraries that it depends on. Also run patchelf and get
    the dynamic linker used by the file.

    @param elf_file_path: ELF file (executable/library) path
    """

    elf_file_path = realpath(elf_file_path)
    if elf_file_path.startswith('/usr/') or elf_file_path.startswith('/lib64/'):
        ldd_path = '/usr/bin/ldd'
    else:
        ldd_path = LINUXBREW_LDD_PATH

    ldd_result = run_program([ldd_path, elf_file_path], error_ok=True)
    dependencies = set()
    if ldd_result.returncode != 0:
        # Interestingly, the below error message is printed to stdout, not stderr.
        if ldd_result.stdout == 'not a dynamic executable':
            logging.debug(
                "Not a dynamic executable: {}, ignoring dependency tracking".format(elf_file_path))
            return dependencies
        raise RuntimeError(ldd_result.error_msg)

    for ldd_output_line in ldd_result.stdout.split("\n"):
        m = RESOLVED_DEP_RE.match(ldd_output_line)
        if m:
            lib_name = m.group(1)
            lib_resolved_path = realpath(m.group(2))
            dependencies.add(Dependency(lib_name, lib_resolved_path))

        tokens = ldd_output_line.split()
        if len(tokens) >= 4 and tokens[1:4] == ['=>', 'not', 'found']:
            missing_lib_name = tokens[0]
            raise RuntimeError("Library not found for '{}': {}".format(
                elf_file_path, missing_lib_name))

        # If we matched neither RESOLVED_DEP_RE nor the "not found" case, that is still fine,
        # e.g. there could be a line of the following form in the ldd output:
        #   linux-vdso.so.1 =>  (0x00007ffc0f9d2000)

    elf_basename = path_basename(elf_file_path)
    elf_dirname = path_dirname(elf_file_path)
    if elf_basename.startswith('libsasl2.'):
        # TODO: don't package Berkeley DB with the product -- it has an AGPL license.
        for libdb_so_name in ['libdb.so', 'libdb-5.so']:
            libdb_so_path = path_join(elf_dirname, libdb_so_name)
            if os.path.exists(libdb_so_path):
                dependencies.add(Dependency(libdb_so_name, libdb_so_path, via_dlopen=True))
        sasl_plugin_dir = '/usr/lib64/sasl2'
        for sasl_lib_name in os.listdir(sasl_plugin_dir):
            if sasl_lib_name.endswith('.so'):
                dependencies.add(
                        Dependency(sasl_lib_name,
                                   realpath(path_join(sasl_plugin_dir, sasl_lib_name))))

    if elf_basename.startswith('libc-'):
        # glibc loads a lot of libnss_* libraries using dlopen.
        for libnss_lib in glob.glob(os.path.join(elf_dirname, 'libnss_*')):
            if re.search(r'([.]so|\d+)$', libnss_lib):
                dependencies.add(Dependency(path_basename(libnss_lib),
                                            libnss_lib, via_dlopen=True))

    return dependencies
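
RESOLVED_DEP_RE is referenced but not defined in this snippet. A hedged reconstruction that captures the library name and its resolved path from ldd lines of the form 'libz.so.1 => /lib64/libz.so.1 (0x00007f...)':

import re

# Hypothetical pattern; the original project may use a stricter one.
RESOLVED_DEP_RE = re.compile(r'^\s*(\S+)\s+=>\s+(\S+)\s+\(0x[0-9a-fA-F]+\)\s*$')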
Example #32
0
                create_rel_symlink(node.target_path, path_join(dest_lib_dir, node.basename()))
        logging.info("Successfully generated a YB distribution at {}".format(self.dest_dir))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=LibraryPackager.__doc__)
    parser.add_argument('--build-dir',
                        help='Build directory to pick up executables/libraries from.',
                        required=True)
    parser.add_argument('--dest-dir',
                        help='Destination directory to save the self-sufficient directory tree '
                             'of executables and libraries at.',
                        required=True)
    parser.add_argument('--verbose',
                        help='Enable verbose output.',
                        action='store_true')
    args = parser.parse_args()
    log_level = logging.INFO
    if args.verbose:
        log_level = logging.DEBUG
    logging.basicConfig(
        level=log_level,
        format="[" + path_basename(__file__) + "] %(asctime)s %(levelname)s: %(message)s")

    release_manifest_path = path_join(YB_SRC_ROOT, 'yb_release_manifest.json')
    release_manifest = json.load(open(release_manifest_path))
    packager = LibraryPackager(build_dir=args.build_dir,
                               seed_executable_patterns=release_manifest['bin'],
                               dest_dir=args.dest_dir)
    packager.package_binaries()
Example #33
0
def build_code(src, dst, env, options):
    input = path_basename(src)
    appname, _ = path_splitext(input)
    code = '%s.canvas.js' % appname
    tzjs = '%s.tzjs' % appname

    dependency_file = '%s.deps' % src

    templates_dirs = [env['APP_ROOT'], env['APP_TEMPLATES'], env['APP_JSLIB']]
    for t in templates_dirs:
        template = path_join(t, '%s.html' % appname)
        if path_exists(template):
            template = path_basename(template)
            break
    else:
        template = None

    if dst.endswith('.canvas.debug.html'):
        env['MAKEHTML'].build(env, options, input=input, output=dst,
                              mode='canvas-debug',
                              templates=templates_dirs,
                              template=template)
    elif dst.endswith('.canvas.release.html'):
        env['MAKEHTML'].build(env, options, input=input, output=dst,
                              mode='canvas',
                              code=code,
                              templates=templates_dirs,
                              template=template)
    elif dst.endswith('.canvas.default.debug.html'):
        env['MAKEHTML'].build(env, options, input=input, output=dst,
                              mode='canvas-debug',
                              templates=templates_dirs)
    elif dst.endswith('.canvas.default.release.html'):
        env['MAKEHTML'].build(env, options, input=input, output=dst,
                              mode='canvas',
                              code=code,
                              templates=templates_dirs)
    elif dst.endswith('.canvas.js'):
        if options.closure:
            env['MAKETZJS'].build(env, options, input=input, output=dst,
                                  mode='canvas',
                                  MF=dependency_file,
                                  templates=templates_dirs)
            google_compile(dependency_file, dst, options.closure)
        else:
            env['MAKETZJS'].build(env, options, input=input, output=dst,
                                  mode='canvas',
                                  templates=templates_dirs)
    elif dst.endswith('.debug.html'):
        env['MAKEHTML'].build(env, options, input=input, output=dst,
                              mode='plugin-debug',
                              templates=templates_dirs,
                              template=template)
    elif dst.endswith('.release.html'):
        env['MAKEHTML'].build(env, options, input=input, output=dst,
                              mode='plugin',
                              code=tzjs,
                              templates=templates_dirs,
                              template=template)
    elif dst.endswith('.default.debug.html'):
        env['MAKEHTML'].build(env, options, input=input, output=dst,
                              mode='plugin-debug',
                              templates=templates_dirs)
    elif dst.endswith('.default.release.html'):
        env['MAKEHTML'].build(env, options, input=input, output=dst,
                              mode='plugin',
                              code=tzjs,
                              templates=templates_dirs)
    elif dst.endswith('.tzjs'):
        if env['SDK_VERSION'] < StrictVersion('0.19.0'):
            run_js2tzjs({
                'inputs': [src],
                'outputs': [dst],
                'env': env,
                'options': options
            })
        else:
            env['MAKETZJS'].build(env, options, input=input, output=dst,
                                  mode='plugin',
                                  yui=options.yui,
                                  templates=templates_dirs)
    elif dst.endswith('.jsinc'):
        run_js2tzjs_jsinc({
            'inputs': [src],
            'outputs': [dst],
            'env': env,
            'options': options
        })
    else:
        return False

    return True
Example #34
0
from os.path import (
    expanduser  as path_expand_user,
    curdir      as path_curdir,
    extsep      as path_extsep,
    join        as path_join,
    dirname     as path_dirname,
    basename    as path_basename,
)

# Discover the path to a home directory.
# Performs Unix shell-like "tilde expansion". (Works on Windows too.)
print( path_expand_user( "~" ) )
# Useful for finding a user's configuration files.

# Find the directory name portion of a path.
# Like Unix 'dirname' command.
print( path_dirname( path_expand_user( "~" ) ) )
# Find the file name portion of a path.
# Like Unix 'basename' command, but doesn't filter extensions.
print( path_basename( path_join(
    path_curdir,
    "stdlib-demo" + path_extsep + "py"
) ) )

# <demo> --- stop ---

### 'os.path' Module ###
from os.path import (
    exists      as path_exists,
)

# Test for the existence of a file.
# Note: If you need to check permissions on a file,
#       then use the 'os.access' or 'os.stat' callables or the 'stat' module,
#       some pieces of which are in the 'os.path' module.
print( path_exists( path_join(
    path_curdir,
Example #35
0
File: cxf.py Project: bopen/bgeo.catasto
def parse_foglio(basepath):
    foglio = {}

    basename = path_basename(basepath).upper()

    # check basename
    assert len(basename) == 11

    foglio['CODICE FOGLIO'] = basename
    foglio['CODICE COMUNE'] = basename[:4]
    foglio['CODICE SEZIONE CENSUARIA'] = basename[4]
    assert foglio['CODICE SEZIONE CENSUARIA'] in ['A', 'B', '_']

    foglio['CODICE NUMERO FOGLIO'] = basename[5:9]
    foglio['NUMERO FOGLIO'] = foglio['CODICE NUMERO FOGLIO'].lstrip('0')

    foglio['CODICE ALLEGATO'] = basename[9]
    assert foglio['CODICE ALLEGATO'] in ['0', 'Q']  # missing

    foglio['CODICE SVILUPPO'] = basename[10]
    assert foglio['CODICE SVILUPPO'] in ['0', 'U']

    cxf = open(basepath + '.CXF')
    sup = open(basepath + '.SUP')

    # parse CXF
    foglio['header'] = {}
    header = foglio['header']
    header['MAPPA'] = next(cxf).strip()
    assert header['MAPPA'] in ['MAPPA', 'MAPPA FONDIARIO', 'QUADRO D\'UNIONE']

    header['NOME MAPPA'] = next(cxf).strip()
    assert header['NOME MAPPA'] == basename

    header['SCALA ORIGINARIA'] = next(cxf).strip()

    foglio['oggetti'] = dict((object_name, []) for object_name in oggetti_cartografici)

    for raw_line in cxf:
        line = raw_line.strip().rstrip('\\')
        assert line in oggetti_cartografici, 'Unknown object %r' % line

        oggetto = {}
        nomi_record, parse_functions = oggetti_cartografici[line]
        for nome_record in nomi_record:
            oggetto[nome_record] = next(cxf).strip()

        for parse_function in parse_functions:
            parse_function(cxf, oggetto)

        foglio['oggetti'][line].append(oggetto)

        if line == 'EOF':
            # record di terminazione
            break

    try:
        garbage = next(cxf)
    except StopIteration:
        garbage = None
    assert garbage is None, 'Garbage after CXF EOF %r' % garbage

    # parse SUP
    name, date_update = next(sup).split()

    def check(sup, key_name, tipo):
        key, value = next(sup).split()
        assert key == key_name
        check = int(value)
        oggetti = sum(1 for b in foglio['oggetti']['BORDO'] if b['tipo'] == tipo)
        print(key, value)
        assert oggetti == check

    check(sup, 'N.FABBRIC', 'FABBRICATO')
    check(sup, 'N.PARTIC', 'PARTICELLA')
    check(sup, 'N.STRADE', 'STRADA')
    check(sup, 'N.ACQUE', 'ACQUA')

    next(sup)
    next(sup)

    areas = dict(next(sup).split() for b in foglio['oggetti']['BORDO'] if b['tipo'] == 'PARTICELLA')

    for b in foglio['oggetti']['BORDO']:
        if b['tipo'] != 'PARTICELLA':
            continue
        b['AREA'] = int(areas[b['CODICE IDENTIFICATIVO']])

    key, value = next(sup).split()
    assert key == 'PARTIC', (key, value)
    check = int(value)
    area = sum(int(a) for a in areas.values())
    print(key, value)
    assert check * 0.95 < area < check * 1.05, (area, check)

    for i in range(6):
        next(sup)

    try:
        garbage = next(sup)
    except StopIteration:
        garbage = None
    assert garbage is None, 'Garbage after SUP EOF %r' % garbage

    return foglio
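
A hedged usage sketch for parse_foglio; the stem below is borrowed from the plugin example earlier on this page and must point at matching .CXF/.SUP files:

foglio = parse_foglio('/data/catasto/G087_003800')
print(foglio['NUMERO FOGLIO'])          # '38'
print(len(foglio['oggetti']['BORDO']))  # number of parsed border objects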
Example #36
0
def main():
    # pylint: disable=E1103

    options = _check_options()

    locale.setlocale(locale.LC_ALL, '')

    verbose = options.verbose

    if verbose:
        logging.disable(logging.INFO)
    else:
        logging.disable(logging.WARNING)

    _add_missing_mime_types()

    try:
        game = Game(game_list=None,
                    game_path=path_abspath(path_dirname(options.input)),
                    slug=None,
                    games_root=options.cache,
                    deploy_enable=True,
                    manifest_name=path_basename(options.input))

        _check_game(game)

        silent = options.silent
        if not silent:
            log('Deploying "%s" to "%s".' % (game.slug, options.hub))

        connection = connection_from_url(options.hub, maxsize=8, timeout=8.0)

        cookie = login(connection, options)

        (project, projectversion,
         projectversion_title) = _check_project(connection, options, cookie)

        result = 0

        deploy_info = None
        deploy_thread = None

        try:
            deploy_info = Deployment(game, connection, project, projectversion,
                                     projectversion_title,
                                     _get_cookie_value(cookie), options.cache)

            deploy_thread = Thread(target=deploy_info.deploy,
                                   args=[options.ultra])
            deploy_thread.start()

            start_time = time()

            result = _progress(deploy_info, silent, verbose)
            if (0 == result):
                result = _postupload_progress(deploy_info, connection, cookie,
                                              silent, verbose)
                if (0 == result):
                    if not silent:
                        log('Deployment time: %s' % _fmt_time(
                            (time() - start_time)))
                    game.set_deployed()

        except KeyboardInterrupt:
            warning('Program stopped by user!')
            if deploy_info:
                deploy_info.cancel()
            result = -1

        except Exception as e:
            error(str(e))
            if deploy_info:
                deploy_info.cancel()
            result = -1

        if deploy_info:
            del deploy_info

        if deploy_thread:
            del deploy_thread

        logout(connection, cookie)

        return result

    except GameError:
        return -1
Example #37
0
def main():
    # pylint: disable=E1103

    options = _check_options()

    locale.setlocale(locale.LC_ALL, '')

    verbose = options.verbose

    if verbose:
        logging.disable(logging.INFO)
    else:
        logging.disable(logging.WARNING)

    _add_missing_mime_types()

    try:
        game = Game(game_list=None,
                    game_path=path_abspath(path_dirname(options.input)),
                    slug=None,
                    games_root=options.cache,
                    deploy_enable=True,
                    manifest_name=path_basename(options.input))

        _check_game(game)

        silent = options.silent
        if not silent:
            log('Deploying "%s" to "%s".' % (game.slug, options.hub))

        connection = connection_from_url(options.hub, maxsize=8, timeout=8.0)

        cookie = login(connection, options)

        (project, projectversion, projectversion_title) = _check_project(connection, options, cookie)

        result = 0

        deploy_info = None
        deploy_thread = None

        try:
            deploy_info = Deployment(game,
                                     connection,
                                     project,
                                     projectversion,
                                     projectversion_title,
                                     _get_cookie_value(cookie),
                                     options.cache)

            deploy_thread = Thread(target=deploy_info.deploy, args=[options.ultra])
            deploy_thread.start()

            start_time = clock()

            result = _progress(deploy_info, silent, verbose)
            if (0 == result):
                result = _postupload_progress(deploy_info, connection, cookie, silent, verbose)
                if (0 == result):
                    if not silent:
                        log('Deployment time: %s' % _fmt_time((clock() - start_time)))
                    game.set_deployed()

        except KeyboardInterrupt:
            warning('Program stopped by user!')
            if deploy_info:
                deploy_info.cancel()
            result = -1

        except Exception as e:
            error(str(e))
            if deploy_info:
                deploy_info.cancel()
            result = -1

        if deploy_info:
            del deploy_info

        if deploy_thread:
            del deploy_thread

        logout(connection, cookie)

        return result

    except GameError:
        return -1
Example #38
0
def standard_output_version(version, dependencies, output_file=None):
    main_module_name = path_basename(sys.argv[0])
    version_string = None
    if dependencies:
        deps = {}

        def get_dependencies_set(this_module_name, deps_list):
            for module_name in deps_list:
                if module_name not in deps:
                    m = None
                    try:
                        m = __import__(module_name, globals(), locals(),
                                       ['__version__', '__dependencies__'])
                    except ImportError:
                        print "Failed to import %s, listed in dependencies " \
                            "for %s" % (module_name, this_module_name)
                        exit(1)
                    else:
                        # Test if the module actually has a version attribute
                        try:
                            version_ = m.__version__
                        except AttributeError as e:
                            print 'No __version__ attribute for tool %s' \
                                % m.__name__
                            print ' >> %s' % str(e)
                        else:
                            deps[module_name] = m

                    if m is not None:
                        try:
                            get_dependencies_set(module_name,
                                                 m.__dependencies__)
                        except AttributeError:
                            pass

        get_dependencies_set(main_module_name, dependencies)

        module_names = deps.keys()
        module_names.sort()

        module_list = ', '.join([
            '%s %s' % (deps[m].__name__, deps[m].__version__)
            for m in module_names
        ])
        version_string = '%s %s (%s)' % (main_module_name, version,
                                         module_list)
    else:
        version_string = '%s %s' % (main_module_name, version)

    # If we are given an output file, write the versions info there if
    # either:
    #   the file doesn't exist already, or
    #   the file contains different data
    # If we are given no output file, just write to stdout.

    print version_string
    if output_file is not None:
        if path_exists(output_file):
            with open(output_file, "rb") as f:
                old_version = f.read()
            if old_version == version_string:
                return
        with open(output_file, "wb") as f:
            f.write(version_string)
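
A hedged usage sketch (Python 2, matching the print statements above); the version string and output file are hypothetical, and 'json' is used because it ships a __version__ attribute:

__version__ = '1.2.0'

if __name__ == '__main__':
    standard_output_version(__version__, ['json'], output_file='version.txt')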
Example #39
0
def parse_foglio(basepath):
    foglio = {}

    basename = path_basename(basepath).upper()
    
    # check basename
    assert len(basename) == 11

    foglio['CODICE FOGLIO'] = basename  # On postgres this ends up as something like "G087_003600": UNREADABLE! It should just be "36". Trying to change this behavior in "ogr.py"
    foglio['CODICE COMUNE'] = basename[:4]
    foglio['CODICE SEZIONE CENSUARIA'] = basename[4]
    assert foglio['CODICE SEZIONE CENSUARIA'] in ['A', 'B', '_']
    
    foglio['CODICE NUMERO FOGLIO'] = basename[5:9]
    foglio['NUMERO FOGLIO'] = foglio['CODICE NUMERO FOGLIO'].lstrip('0')

    foglio['CODICE ALLEGATO'] = basename[9]
    assert foglio['CODICE ALLEGATO'] in ['0', 'Q', 'A', 'B', 'D'] # missing

    foglio['CODICE SVILUPPO'] = basename[10]
    assert foglio['CODICE SVILUPPO'] in ['A', 'B', 'C', 'D', '0', 'U']
    
    cxf = open(basepath + '.CXF')
    sup = open(basepath + '.SUP')

    # parse CXF
    foglio['header'] = {}
    header = foglio['header']
    header['MAPPA'] = cxf.next().strip()
    assert header['MAPPA'] in ['MAPPA', 'MAPPA FONDIARIO', 'QUADRO D\'UNIONE']

    header['NOME MAPPA'] = cxf.next().strip()
    assert header['NOME MAPPA'] == basename
    
    header['SCALA ORIGINARIA'] = cxf.next().strip()

    foglio['oggetti'] = dict((object_name, []) for object_name in oggetti_cartografici)

    for raw_line in cxf:
        line = raw_line.strip().rstrip('\\')
        assert line in oggetti_cartografici, 'Unknown object %r' % line

        oggetto = {}
        nomi_record, parse_functions = oggetti_cartografici[line]
        for nome_record in nomi_record:
            oggetto[nome_record] = cxf.next().strip()

        for parse_function in parse_functions:
            parse_function(cxf, oggetto)

        foglio['oggetti'][line].append(oggetto)

        if line == 'EOF':
            # record di terminazione
            break

    try:
        garbage = cxf.next()
    except StopIteration:
        garbage = None
    assert garbage is None, 'Garbage after CXF EOF %r' % garbage

    # parse SUP
    name, date_update = sup.next().split()

    def check(sup, key_name, tipo):
        key, value = sup.next().split()
        assert key == key_name
        check = int(value)
        oggetti = sum(1 for b in foglio['oggetti']['BORDO'] if b['tipo'] == tipo)
        print key, value
        assert oggetti == check

    check(sup, 'N.FABBRIC', 'FABBRICATO')
    check(sup, 'N.PARTIC', 'PARTICELLA')
    check(sup, 'N.STRADE', 'STRADA')
    check(sup, 'N.ACQUE', 'ACQUA')
    
    sup.next()
    sup.next()

    areas = dict(sup.next().split() for b in foglio['oggetti']['BORDO'] if b['tipo'] == 'PARTICELLA')

    for b in foglio['oggetti']['BORDO']:
        if b['tipo'] != 'PARTICELLA':
            continue
        b['AREA'] = int(areas[b['CODICE IDENTIFICATIVO']])

    key, value = sup.next().split()
    assert key == 'PARTIC', (key, value)
    check = int(value)
    area = sum(int(a) for a in areas.values())
    print key, value
    assert check * 0.95 < area < check * 1.05, (area, check)

    for i in range(6):
        sup.next()

    try:
        garbage = sup.next()
    except StopIteration:
        garbage = None
    assert garbage is None, 'Garbage after SUP EOF %r' % garbage

    return foglio