Esempio n. 1
0
    def search(self,search_str,search_dir,depth=2):
        """Run a query against the SOM and write HTML result and map pages.

        Args:
            search_str: Whitespace-separated query string.
            search_dir: Directory where the result and map HTML files are written.
            depth: Number of top (best-matching) neurons whose document lists
                are searched. Default 2 (arbitrary) has been sufficient so far.

        Side effects: writes ``<search_str>.html`` and ``<search_str>_map.html``
        under ``search_dir``, draws the SOM map, and opens the result page in
        the default web browser.
        """
        results = Counter()        # {url:score}
        dist_neu_searchq=Counter()  # {nid:dist} Contains the similarity of each neuron to the aggregated search query
        neuron_lookup = self._neu_index
        # Top-10 most frequent keywords per neuron; used only to label the map.
        neuron_labels = [[k for (k,n) in Counter(neuron.get_keylist()).most_common()[:10]] for neuron in self._neurons]
        glomat = self._glomat # Global matrix. Contains similarity of each search term in the query to all neurons. Something like a cache.

        conn = SQLCon()
        # NOTE(review): conn.read(x) is called twice per term (once in the
        # filter, once for the value); terms without a vector are dropped.
        searchvecs = [(x,list(conn.read(x))) for x in search_str.split() if not conn.read(x) is None] # Obtain (word,vec) of search terms
        # NOTE(review): if no query term has a vector, search_len is 0 and the
        # divisions below raise ZeroDivisionError -- confirm callers guarantee
        # at least one known term.
        search_len = len(searchvecs)
        for (w,v) in searchvecs:        # For colour coding the map
            try:
                # Cache hit: similarities for this word were computed earlier.
                for nid in glomat[w]:
                    # NOTE(review): comparing a single-word score against the
                    # running accumulated total looks suspicious -- confirm
                    # this thresholding is intended.
                    if glomat[w][nid] > dist_neu_searchq[nid]:
                        dist_neu_searchq[nid] += glomat[w][nid]/search_len
            except KeyError:
                # Cache miss: score this word against every neuron and cache.
                glomat[w]={}
                for nid,neuron in enumerate(self._neurons):
                    glomat[w][nid] = distance(neuron.get_weights(),v) # cosine similarity, hence 1 is best. 0 is bleh. -1 is opposite.
                    if glomat[w][nid] > dist_neu_searchq[nid]:
                        dist_neu_searchq[nid] += glomat[w][nid]/search_len


            # Union of all doclists with minimum dist_from_neuron.
            # NOTE(review): this document-scoring pass runs once per query word
            # (it is inside the for-loop); apparently intentional, since
            # wc_sim below uses the current word vector v.
            doclist = {}
            for nid in dist_neu_searchq.most_common()[:depth]:
                # most_common() yields (nid, score) tuples, hence nid[0].
                neuron = neuron_lookup[nid[0]]
                doclist.update(neuron.get_top_docs(30))
            files = (open(doc) for doc in doclist)
            for json_file in files:
                data = json.load(json_file)
                centroids = data['centroids']
                url = data['url']
                json_file.close()
                # Score the doc by its best-matching centroid for this word.
                wc_sim = [distance(v,c) for c in centroids]
                max_wc_sim = max(wc_sim)
                results[url] += max_wc_sim/len(searchvecs)

        # Keep only the 20 best-scoring URLs for the result page.
        results = OrderedDict(results.most_common(20))
        htmlVars = {'query': search_str, 'results':results}
        htmlCode = template.render(htmlVars)
        result_path = os.path.join(search_dir,search_str+'.html')
        map_path = os.path.join(search_dir,search_str+'_map.html')

        with open(result_path,'w') as f:
            f.write(htmlCode)
        self.draw_SOM(search_str,dist_neu_searchq,neuron_labels,map_path)


        result_path = "file://{}".format(pathname2url(result_path))
        map_path = "file://{}".format(pathname2url(map_path))
        webbrowser.open(result_path)
def repl_relative(m, base_path, relative_path):
    """Rewrite an absolute file link in match *m* as a path relative to *relative_path*.

    Args:
        m: Regex match; group ``name`` is the attribute prefix, group ``path``
            is the quoted link target.
        base_path: Directory used to resolve relative link targets.
        relative_path: Directory the rewritten link should be relative to.

    Returns:
        The rewritten link text, or the original match text when the target is
        a URL, does not exist on disk, cannot be converted (cross-drive on
        Windows), or cannot be parsed at all.
    """

    RE_WIN_DRIVE_PATH = re.compile(r"(^(?P<drive>[A-Za-z]{1}):(?:\\|/))")
    link = m.group(0)
    try:
        scheme, netloc, path, params, query, fragment, is_url, is_absolute = parse_url(m.group('path')[1:-1])

        if not is_url:
            # Get the absolute path of the file or return
            # if we can't resolve the path
            path = url2pathname(path)
            abs_path = None
            if (not is_absolute):
                # Convert current relative path to absolute
                temp = os.path.normpath(os.path.join(base_path, path))
                if os.path.exists(temp):
                    abs_path = temp.replace("\\", "/")
            elif os.path.exists(path):
                abs_path = path

            if abs_path is not None:
                convert = False
                # Determine if we should convert the relative path
                # (or see if we can realistically convert the path)
                if (sublime.platform() == "windows"):
                    # Make sure basepath starts with same drive location as target
                    # If they don't match, we will stay with absolute path.
                    # Fix: the original tested base_path.startswith('//') twice;
                    # the target path must be checked too for the UNC case.
                    if (base_path.startswith('//') and abs_path.startswith('//')):
                        convert = True
                    else:
                        base_drive = RE_WIN_DRIVE_PATH.match(base_path)
                        path_drive = RE_WIN_DRIVE_PATH.match(abs_path)
                        if (
                            (base_drive and path_drive) and
                            base_drive.group('drive').lower() == path_drive.group('drive').lower()
                        ):
                            convert = True
                else:
                    # OSX and Linux
                    convert = True

                # Convert the path, url encode it, and format it as a link
                if convert:
                    path = pathname2url(os.path.relpath(abs_path, relative_path).replace('\\', '/'))
                else:
                    path = pathname2url(abs_path)
                link = '%s"%s"' % (m.group('name'), urlunparse((scheme, netloc, path, params, query, fragment)))
    except Exception:
        # Parsing crashed and burned; no need to continue. Narrowed from a
        # bare ``except`` so KeyboardInterrupt/SystemExit still propagate.
        pass

    return link
Esempio n. 3
0
File: put.py Progetto: mchafu/zget
    def do_GET(self):
        """Serve the shared file for GET requests whose path matches one of
        the server's allowed basenames; answer 404 and abort otherwise.

        Streams the file in 8 KiB chunks, invoking ``self.server.reporthook``
        (if set) after each chunk, and sets ``self.server.downloaded`` when
        the transfer completes so the server loop knows it can stop.
        """
        # Compare the request path against the URL-encoded allowed basenames.
        if self.path in map(
            lambda x: urllib.pathname2url(os.path.join('/', x)),
            self.server.allowed_basenames
        ):
            utils.logger.info(_("Peer found. Uploading..."))
            full_path = os.path.join(os.curdir, self.server.filename)
            with open(full_path, 'rb') as fh:
                maxsize = os.path.getsize(full_path)
                self.send_response(200)
                self.send_header('Content-type', 'application/octet-stream')
                self.send_header(
                    'Content-disposition',
                    'inline; filename="%s"' % os.path.basename(
                        self.server.filename
                    )
                )
                self.send_header('Content-length', maxsize)
                self.end_headers()

                # Stream the file in fixed-size chunks, reporting progress.
                i = 0
                while True:
                    data = fh.read(1024 * 8)  # chunksize taken from urllib
                    if not data:
                        break
                    self.wfile.write(data)
                    if self.server.reporthook is not None:
                        self.server.reporthook(i, 1024 * 8, maxsize)
                    i += 1
            self.server.downloaded = True

        else:
            self.send_response(404)
            self.end_headers()
            # Abort the server on unexpected requests.
            raise RuntimeError(_("Invalid request received. Aborting."))
Esempio n. 4
0
    def _create_tar_overlay(self):
        repo_name = 'tar-test-overlay'
        tar_source_path = os.path.join(HERE, 'testfiles', 'layman-test.tar.bz2')

        # Duplicate test tarball (so we can delete it after testing)
        (_, temp_tarball_path) = tempfile.mkstemp()
        shutil.copyfile(tar_source_path, temp_tarball_path)

        # Write overlay collection XML
        xml_text = '''\
<?xml version="1.0" encoding="UTF-8"?>
<repositories xmlns="" version="1.0">
  <repo quality="experimental" status="unofficial">
    <name>%(repo_name)s</name>
    <description>XXXXXXXXXXX</description>
    <owner>
      <email>[email protected]</email>
    </owner>
    <source type="tar">file://%(temp_tarball_url)s</source>
  </repo>
</repositories>
        '''\
        % {
            'temp_tarball_url': urllib.pathname2url(temp_tarball_path),
            'repo_name': repo_name
          }
        print(xml_text)
        return xml_text, repo_name, temp_tarball_path
Esempio n. 5
0
def h5toncml(h5fname):
    """Generate NcML representation of HDF5 file's content.

    Dataset values not included.

    :arg str h5fname: HDF5 file name.
    :return: An instance of lxml.etree.ElementTree representing the NcML
        content.
    """
    f = h5py.File(h5fname, 'r')
    try:
        # Create the root element and the document...
        root = etree.Element(etree.QName(ns['nc'], 'netcdf'), nsmap=ns)
        ncmldoc = etree.ElementTree(root)

        # Add the XML schema attributes...
        root.attrib[etree.QName(ns['xsi'], 'schemaLocation')] = \
            'http://www.unidata.ucar.edu/schemas/netcdf/ncml-2.2.xsd'

        # Add location attribute...
        root.attrib['location'] = urlparse.urljoin(
            'file:',
            urlrequest.pathname2url(osp.abspath(h5fname)))

        # Visit each HDF5 object...
        # NOTE(review): grp_node is not defined in this function -- presumably
        # a module-level dict shared with objinfo/do_attributes; confirm.
        grp_node['/'] = root
        do_attributes(root, f)
        f.visititems(objinfo)
    finally:
        # Fix: close the HDF5 file even when building the document raises.
        f.close()

    return ncmldoc
Esempio n. 6
0
    def __init__(self, **kwargs):
        """Creates a lab notebook Entry object instance.

        Parses the relevant file corresponding to the notebook entry and
        attempts to determine an appropriate title to use for the entry, the
        date the analysis was last modified, etc. and stores all of the
        relevant information as a Entry instance.

        Args:
            filepath: Path to the file for which the lab notebook entry is
                being created.
            output_dir: The notebook HTML output directory. This will be
                removed from the final path in order to generate a relative URL.
            url_prefix: An optional URL prefix to be prepended to the entry.
            title: Optional explicit entry title; derived from the file when
                absent.
        """
        self.filepath = kwargs['filepath']
        self.filename = os.path.basename(self.filepath)
        self.dir_name = os.path.basename(os.path.dirname(self.filepath))
        # Entry date is the file's last-modified timestamp.
        self.date = datetime.fromtimestamp(os.path.getmtime(self.filepath))
        # NOTE(review): urljoin runs BEFORE pathname2url, so the prefix itself
        # gets percent-encoded as part of the result -- confirm intended.
        self.url = pathname2url(urljoin(kwargs['url_prefix'], 
                                                self.filepath.replace(kwargs['output_dir'] + "/", '')))

        # set title
        if 'title' in kwargs:
            self.title = kwargs['title']
        else:
            self.title = self._get_entry_title()
Esempio n. 7
0
    def test_branch_no_commit(self):
        """Test branching when no commits are involved"""
        # Revision 1 creates trunk (with one file) and branches; revision 2
        # copies trunk to branches/branch without changing any content.
        repo = self.make_repo((
            dict(nodes=(
                dict(action="add", path="trunk", kind="dir"),
                dict(action="add", path="branches", kind="dir"),
                dict(action="add", path="trunk/file", kind="file",
                    content=b""),
            )),
            dict(nodes=(
                dict(action="add", path="branches/branch",
                    copyfrom_path="trunk", copyfrom_rev=1),
            )),
        ))
        url = "file://{}/branches/branch".format(pathname2url(repo))
        output = os.path.join(self.dir, "output")
        with svnex.FastExportFile(output) as fex:
            # Map trunk@1 to an existing ref so the branch can be created
            # with a plain "reset ... from" instead of a new commit.
            rev_map = {"trunk": {1: "trunk"}}
            exporter = svnex.Exporter(url, fex, root="", rev_map=rev_map,
                quiet=True)
            exporter.export("refs/heads/branch")
        with open(output, "r", encoding="ascii") as output:
            self.assertMultiLineEqual("""\
reset refs/heads/branch
from trunk
""",
                output.read())
 def test_less_fields(self):
     """Load a CSV fixture with missing trailing fields; presumably the
     mediator's ``restval`` ("Worcester") fills the gaps -- see the city
     assertions below."""
     filename = os.path.join(os.path.dirname(__file__), 'resources/CSV_TEST_DATA_EMPTY_FIELDS.csv')
     mediator = CsvMediator(first_row_is_headers=True, restval="Worcester")
     # Build a file: URI for the fixture so the mediator can load it.
     uri = urljoin('file:', urllib.pathname2url(filename))
     data = mediator.load(uri=uri, base_name="test", read_on_load=True)
     self.assertEquals(len(data), 1)  # Only one type of elements
     row_class = list(data.keys())[0]
     self.assertEquals(len(data[row_class]), 25)  # 25 rows
     row_element = data[row_class][0]
     self.assertIsInstance(row_element, presentation.Row)
     self.assertTrue(hasattr(row_element, "id"))
     self.assertTrue(hasattr(row_element, "first_name"))
     self.assertTrue(hasattr(row_element, "last_name"))
     self.assertTrue(hasattr(row_element, "email"))
     self.assertTrue(hasattr(row_element, "country"))
     self.assertTrue(hasattr(row_element, "city"))
     self.assertEqual(row_element.id, "1")
     self.assertEqual(row_element.first_name, "Chris")
     self.assertEqual(row_element.last_name, "Jordan")
     self.assertEqual(row_element.email, "*****@*****.**")
     self.assertEqual(row_element.country, "Indonesia")
     self.assertEqual(row_element.city, "Worcester")
     row_element = data[row_class][1]
     self.assertEqual(row_element.id, "2")
     self.assertEqual(row_element.first_name, "Edward")
     self.assertEqual(row_element.last_name, "Williamson")
     self.assertEqual(row_element.email, "*****@*****.**")
     self.assertEqual(row_element.country, "Brazil")
     self.assertEqual(row_element.city, "Vila Velha")
Esempio n. 9
0
 def _open_file( self, filename ):
     """Open *filename* (relative to self._rootdir) in the editor window,
     reusing an existing tab if one already shows that file."""
     uri      = self._rootdir + "/" + pathname2url(filename)
     gio_file = Gio.file_new_for_uri(uri)
     tab = self._window.get_tab_from_location(gio_file)
     # Fix: identity comparison with None (PEP 8), not equality.
     if tab is None:
         tab = self._window.create_tab_from_location( gio_file, None, 0, 0, False, False )
     self._window.set_active_tab( tab )
Esempio n. 10
0
def path_to_url(path):
    """Convert a system path to a URL.

    On platforms whose path separator is already '/', the path is returned
    unchanged; otherwise it is converted with ``pathname2url``.
    """
    if os.path.sep != '/':
        return pathname2url(path)
    return path
Esempio n. 11
0
    def build_resources(self):
        """Walk ``self.root_dir`` and build a percy.Resource for every file
        no larger than MAX_FILESIZE_BYTES.

        Returns:
            List of percy.Resource instances; empty when no root dir is set.
        """
        resources = []
        if not self.root_dir:
            return resources
        for root, dirs, files in os.walk(self.root_dir, followlinks=True):
            for file_name in files:
                path = os.path.join(root, file_name)
                # Skip files that exceed the upload size limit.
                if os.path.getsize(path) > MAX_FILESIZE_BYTES:
                    continue
                with open(path, 'rb') as f:
                    content = f.read()

                    # URL path is the file path with the root dir stripped.
                    path_for_url = pathname2url(path.replace(self.root_dir, '', 1))
                    # Avoid a double slash when joining base_url and the path.
                    if self.base_url[-1] == '/' and path_for_url[0] == '/':
                        path_for_url = path_for_url.replace('/', '' , 1)


                    resource_url = "{0}{1}".format(self.base_url, path_for_url)
                    resource = percy.Resource(
                        resource_url=resource_url,
                        sha=utils.sha256hash(content),
                        local_path=os.path.abspath(path),
                    )
                    resources.append(resource)
        return resources
Esempio n. 12
0
    def _build_app(self):
        """Construct the main GTK window: a WebKit-based app view loading the
        bundled frontend/index.html inside a scrolled, auto-expanding layout.

        Stores the window in ``self._window`` and the view in ``self._appView``.
        """
        # build window
        w = Gtk.Window()
        w.set_position(Gtk.WindowPosition.CENTER)
        # NOTE(review): set_wmclass is deprecated in GTK3 -- confirm target
        # GTK version before relying on it.
        w.set_wmclass('Welcome to Linux Lite', 'Welcome to Linux Lite')
        w.set_title('Welcome to Linux Lite')
        w.set_size_request(768, 496)
        w.set_icon_from_file(os.path.join(self._data_path,
                             "icons/lite-welcome.png"))

        # build webkit container
        mv = LiteAppView()

        # load our index file
        file_out = os.path.abspath(os.path.join(self._data_path, 'frontend/index.html'))

        # Percent-encode the path so it forms a valid file:// URI.
        uri = 'file://' + pathname2url(file_out)
        mv.open(uri)

        # build scrolled window widget and add our appview container
        sw = Gtk.ScrolledWindow()
        sw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        sw.add(mv)

        # build a an autoexpanding box and add our scrolled window
        b = Gtk.VBox(homogeneous=False, spacing=0)
        b.pack_start(sw, expand=True, fill=True, padding=0)

        # add the box to the parent window and show
        w.add(b)
        w.connect('delete-event', self.close)
        w.show_all()

        self._window = w
        self._appView = mv
Esempio n. 13
0
def share(filename, forever):
    """Share a file in the local network.

    Binds an HTTP server to a random open port, advertises it via zeroconf,
    and serves the file either indefinitely or for a single request.

    Args:
        filename: Path of the file to share.
        forever: When true, keep serving after the first download; otherwise
            exit after one request is handled.
    """
    ip = utils.get_ip()
    # port = get_port()

    # Bind to port 0. OS assigns a random open port.
    server = httpserver.HTTPServer((ip, 0), utils.LocalFileHandler)
    port = server.server_port
    server.filename = filename

    zc_info = zeroconf.ServiceInfo(
            "_http._tcp.local.",
            "%s._http._tcp.local." % filename,
            utils.ip_to_bytes(ip), port, 0, 0,
            {'filename': filename}
    )
    url = "http://" + ip + ":" + str(port) + "/" + urllib.pathname2url(filename)

    zc_instance = zeroconf.Zeroconf()
    try:
        zc_instance.register_service(zc_info)
        click.echo('Sharing %s at %s' % (filename, url))
        if forever:
            server.serve_forever(poll_interval=0.5)
        else:
            server.handle_request()
            click.echo('File downloaded by peer. Exiting')
            sys.exit(0)
    except KeyboardInterrupt:
        # Ctrl-C simply stops sharing.
        # NOTE(review): the zeroconf service is never unregistered/closed on
        # exit -- confirm whether cleanup is handled elsewhere.
        pass
Esempio n. 14
0
def map_files():
    """List all available files.

    Browses zeroconf for "_http._tcp.local." services, blocking until at
    least one peer is found (or Ctrl-C), and returns an OrderedDict mapping
    each shared filename to its download URL.
    """
    files = OrderedDict()
    zc_instance = zeroconf.Zeroconf()
    listener = utils.ServiceListener()

    zeroconf.ServiceBrowser(zc_instance, "_http._tcp.local.", listener)

    try:
        # Give listener some time to discover available services.
        time.sleep(0.5)
        if not listener.services:
            click.echo('No files available. Waiting ...')
            # Poll until at least one service appears.
            while not listener.services:
                time.sleep(0.5)
            click.echo('Peer(s) found.')
        for service in listener.services:
            address = utils.bytes_to_ip(service.address)
            port = service.port
            # zeroconf properties are bytes; decode the advertised filename.
            filename = service.properties[b'filename'].decode('utf-8')
            url = "http://" + address + ":" + str(port) + "/" + \
                  urllib.pathname2url(filename)
            files[filename] = url
    except KeyboardInterrupt:
        sys.exit(0)
    return files
Esempio n. 15
0
 def _fetch_data(self, path, size):
     """Download just enough of a remote FTP file to capture its ID3 tags.

     Tries an ID3v2 tag at the start of the file first; otherwise probes the
     end of the file for ID3v2 and/or ID3v1 data. On success the internal
     buffer is rewound to the beginning and True is returned.

     Args:
         path: Remote file path on the FTP server.
         size: Total size of the remote file in bytes.

     Returns:
         True when the ranges were fetched cleanly, False on any curl error.
     """
     self._curl.setopt(pycurl.URL, "ftp://{}{}".format(self._server_addr, pathname2url(path)))
     try:
         tagsize = self._get_tag_size()
         if tagsize is not None:
             # Tags are at the beginning of the file, so download some more bytes
             self._fetch_range("0-%d" % (tagsize + 10))
         else:
             # Tags are perhaps at the end of file then
             self._fetch_range("%d-%d" % (size - 10, size))
             tagsize = self._get_tag_size()
             if tagsize is not None:
                 self._fetch_range("%d-%d" % (size - tagsize, size - 10))
             else:
                 # See if there is ID3v1
                 self._fetch_range("%d-%d" % (size - 128, size))
                 if self._buffer.getvalue()[0:3] == b"TAG":
                     id3v1 = self._buffer.getvalue()
                     self._fetch_range("%d-%d" % (size - 138, size - 129))
                     tagsize = self._get_tag_size()
                     # See if there is ID3v2 before ID3v1
                     if tagsize is not None:
                         self._fetch_range("%d-%d" % (size - tagsize - 148, size - 129))
                         self._buffer.write(id3v1)  # Concat ID3v1 to ID3v2
         self._buffer.seek(0)
         return True
     except pycurl.error as e:
         # Fix: Logger.exception treats extra positional args as %-format
         # arguments; the original passed ``e`` with no placeholder in the
         # message, which triggers a formatting error inside logging.
         self.log.exception("_fetch_data error: %s", e)
         return False
Esempio n. 16
0
def gstDuration(path):
    """Return the duration of the media file at *path* in milliseconds,
    or -1 if it cannot be determined."""
    try:
        disc = GstPbutils.Discoverer()
        discInfo = disc.discover_uri('file://' + url.pathname2url(path))
        return int(discInfo.get_duration() / Gst.MSECOND)
    except Exception:
        # Fix: narrowed from a bare ``except`` so KeyboardInterrupt and
        # SystemExit are not swallowed; any discovery failure yields -1.
        return -1
Esempio n. 17
0
    def _create_layout(self):
        """Build the widget's control area: attribute and map-type combo boxes
        plus the geomap web view loaded from the bundled owgeomap.html."""
        box = gui.widgetBox(self.controlArea,
                            orientation='horizontal')
        self.varmodel = VariableListModel(parent=self)
        # Combo for choosing which data attribute holds region names.
        self.attr_combo = gui.comboBox(box, self, 'selected_attr',
                                       orientation=Qt.Horizontal,
                                       label='Region attribute:',
                                       callback=self.on_attr_change,
                                       sendSelectedValue=True)
        self.attr_combo.setModel(self.varmodel)
        # Combo for choosing the displayed map (world/Europe/USA).
        self.map_combo = gui.comboBox(box, self, 'selected_map',
                                      orientation=Qt.Horizontal,
                                      label='Map type:',
                                      callback=self.on_map_change,
                                      items=Map.all)
        # Let both combos stretch horizontally but keep fixed height.
        hexpand = QSizePolicy(QSizePolicy.Expanding,
                              QSizePolicy.Fixed)
        self.attr_combo.setSizePolicy(hexpand)
        self.map_combo.setSizePolicy(hexpand)

        # Load the bundled HTML map through a file: URL.
        url = urljoin('file:',
                      pathname2url(os.path.join(
                          os.path.dirname(__file__),
                          'resources',
                         'owgeomap.html')))
        self.webview = gui.WebviewWidget(self.controlArea, self, url=QUrl(url))
        self.controlArea.layout().addWidget(self.webview)

        # Inject region code tables into the page once the event loop starts.
        QTimer.singleShot(
            0, lambda: self.webview.evalJS('REGIONS = {};'.format({Map.WORLD: CC_WORLD,
                                                                   Map.EUROPE: CC_EUROPE,
                                                                   Map.USA: CC_USA})))
Esempio n. 18
0
def get_file_url(path: str) -> str:
    """Return the file URL that corresponds to the file path ``path``.

    If ``path`` is relative, it is converted into an absolute path before being converted into a
    file URL.
    """
    absolute = _os.path.abspath(path)
    return _urlparse.urljoin('file:', _urlreq.pathname2url(absolute))
Esempio n. 19
0
    def _get_box_from_loc_name(self, loc_name):
        """Geocode *loc_name* via the Google Maps API and return its bounding
        box as ``[west_lng, south_lat, east_lng, north_lat]``, optionally
        expanded by ``self.expand`` (via ``self._meter_to_geo``) on each side.

        Raises on network errors or when the first geocode result lacks
        ``geometry.bounds``.
        """
        # NOTE(review): pathname2url is used here merely as a percent-encoder
        # for the query parameter; urllib.parse.quote would be the
        # conventional choice -- confirm before changing.
        loc_name = pathname2url(loc_name)
        api_url = 'https://maps.googleapis.com/maps/api/geocode/json?address={}'
        api_url = api_url.format(loc_name)

        req = urlopen(api_url)
        # Decode the response body line by line and join into one string.
        body = ''.join(map(lambda x: x.decode('utf-8'), req.readlines()))

        # Use only the first (best) geocoding result.
        result = json.loads(body)['results'][0]
        geo_box = result['geometry']['bounds']

        box = [geo_box['southwest']['lng'],
               geo_box['southwest']['lat'],
               geo_box['northeast']['lng'],
               geo_box['northeast']['lat']]

        if self.expand:
            # Expand around the box's midpoint by self.expand meters,
            # converted to degrees of longitude/latitude.
            mid = [(box[2] - box[0]) / 2 + box[0],
                   (box[3] - box[1]) / 2 + box[1]]
            var_lng, var_lat = self._meter_to_geo(mid[0], mid[1], self.expand)
            box[0] -= var_lng
            box[1] -= var_lat
            box[2] += var_lng
            box[3] += var_lat

        return box
Esempio n. 20
0
    def test_modify_branch(self):
        """Modification of branch directory properties"""
        # Revision 1 adds trunk; revision 2 only changes a directory property,
        # so the export should contain two content-less commits.
        repo = self.make_repo((
            dict(nodes=(dict(action="add", path="trunk", kind="dir"),)),
            dict(nodes=(
                dict(action="change", path="trunk", props={"name": "value"}),
            )),
        ))
        url = "file://{}/trunk".format(pathname2url(repo))
        output = os.path.join(self.dir, "output")
        with svnex.FastExportFile(output) as fex:
            exporter = svnex.Exporter(url, fex, root="", quiet=True)
            exporter.export("refs/ref")
        # The expected fast-export stream: one empty commit per revision,
        # each carrying only the git-svn-id trailer.
        with open(output, "r", encoding="ascii") as output:
            self.assertMultiLineEqual("""\
commit refs/ref
mark :1
committer (no author) <(no author)@00000000-0000-0000-0000-000000000000> 0 +0000
data 60


git-svn-id: /trunk@1 00000000-0000-0000-0000-000000000000


commit refs/ref
mark :2
committer (no author) <(no author)@00000000-0000-0000-0000-000000000000> 0 +0000
data 60


git-svn-id: /trunk@2 00000000-0000-0000-0000-000000000000


""",
                output.read())
Esempio n. 21
0
    def make_repository(self, relpath, allow_revprop_changes=True):
        """Create a repository.

        :param relpath: Repository location, relative to the test directory.
        :param allow_revprop_changes: Install an always-succeeding
            pre-revprop-change hook so revision properties may be edited.
        :return: Handle to the repository.
        """
        abspath = os.path.join(self.test_dir, relpath)
        repos.create(abspath)

        if allow_revprop_changes:
            hooks_dir = os.path.join(abspath, "hooks")
            if sys.platform == "win32":
                revprop_hook = os.path.join(hooks_dir, "pre-revprop-change.bat")
                with open(revprop_hook, "w") as hook_file:
                    hook_file.write("exit 0\n")
            else:
                revprop_hook = os.path.join(hooks_dir, "pre-revprop-change")
                with open(revprop_hook, "w") as hook_file:
                    hook_file.write("#!/bin/sh\n")
                # Mark the hook script executable for owner/group/other.
                os.chmod(revprop_hook, os.stat(revprop_hook).st_mode | 0o111)

        # Windows paths need percent-encoding to form a valid file: URL.
        if sys.platform == "win32":
            return "file:%s" % pathname2url(abspath)
        return "file://%s" % abspath
Esempio n. 22
0
def path2url(path):
    """Convert a local output path to a site URL path.

    ``index.html``/``index.htm`` leaves are collapsed to their parent
    directory, and a leading separator is forced before percent-quoting.
    """
    m = re.match(r'(.*)[/\\]index.html?$', path)
    if m:
        # Collapse .../index.html to the directory URL.
        path = m.group(1) + os.path.sep
    path = os.path.sep + path
    # NOTE(review): URL_ENCODING and PY2 are module-level globals not visible
    # here; on Python 3 pathname2url receives encoded bytes -- confirm that
    # is intended on the target interpreter.
    url = pathname2url(path.encode(URL_ENCODING))
    return url.decode('ASCII') if PY2 else url
Esempio n. 23
0
 def _get_uri(self):
     """Return ``self.filename`` as a playable URI.

     Bare file paths are resolved and converted to ``file:`` URIs; inputs
     already carrying a known scheme pass through unchanged. Returns None
     when no filename is set.
     """
     uri = self.filename
     if not uri:
         return
     known_schemes = ("http", "https", "file", "udp", "rtp", "rtsp")
     if uri.split(":", 1)[0] not in known_schemes:
         uri = "file:" + pathname2url(path.realpath(uri))
     return uri
Esempio n. 24
0
def load_window():
    """Build the colors controller UI and run it through Zaguan in debug mode."""
    controller = ColorsController()
    base_dir = os.path.dirname(os.path.abspath(__file__))
    index_path = os.path.join(base_dir, 'html/index.html')
    app = Zaguan('file://' + pathname2url(index_path), controller)
    app.run(debug=True)
Esempio n. 25
0
def ffmpeg_take_scene_screenshot_without_save(scene):
    """Take a thumbnail screenshot for *scene* and store its relative URL on
    ``scene.thumbnail`` (the scene object is NOT saved here).

    The screenshot time scales with the video duration via the linear model
    derived below.
    """
    # When we get here we assume that the scene was already probed with ffprobe, therefore it has all the video metadata.

    # {10 /x + y  = 1} , {7200 / x + y = 300} where 10 and 7200 are the duration of the original video
    # and 1 and 300 are the time in seconds where we should take the screen shot.
    # EX if the video is 45 second long the screenshot will be taken on the 2nd second
    # if the video duration is half an hour the screenshot will be taken at 74 seconds
    # x=24.0468 y=0.584145

    x = 24.0468
    y = 0.584145

    screenshot_time = seconds_to_string(int(scene.duration / x + y))

    a = ffmpeg_take_screenshot(screenshot_time, scene.path_to_file)

    if a['success']:
        # print("Screenshot Taken")
        # Move the captured frame into the scene's media directory.
        dest_path = os.path.join(MEDIA_PATH, "scenes", str(scene.id), "thumb")
        z = move_sample_movie_to_correct_dir(scene, True, "thumb.jpg", dest_path,
                                             SCREENSHOT_OUTPUT_PATH, 'image')
        # NOTE(review): the sleep presumably waits for the move to settle on
        # disk -- confirm whether it is actually needed.
        time.sleep(1)
        # Store the thumbnail as a URL relative to the 'videos' root.
        thumb_path = os.path.relpath(z, start='videos')
        as_uri = urllib.pathname2url(thumb_path)
        scene.thumbnail = as_uri
Esempio n. 26
0
def getFileCopyState(filename):
    """Return the recorded copy state for *filename*, or -1 when unknown.

    The lookup key is the file: URL form of *filename*, matching how states
    are stored in the module-level ``fileCopyState`` mapping.
    """
    statestring = urlparse.urljoin('file:', urllib.pathname2url(filename))
    # dict.get avoids the membership-test-then-index double lookup; the
    # ``global`` declaration was unnecessary for a read-only access.
    return fileCopyState.get(statestring, -1)
Esempio n. 27
0
 def test_no_read_on_load(self):
     """With read_on_load=False the mediator registers the row type but
     loads no row data."""
     filename = os.path.join(os.path.dirname(__file__), 'resources/CSV_TEST_DATA.csv')
     mediator = CsvMediator(first_row_is_headers=True)
     # Build a file: URI for the fixture so the mediator can load it.
     uri = urljoin('file:', urllib.pathname2url(filename))
     data = mediator.load(uri=uri, base_name="test", read_on_load=False)
     self.assertEquals(len(data), 1)  # Only one type of elements
     # Every registered type must have an empty row list.
     for k, v in data.items():
         self.assertEquals(len(v), 0)
Esempio n. 28
0
def path2url(url):
    """Convert a file system path to a URL path.

    (Docstring fixed: the original said "URL to path", the inverse of what
    the code does.) On Windows, strips the ``///`` default-protocol notation
    that ``pathname2url`` produces for drive paths.
    """

    path = pathname2url(url)
    # If on windows, replace the notation to use a default protocol `///` with nothing.
    if is_win() and RE_WIN_DEFAULT_PROTOCOL.match(path):
        path = path.replace('///', '', 1)
    return path
Esempio n. 29
0
 def test_row_class_is_subclass_of_presentation_row(self):
     """The row type generated by the mediator must subclass presentation.Row."""
     filename = os.path.join(os.path.dirname(__file__), 'resources/CSV_TEST_DATA.csv')
     mediator = CsvMediator(first_row_is_headers=True)
     # Build a file: URI for the fixture so the mediator can load it.
     uri = urljoin('file:', urllib.pathname2url(filename))
     data = mediator.load(uri=uri, base_name="test", read_on_load=True)
     self.assertEquals(len(data), 1)  # Only one type of elements
     row_class = list(data.keys())[0]
     self.assertTrue(issubclass(row_class, presentation.Row))
Esempio n. 30
0
def path_to_url(path):
    """
    Convert a path to a file: URL.  The path will be made absolute and have
    quoted path parts.
    """
    absolute = os.path.abspath(path)
    normalized = os.path.normpath(absolute)
    return urlparse.urljoin("file:", urllib2.pathname2url(normalized))
Esempio n. 31
0
def get_subtask(cmd_action, file_dep=None):
    """Return a dictionary defining a subtask for the command string ``cmd_action``.

    The task name is the executable being run: the word after ``poetry run``
    when the command uses the poetry runner, otherwise the first word. Every
    subtask depends on the ``install`` task.

    Args:
        cmd_action: Shell command string to run as the task's action.
        file_dep: Optional list of file dependencies for the task.

    Returns:
        A doit task dictionary with ``name``, ``actions``, ``task_dep`` and,
        when given, ``file_dep`` keys.
    """
    words = cmd_action.split(" ")
    # "poetry run <tool> ..." -> name after the runner; else the command word.
    name = words[2] if cmd_action.startswith("poetry run ") else words[0]
    task = {"name": name, "actions": [cmd_action], "task_dep": ["install"]}
    if file_dep is not None:
        task["file_dep"] = file_dep
    return task


def open_in_browser(file_to_open):
    """Open a file in the web browser."""
    absolute = os.path.abspath(file_to_open)
    webbrowser.open("file://" + pathname2url(absolute))


def targets_exists(task):
    """Return True (updated) if all task targets exists."""
    for target in task.targets:
        if not os.path.exists(target):
            return False
    return True


def get_stdout(command):
    """Run command with text capture and check, then return stdout."""
    # text= is the modern alias for universal_newlines= (same behavior).
    output = check_output(command, text=True)
    return output


@contextmanager
def checkout(branch):
Esempio n. 32
0
    def run(self):
        """Executes the command line module, taking the system arguments,
        determining the plugin to run and then running it."""

        volatility3.framework.require_interface_version(1, 0, 0)

        # Map lower-cased renderer names to their classes for the -r option.
        renderers = dict([
            (x.name.lower(), x)
            for x in framework.class_subclasses(text_renderer.CLIRenderer)
        ])

        # Top-level parser; plugin-specific options are attached later as subparsers.
        parser = volargparse.HelpfulArgParser(
            add_help=False,
            prog=self.CLI_NAME,
            description="An open-source memory forensics framework")
        parser.add_argument(
            "-h",
            "--help",
            action="help",
            default=argparse.SUPPRESS,
            help=
            "Show this help message and exit, for specific plugin options use '{} <pluginname> --help'"
            .format(parser.prog))
        parser.add_argument("-c",
                            "--config",
                            help="Load the configuration from a json file",
                            default=None,
                            type=str)
        parser.add_argument(
            "--parallelism",
            help="Enables parallelism (defaults to off if no argument given)",
            nargs='?',
            choices=['processes', 'threads', 'off'],
            const='processes',
            default=None,
            type=str)
        parser.add_argument(
            "-e",
            "--extend",
            help="Extend the configuration with a new (or changed) setting",
            default=None,
            action='append')
        parser.add_argument(
            "-p",
            "--plugin-dirs",
            help="Semi-colon separated list of paths to find plugins",
            default="",
            type=str)
        parser.add_argument(
            "-s",
            "--symbol-dirs",
            help="Semi-colon separated list of paths to find symbols",
            default="",
            type=str)
        parser.add_argument("-v",
                            "--verbosity",
                            help="Increase output verbosity",
                            default=0,
                            action="count")
        parser.add_argument("-l",
                            "--log",
                            help="Log output to a file as well as the console",
                            default=None,
                            type=str)
        parser.add_argument(
            "-o",
            "--output-dir",
            help="Directory in which to output any generated files",
            default=os.getcwd(),
            type=str)
        parser.add_argument("-q",
                            "--quiet",
                            help="Remove progress feedback",
                            default=False,
                            action='store_true')
        parser.add_argument(
            "-r",
            "--renderer",
            metavar='RENDERER',
            help="Determines how to render the output ({})".format(", ".join(
                list(renderers))),
            default="quick",
            choices=list(renderers))
        parser.add_argument(
            "-f",
            "--file",
            metavar='FILE',
            default=None,
            type=str,
            help=
            "Shorthand for --single-location=file:// if single-location is not defined"
        )
        parser.add_argument(
            "--write-config",
            help="Write configuration JSON file out to config.json",
            default=False,
            action='store_true')
        parser.add_argument("--clear-cache",
                            help="Clears out all short-term cached items",
                            default=False,
                            action='store_true')

        # We have to filter out help, otherwise parse_known_args will trigger the help message before having
        # processed the plugin choice or had the plugin subparser added.
        known_args = [
            arg for arg in sys.argv if arg != '--help' and arg != '-h'
        ]
        partial_args, _ = parser.parse_known_args(known_args)

        # Send the banner to stderr when the chosen renderer emits structured
        # output on stdout, so the banner does not pollute machine-readable output.
        banner_output = sys.stdout
        if renderers[partial_args.renderer].structured_output:
            banner_output = sys.stderr
        banner_output.write("Volatility 3 Framework {}\n".format(
            constants.PACKAGE_VERSION))

        # Extend the plugin/symbol search paths before any plugin import happens.
        if partial_args.plugin_dirs:
            volatility3.plugins.__path__ = [
                os.path.abspath(p) for p in partial_args.plugin_dirs.split(";")
            ] + constants.PLUGINS_PATH

        if partial_args.symbol_dirs:
            volatility3.symbols.__path__ = [
                os.path.abspath(p) for p in partial_args.symbol_dirs.split(";")
            ] + constants.SYMBOL_BASEPATHS

        if partial_args.log:
            file_logger = logging.FileHandler(partial_args.log)
            file_logger.setLevel(1)
            file_formatter = logging.Formatter(
                datefmt='%y-%m-%d %H:%M:%S',
                fmt='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
            file_logger.setFormatter(file_formatter)
            vollog.addHandler(file_logger)
            vollog.info("Logging started")
        # Console verbosity: WARNING (30) by default, each -v lowers the level
        # by 10; three or more -v flags drop into sub-DEBUG levels.
        if partial_args.verbosity < 3:
            if partial_args.verbosity < 1:
                sys.tracebacklimit = None
            console.setLevel(30 - (partial_args.verbosity * 10))
        else:
            console.setLevel(10 - (partial_args.verbosity - 2))

        vollog.info("Volatility plugins path: {}".format(
            volatility3.plugins.__path__))
        vollog.info("Volatility symbols path: {}".format(
            volatility3.symbols.__path__))

        # Set the PARALLELISM
        if partial_args.parallelism == 'processes':
            constants.PARALLELISM = constants.Parallelism.Multiprocessing
        elif partial_args.parallelism == 'threads':
            constants.PARALLELISM = constants.Parallelism.Threading
        else:
            constants.PARALLELISM = constants.Parallelism.Off

        if partial_args.clear_cache:
            framework.clear_cache()

        # Do the initialization
        ctx = contexts.Context()  # Construct a blank context
        failures = framework.import_files(
            volatility3.plugins,
            True)  # Will not log as console's default level is WARNING
        if failures:
            parser.epilog = "The following plugins could not be loaded (use -vv to see why): " + \
                            ", ".join(sorted(failures))
            vollog.info(parser.epilog)
        automagics = automagic.available(ctx)

        plugin_list = framework.list_plugins()

        # Register CLI arguments for each unique configurable automagic.
        seen_automagics = set()
        chosen_configurables_list = {}
        for amagic in automagics:
            if amagic in seen_automagics:
                continue
            seen_automagics.add(amagic)
            if isinstance(amagic,
                          interfaces.configuration.ConfigurableInterface):
                self.populate_requirements_argparse(parser, amagic.__class__)

        # Each plugin becomes a subcommand carrying its own requirements.
        subparser = parser.add_subparsers(
            title="Plugins",
            dest="plugin",
            description="For plugin specific options, run '{} <plugin> --help'"
            .format(self.CLI_NAME),
            action=volargparse.HelpfulSubparserAction)
        for plugin in sorted(plugin_list):
            plugin_parser = subparser.add_parser(
                plugin, help=plugin_list[plugin].__doc__)
            self.populate_requirements_argparse(plugin_parser,
                                                plugin_list[plugin])

        ###
        # PASS TO UI
        ###
        # Hand the plugin requirements over to the CLI (us) and let it construct the config tree

        # Run the argparser
        args = parser.parse_args()
        if args.plugin is None:
            parser.error("Please select a plugin to run")

        vollog.log(constants.LOGLEVEL_VVV,
                   "Cache directory used: {}".format(constants.CACHE_PATH))

        plugin = plugin_list[args.plugin]
        chosen_configurables_list[args.plugin] = plugin
        base_config_path = "plugins"
        plugin_config_path = interfaces.configuration.path_join(
            base_config_path, plugin.__name__)

        # Special case the -f argument because people use is so frequently
        # It has to go here so it can be overridden by single-location if it's defined
        # NOTE: This will *BREAK* if LayerStacker, or the automagic configuration system, changes at all
        ###
        if args.file:
            file_name = os.path.abspath(args.file)
            if not os.path.exists(file_name):
                vollog.log(logging.INFO,
                           "File does not exist: {}".format(file_name))
            else:
                single_location = "file:" + request.pathname2url(file_name)
                ctx.config[
                    'automagic.LayerStacker.single_location'] = single_location

        # UI fills in the config, here we load it from the config file and do it before we process the CL parameters
        if args.config:
            with open(args.config, "r") as f:
                json_val = json.load(f)
                ctx.config.splice(
                    plugin_config_path,
                    interfaces.configuration.HierarchicalDict(json_val))

        # It should be up to the UI to determine which automagics to run, so this is before BACK TO THE FRAMEWORK
        automagics = automagic.choose_automagic(automagics, plugin)
        for amagic in automagics:
            chosen_configurables_list[amagic.__class__.__name__] = amagic

        if ctx.config.get('automagic.LayerStacker.stackers', None) is None:
            ctx.config[
                'automagic.LayerStacker.stackers'] = stacker.choose_os_stackers(
                    plugin)
        self.output_dir = args.output_dir
        if not os.path.exists(self.output_dir):
            parser.error(
                "The output directory specified does not exist: {}".format(
                    self.output_dir))

        self.populate_config(ctx, chosen_configurables_list, args,
                             plugin_config_path)

        # -e conf.path='value' entries are JSON-decoded and spliced into the config.
        if args.extend:
            for extension in args.extend:
                if '=' not in extension:
                    raise ValueError(
                        "Invalid extension (extensions must be of the format \"conf.path.value='value'\")"
                    )
                address, value = extension[:extension.find('=')], json.loads(
                    extension[extension.find('=') + 1:])
                ctx.config[address] = value

        ###
        # BACK TO THE FRAMEWORK
        ###
        constructed = None
        try:
            progress_callback = PrintedProgress()
            if args.quiet:
                progress_callback = MuteProgress()

            constructed = plugins.construct_plugin(
                ctx, automagics, plugin, base_config_path, progress_callback,
                self.file_handler_class_factory())

            if args.write_config:
                vollog.debug("Writing out configuration data to config.json")
                with open("config.json", "w") as f:
                    json.dump(dict(constructed.build_configuration()),
                              f,
                              sort_keys=True,
                              indent=2)
        except exceptions.UnsatisfiedException as excp:
            self.process_unsatisfied_exceptions(excp)
            parser.exit(
                1, "Unable to validate the plugin requirements: {}\n".format(
                    [x for x in excp.unsatisfied]))

        try:
            # Construct and run the plugin
            if constructed:
                renderers[args.renderer]().render(constructed.run())
        except (exceptions.VolatilityException) as excp:
            self.process_exceptions(excp)
Esempio n. 33
0
def main(args):
    """Report statistics on (or vacuum) an OptSearch sqlite results database.

    Expects ``args.file`` (path to the database file) and ``args.vacuum``
    (truthy to VACUUM the database instead of reporting statistics).
    Exits the process with status -1 when the file is missing or cannot
    be opened.
    """
    dbfilename = args.file

    if not os.path.isfile(dbfilename):
        print("ERROR: File ", dbfilename, " does not exist.")
        sys.exit(-1)

    if args.vacuum:
        try:
            conn = sqlite3.connect(dbfilename)
            cur = conn.cursor()
            print("Vacuuming database ..")
            cur.execute('vacuum;')
            conn.commit()
        except sqlite3.Error as e:
            print("An error was encountered trying to vacuum the database:",
                  e.args[0])

    else:
        # TODO there are a lot more things that it might be useful to know.
        # These just give a tiny hint.  Having NumPy would really help,
        # but it is not installed on Isambard2 at present.

        try:
            # Read-only URI connection: the report must never modify the data.
            dburi = 'file:%s?mode=ro' % pathname2url(dbfilename)
            conn = sqlite3.connect(dburi, uri=True)
        except sqlite3.OperationalError as e:
            print('ERROR: Could not connect to databse: ', e)
            # Bug fix: execution previously fell through with `conn`
            # undefined, raising a NameError at conn.cursor() below.
            sys.exit(-1)

        try:
            cur = conn.cursor()

            # Warning: If the database is empty, these will fail.
            # That should only happen if something went really wrong

            print('\nNumber of positions searched: ', )
            cur.execute('select count(id) from position;')
            print(cur.fetchone()[0])

            print('\nTotal number of evaluations (all visits) is: ', )
            cur.execute('SELECT SUM(visits) FROM position;')
            print(cur.fetchone()[0])

            print("\nMaximum number of visits to any single position is: ", )
            cur.execute("SELECT MAX(visits) FROM position;")
            print(cur.fetchone()[0])

            print("\nAverage number of visits per position is: ", )
            cur.execute("SELECT AVG(visits) FROM position;")
            print(cur.fetchone()[0])

            print("\nTop 10 most visited positions:")
            cur.execute("SELECT id,fitness,visits FROM position;")
            rows = cur.fetchall()
            sorted_rows = sorted(rows, key=lambda row: row[2])
            print('Id, Fitness, Visits:')
            # itertools.islice-style manual cap kept simple: first 10 rows only.
            for count, row in enumerate(sorted_rows, start=1):
                print(row)
                if count == 10:
                    break

            print('\nAll best positions found:')
            print('Timestamp, PositionID:')
            for row in cur.execute('select * from global_best_history;'):
                print(row)

            # If all our fitnesses are UINT_MAX, then most likely every
            # attempt to assess fitness failed.  That means that something
            # failed at all positions visited.  Compiling may have failed,
            # execution of a wrapper script, benchmark or test may have failed,
            # etc.  OptSearch expects quite a few failures to be encountered,
            # but is likely to think the search converged if we never "move"
            # (ie fitness does not change).

            # Did the fitnesses of the positions change much?
            cur.execute('select fitness from position;')
            rows = cur.fetchall()  # This gives us a list of tuples
            # Bug fix: min()/max() raise ValueError on an empty table.
            if rows:
                print('\nFitnesses found vary between minimum: ',
                      min(rows)[0], ' and maximum: ',
                      max(rows)[0], '\n')
            else:
                print('\nNo fitness values recorded.\n')

        except sqlite3.Error as e:
            print("An error was encountered trying to query the database:",
                  e.args[0])

    conn.close()
Esempio n. 34
0
 def target():
     # Callback body: open `open_file` in the browser as a file:// URL.
     # NOTE(review): relies on `browser`, `open_file` and
     # `self.webbrowser_open_new` from an enclosing scope not visible here.
     return browser.open(urljoin('file:', pathname2url(open_file)),
                         new=self.webbrowser_open_new)
Esempio n. 35
0
def relative_path_to_artifact_path(path):
    """Translate a relative local path into a URL-style artifact path."""
    # POSIX paths already use '/' separators, so no conversion is needed.
    if os.path == posixpath:
        return path
    is_absolute = os.path.abspath(path) == path
    if is_absolute:
        raise Exception("This method only works with relative paths.")
    # On non-POSIX systems convert separators, then undo percent-encoding.
    return unquote(pathname2url(path))
Esempio n. 36
0
#!/usr/bin/python3

from urllib.request import build_opener, pathname2url, urlopen, url2pathname
import re
import sys

# Wikipedia "get to Philosophy" crawler configuration.
host = "http://ru.wikipedia.org"
# HTML markers used to slice the article body out of the page source.
start_line = "mw-content-text"
end_line = "references-small"
bane_line = "Вики (значения)"
url_line = "href=\"/wiki/"
#start_url = host + pathname2url("/wiki/Конь_в_пальто_(памятник)")
# Starting article comes from the command line; its path is URL-encoded.
start_url = host + pathname2url(sys.argv[1])
# Extensions / markers used to recognise and skip image links.
pictures = [".png", ".svg", "JPG", "jpg"]
picture_line = "?uselang=ru"
where = {}
# Paragraph delimiters for a secondary scan of the article text.
second_start_line = "<p>"
second_end_line = "</p>"
# steck ("stack"): the chain of article URLs visited so far.
steck = []


def next_site(url, num):
    # Record `url` at depth `num` in the visited chain, overwriting on backtrack.
    if len(steck) - 1 >= num:
        steck[num] = url
    else:
        steck.append(url)
    # Reaching the "Философия" (Philosophy) article ends the crawl: print the
    # chain of visited pages and exit successfully.
    if url == "http://ru.wikipedia.org/wiki/%D0%A4%D0%B8%D0%BB%D0%BE%D1%81%D0%BE%D1%84%D0%B8%D1%8F":
        for i in range(num + 1):
            print(steck[i])
        print("YES, amount of steps = ", num + 1)
        exit(0)
    # NOTE(review): the function appears truncated in this chunk; the actual
    # fetch-next-link step presumably follows in the full file.
Esempio n. 37
0
def path2url(path):
    "Convert a pathname to a file URL"

    encoded = pathname2url(path)
    return urljoin('file:', encoded)
Esempio n. 38
0
File: R.py Progetto: Embrace158/BOT
 def url(self):
     """URL of this resource file, for use by CoolQ (or other remote services)."""
     return urljoin(hoshino.config.RES_URL, pathname2url(self.__path))
Esempio n. 39
0
def pathname2fileurl(pathname):
    """Returns a file:// URL for pathname. Handles OS-specific conversions."""
    quoted = pathname2url(pathname)
    return urljoin('file:', quoted)
Esempio n. 40
0
def normalize_url(url, base_url=None, keep_relative=False):
    """
    Returns a normalized URL doing a join with a base URL. URL scheme defaults to 'file' and
    backslashes are replaced with slashes. For file paths the os.path.join is used instead of
    urljoin.

    :param url: a relative or absolute URL.
    :param base_url: the reference base URL for construct the normalized URL from \
    the argument. For compatibility between "os.path.join" and "urljoin" a trailing \
    '/' is added to not empty paths.
    :param keep_relative: if set to `True` keeps relative file paths, which would \
    not strictly conformant to URL format specification.
    :return: A normalized URL.
    """
    def add_trailing_slash(x):
        # Ensure a non-empty path component ends with '/' so that both
        # os.path.join and urljoin treat it as a directory.
        return urlunsplit(
            (x[0], x[1], x[2] + '/' if x[2] and x[2][-1] != '/' else x[2],
             x[3], x[4]))

    def filter_url(x):
        # Normalize backslashes, collapse duplicated leading slashes, and for
        # file/empty schemes escape '#' (not a fragment separator in paths).
        x = x.strip().replace('\\', '/')
        while x.startswith('//'):
            x = x.replace('//', '/', 1)
        while x.startswith('file:////'):
            x = x.replace('file:////', 'file:///', 1)
        if urlsplit(x).scheme in {'', 'file'}:
            x = x.replace('#', '%23')
        return x

    url = filter_url(url)

    if base_url is not None:
        base_url = filter_url(base_url)
        base_url_parts = urlsplit(base_url)
        base_url = add_trailing_slash(base_url_parts)
        if base_url_parts.scheme not in uses_relative:
            base_url_parts = urlsplit('file:///{}'.format(base_url))
        else:
            base_url_parts = urlsplit(base_url)

        if base_url_parts.scheme not in ('', 'file'):
            url = urljoin(base_url, url)
        else:
            url_parts = urlsplit(url)
            if url_parts.scheme not in ('', 'file'):
                url = urljoin(base_url, url)
            elif not url_parts.netloc or base_url_parts.netloc == url_parts.netloc:
                # Join paths only if host parts (netloc) are equal, using the os.path.join
                # instead of urljoin for path normalization.
                url = urlunsplit((
                    '',
                    base_url_parts.netloc,
                    os.path.normpath(
                        os.path.join(base_url_parts.path, url_parts.path)),
                    url_parts.query,
                    url_parts.fragment,
                ))

                # Add 'file' scheme if '//' prefix is added
                if base_url_parts.netloc and not url.startswith(base_url_parts.netloc) \
                        and url.startswith('//'):
                    url = 'file:' + url

    # Final normalization of the (possibly joined) URL, defaulting to 'file'.
    url_parts = urlsplit(url, scheme='file')
    if url_parts.scheme not in uses_relative:
        normalized_url = 'file:///{}'.format(
            url_parts.geturl())  # Eg. k:/Python/lib/....
    elif url_parts.scheme != 'file':
        normalized_url = urlunsplit((
            url_parts.scheme,
            url_parts.netloc,
            pathname2url(url_parts.path),
            url_parts.query,
            url_parts.fragment,
        ))
    elif os.path.isabs(url_parts.path):
        normalized_url = url_parts.geturl()
    elif keep_relative:
        # Can't use urlunsplit with a scheme because it converts relative paths to absolute ones.
        normalized_url = 'file:{}'.format(urlunsplit(('', ) + url_parts[1:]))
    else:
        normalized_url = urlunsplit((
            url_parts.scheme,
            url_parts.netloc,
            os.path.abspath(url_parts.path),
            url_parts.query,
            url_parts.fragment,
        ))
    return filter_url(normalized_url)
Esempio n. 41
0
def path_to_url(path):
    """Convert a local path (possibly with '~') into an absolute file: URL."""
    expanded = abspath(expanduser(path))
    url = urljoin('file:', pathname2url(expanded))
    log.debug("%s converted to %s", expanded, url)
    return url
Esempio n. 42
0
 def url(self):
     """URL of this resource file, for use by CoolQ (or other remote services)."""
     return urljoin(global_config.res_url, pathname2url(self.__path))
Esempio n. 43
0
def mfa_login(request):
    """Django login view with an optional SMS/EMAIL multi-factor step.

    On POST with valid credentials: if the user profile requests MFA (and
    ``settings.MFA`` is enabled) an ``MFACode`` is created and the user is
    redirected to the code-confirmation page, propagating any ``next``
    parameter; otherwise a normal session login is performed.  GET renders
    the empty login form.
    """
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            # Usernames are treated as case-insensitive (stored lowercase).
            user = authenticate(username=username.lower(), password=password)

            if user is not None:

                if user.is_active:
                    # Get User profile
                    up, g_o_c = UserProfile.objects.get_or_create(user=user)
                    # If MFA, send code and redirect
                    if up.mfa_login_mode in ("SMS", "EMAIL") and settings.MFA:
                        # Create an MFA message
                        mfac = MFACode.objects.create(user=up.user,
                                                      mode=up.mfa_login_mode)
                        # Send code and redirect
                        if up.mfa_login_mode == "SMS":
                            messages.info(
                                request,
                                _('An access code was sent to your mobile device. Please enter it here.'
                                  ))
                        if up.mfa_login_mode == "EMAIL":
                            messages.info(
                                request,
                                _('An access code was sent to your email. Please enter it here.'
                                  ))

                        rev = reverse('mfa_code_confirm', args=(mfac.uid, ))
                        # Fetch the next and urlencode
                        # NOTE(review): pathname2url is used as a URL-encoder
                        # here; urllib.parse.quote would be the conventional
                        # choice — confirm before changing.
                        if request.GET.get('next', ''):
                            if sys.version_info[0] == 3:
                                import urllib.request as req
                                rev = "%s?next=%s" % (rev,
                                                      req.pathname2url(
                                                          request.GET.get(
                                                              'next', '')))
                            if sys.version_info[0] == 2:
                                import urllib
                                rev = "%s?next=%s" % (rev,
                                                      urllib.pathname2url(
                                                          request.GET.get(
                                                              'next', '')))

                        return HttpResponseRedirect(rev)
                    # Else, just login as normal without MFA
                    login(request, user)
                    logger.info("Successful login from {}".format(
                        get_client_ip(request)))
                    next_param = request.GET.get('next', '')
                    if next_param:
                        # If a next is in the URL, then go there
                        return HttpResponseRedirect(next_param)
                    # otherwise just go to home.
                    return HttpResponseRedirect(reverse('home'))
                else:
                    # The user exists but is_active=False
                    messages.error(
                        request,
                        _('Please check your email for a link to '
                          'activate your account.'))
                    return render(request, 'login.html', {'form': form})
            else:
                logger.info("Invalid login attempt.")
                messages.error(request, _('Invalid username or password.'))
                return render(request, 'login.html', {'form': form})
        else:
            return render(request, 'login.html', {'form': form})
    # this is a GET
    return render(request, 'login.html', {'form': LoginForm()})
Esempio n. 44
0
def sendPush(userid):
    """Send a Symantec VIP push authentication to ``userid`` and poll for the result.

    Returns a human-readable status string describing success, denial,
    failure or timeout.  NOTE(review): relies on module-level ``message``
    and ``title`` globals and on the WSDL/certificate files being present
    relative to the working directory — confirm in the full file.
    """
    user_services_url = urllib.parse.urljoin('file:', pathname2url(os.path.abspath('./wsdl_files/vipuserservices-auth-1.7.wsdl')))
    user_services_client = Client(user_services_url,
      transport = HTTPSClientCertTransport( './privateKey_nopass.pem', './publicCert.pem'))
    query_services_url = urllib.parse.urljoin('file:', pathname2url(os.path.abspath('./wsdl_files/vipuserservices-query-1.7.wsdl')))
    query_services_client = Client(query_services_url,
      transport = HTTPSClientCertTransport( './privateKey_nopass.pem', './publicCert.pem'))

    user_services_object = SymantecUserServices(user_services_client)
    query_services_object = SymantecQueryServices(query_services_client)

    # Push display payload shown on the user's device.
    data = {}
    data["displayParameters"] = []
    data["displayParameters"].append({"Key":"display.message.text", "Value": message})
    data["displayParameters"].append({"Key":"display.message.title", "Value": title})

    response = user_services_object.authenticateUserWithPush("SupportPush",userid, None, data)

    status=user_services_object.getResponseValue(response,'status')
    statusMessage=user_services_object.getResponseValue(response,'statusMessage')

    # 6040 == "push sent"; any other status is an immediate failure.
    if (status == '6040'):
        print("Push Initiated for", userid)
        transaction=user_services_object.getResponseValue(response,'transactionId')
        request=user_services_object.getResponseValue(response,'requestId')
    else:
        print("Error sending push to", userid, ":" , statusMessage)
        return("Error: Unable to send push to " + userid + ": " + statusMessage)

    isError = False

    # Poll roughly every 3 seconds for ~30 seconds total.
    for sec in range(1,30 // 3):
        time.sleep(3) # NEED NEW SOLUTION for querying on interval in python
        poll_status = query_services_object.pollPushStatus(request,transaction)
        transaction_status = query_services_object.getResponseValue(poll_status,'transactionStatus')
        for tup in transaction_status:
            status=tup[1]
            msg=tup[2]

        if isError:
            print("\n\tError: " + msg)
            # Bug fix: this return was corrupted in the source ("******" had
            # replaced the message concatenation and fused it with the elif).
            return("Error: Unable to validate user: " + msg)
        elif "0000" in status: # ignore this first status for polling connection
            continue
        elif "7000" in status:
            print("SUCCESS! Push Accepted by " + userid)
            return("Push Accepted by "+ userid)
        elif "7001" in status:
            print("Waiting for response...")
            continue
        elif "7002" in status:
            print("Push Denied by", userid)
            return("Error: Push denied by "+ userid)
        else:
            # Unknown status: report it as an error on the next poll iteration.
            isError = True
    return("Error: Request to " + userid + " timed out.")
Esempio n. 45
0
    def show(self,
             delete_after=20,
             scale=10,
             border=None,
             dark='#000',
             light='#fff'):  # pragma: no cover
        """\
        Displays this QR code.

        Intended for debugging: the QR code is saved as a PNG (scaled by
        :paramref:`scale <segno.QRCode.show.scale>`) into a temporary file,
        which is then opened with the standard PNG viewer application or the
        default web browser.  The temporary file is removed after
        :paramref:`delete_after <segno.QRCode.show.delete_after>` seconds
        unless that parameter is set to ``None``.

        If this method does not show any result, try to increase the
        :paramref:`delete_after <segno.QRCode.show.delete_after>` value or set
        it to ``None``

        :param delete_after: Time in seconds to wait till the temporary file is
                deleted.
        :type delete_after: int or None
        :param int scale: Integer indicating the size of a single module.
        :param border: Integer indicating the size of the quiet zone.
                If set to ``None`` (default), the recommended border size
                will be used.
        :type border: int or None
        :param dark: The color of the dark modules (default: black).
        :param light: The color of the light modules (default: white).
        """
        import os
        import time
        import tempfile
        import webbrowser
        import threading
        try:  # Python 3
            from urllib.parse import urljoin
            from urllib.request import pathname2url
        except ImportError:  # Python 2
            from urlparse import urljoin
            from urllib import pathname2url

        def _unlink_later(name):
            # Give the viewer time to load the image before removing it.
            time.sleep(delete_after)
            try:
                os.unlink(name)
            except OSError:
                pass

        tmp = tempfile.NamedTemporaryFile('wb', suffix='.png', delete=False)
        try:
            self.save(tmp, scale=scale, dark=dark, light=light, border=border)
        except:
            # Saving failed: don't leave the (empty) temporary file behind.
            tmp.close()
            os.unlink(tmp.name)
            raise
        tmp.close()
        webbrowser.open_new_tab(urljoin('file:', pathname2url(tmp.name)))
        if delete_after is not None:
            cleaner = threading.Thread(target=_unlink_later, args=(tmp.name, ))
            cleaner.start()
Esempio n. 46
0
def checkIndividualSetting(setting, dirName):
    """Validate one feature-setting dict loaded from *dirName*.

    Checks required keys (``name``, ``type``, ``title``, ``actions``),
    enforces globally unique names via the module-level ``previousNames``
    set, applies defaults (``section``, ``default``, ``description``) and
    rewrites injectCSS/injectScript action file paths into URLs relative
    to ``./src/common/``.

    Mutates *setting* in place; raises ValueError on any invalid setting.
    """
    if 'name' not in setting:
        raise ValueError('Every setting must have a name property.')

    if setting['name'] in previousNames:
        raise ValueError('Duplicate setting name: ' + setting['name'])

    previousNames.add(setting['name'])

    if 'type' not in setting:
        raise ValueError('Every setting must have a type property.')

    # Membership test instead of a chained inequality comparison.
    if setting['type'] not in ('checkbox', 'select'):
        raise ValueError(
            'Only checkbox and select settings are supported at this time. Pull requests are welcome!'
        )

    if 'title' not in setting:
        raise ValueError('Every setting must have a title.')

    if 'actions' not in setting:
        raise ValueError(
            'Every setting must declare actions that happen when the setting is activated.'
        )

    for action in setting['actions']:
        if not isinstance(setting['actions'][action], list):
            raise ValueError(
                'Actions must be declared as an array, for example ["injectCSS", "main.css"].'
            )

        # Actions come in (verb, argument) pairs, hence the even length.
        if len(setting['actions'][action]) % 2 != 0:
            raise ValueError(
                'Actions must have an even number of elements, for example ["injectCSS", "main.css"].'
            )

    if setting['type'] == 'checkbox':
        if 'true' not in setting['actions'] and 'false' not in setting[
                'actions']:
            raise ValueError(
                'Checkbox settings must declare an action for "true" or "false" to have any effect.'
            )
    elif setting['type'] == 'select':
        if '1' not in setting['actions']:
            raise ValueError(
                'Select settings must declare an action for "1" to have any effect.'
            )

    # Apply the defaults.
    setting.setdefault('section', 'general')
    setting.setdefault('default', False)
    setting.setdefault('description', '')

    # Give a relative path to the files in the actions section so that the
    # settings system can understand what URL to load when it takes an action.
    for action in setting['actions']:
        actionList = setting['actions'][action]
        # Walk the (verb, argument) pairs; stride-2 range replaces the
        # original manual while-loop with identical element updates.
        for i in range(0, len(actionList), 2):
            if actionList[i] in ('injectCSS', 'injectScript'):
                fullPath = os.path.join(dirName, actionList[i + 1])

                # Convert to / if we're on Windows
                fullPath = pathname2url(fullPath)

                actionList[i + 1] = fullPath.replace("./src/common/", "")
Esempio n. 47
0
    def open(self, path, create=True):
        """Open (and optionally create) the BerkeleyDB-backed store at *path*.

        Opens the three triple indices plus the contexts, namespace, prefix,
        k2i and i2k databases inside the environment, builds the per-pattern
        index dispatch table, and starts the background sync thread.

        :param path: home directory of the DB environment.
        :param create: when True, create missing databases (DB_CREATE).
        :return: VALID_STORE on success, NO_STORE when bsddb is unavailable
            or the environment cannot be initialised.
        """
        if not has_bsddb:
            return NO_STORE
        homeDir = path

        if self.__identifier is None:
            self.__identifier = URIRef(pathname2url(abspath(homeDir)))

        db_env = self._init_db_environment(homeDir, create)
        if db_env == NO_STORE:
            return NO_STORE
        self.db_env = db_env
        self.__open = True

        dbname = None
        dbtype = db.DB_BTREE
        # auto-commit ensures that the open-call commits when transactions
        # are enabled

        dbopenflags = DBOPENFLAGS
        if self.transaction_aware is True:
            dbopenflags |= db.DB_AUTO_COMMIT

        if create:
            dbopenflags |= db.DB_CREATE

        dbmode = 0o660
        dbsetflags = 0

        # create and open the DBs
        self.__indicies = [
            None,
        ] * 3
        self.__indicies_info = [
            None,
        ] * 3
        for i in range(0, 3):
            index_name = to_key_func(i)(
                ("s".encode("latin-1"), "p".encode("latin-1"),
                 "o".encode("latin-1")),
                "c".encode("latin-1"),
            ).decode()
            index = db.DB(db_env)
            index.set_flags(dbsetflags)
            index.open(index_name, dbname, dbtype, dbopenflags, dbmode)
            self.__indicies[i] = index
            self.__indicies_info[i] = (index, to_key_func(i), from_key_func(i))

        # For each of the 8 bound/unbound (s, p, o) query patterns, pick the
        # index whose key order matches the longest run of bound terms
        # (highest score wins; the tie_break prefers an earlier start).
        lookup = {}
        for i in range(0, 8):
            results = []
            for start in range(0, 3):
                score = 1
                length = 0  # renamed from `len` to stop shadowing the builtin
                for j in range(start, start + 3):
                    if i & (1 << (j % 3)):
                        score = score << 1
                        length += 1
                    else:
                        break
                tie_break = 2 - start
                results.append(((score, tie_break), start, length))

            results.sort()
            score, start, length = results[-1]

            def get_prefix_func(start, end):
                # Factory binds start/end eagerly, avoiding the
                # late-binding-closure pitfall inside this loop.
                def get_prefix(triple, context):
                    if context is None:
                        yield ""
                    else:
                        yield context
                    i = start
                    while i < end:
                        yield triple[i % 3]
                        i += 1
                    yield ""

                return get_prefix

            lookup[i] = (
                self.__indicies[start],
                get_prefix_func(start, start + length),
                from_key_func(start),
                results_from_key_func(start, self._from_string),
            )

        self.__lookup_dict = lookup

        self.__contexts = db.DB(db_env)
        self.__contexts.set_flags(dbsetflags)
        self.__contexts.open("contexts", dbname, dbtype, dbopenflags, dbmode)

        self.__namespace = db.DB(db_env)
        self.__namespace.set_flags(dbsetflags)
        self.__namespace.open("namespace", dbname, dbtype, dbopenflags, dbmode)

        self.__prefix = db.DB(db_env)
        self.__prefix.set_flags(dbsetflags)
        self.__prefix.open("prefix", dbname, dbtype, dbopenflags, dbmode)

        self.__k2i = db.DB(db_env)
        self.__k2i.set_flags(dbsetflags)
        self.__k2i.open("k2i", dbname, db.DB_HASH, dbopenflags, dbmode)

        self.__i2k = db.DB(db_env)
        self.__i2k.set_flags(dbsetflags)
        self.__i2k.open("i2k", dbname, db.DB_RECNO, dbopenflags, dbmode)

        self.__needs_sync = False
        t = Thread(target=self.__sync_run)
        # Thread.setDaemon() is deprecated since Python 3.10; assign the
        # attribute instead (identical effect).
        t.daemon = True
        t.start()
        self.__sync_thread = t
        return VALID_STORE
Esempio n. 48
0
def extract_base64_data_from_html_page(file: pathlib.Path):
    """
    Extract every base64 data URI from the given HTML page, writing each
    decoded payload to its own file under a per-page resources directory.

    The returned HTML is rebuilt from the original text with every base64
    payload replaced by the relative URL of the file it was saved to.
    Repeated payloads (e.g. icons) are decoded and written only once.
    """
    with file.open(encoding='utf-8') as fp:
        html = fp.read()

    # Make the subresources directory
    resources_dir = file.parent / 'resources' / f'{file.stem}_resources'
    resources_dir.mkdir(parents=True, exist_ok=True)

    # Remove any existing `<base>` tag
    html = BASE_TAG_PATTERN.sub('', html)

    # Add `'self'` to the Content Security Policy
    # so we can load our extracted resources
    html = OLD_CSP.sub(NEW_CSP, html)

    parts = []         # pieces of the rewritten HTML, joined at the end
    cursor = 0         # end of the last region already copied into parts
    file_counter = 0
    # Cache of already-written payloads (elements such as icons repeat)
    known_payloads = {}

    for match in BASE64_DATA_PATTERN.finditer(html):
        # Copy the content that precedes this base64 payload
        parts.append(html[cursor:match.start(1)])

        payload = match.group('string')

        # First encounter: decode the payload and write it to its own file;
        # later encounters reuse the cached path.
        saved_path = known_payloads.get(payload)
        if saved_path is None:
            file_counter += 1
            name = generate_filename(match.group('mime'), str(file_counter))
            saved_path = resources_dir / name
            with saved_path.open('wb') as resource_file:
                resource_file.write(base64.b64decode(payload))
            known_payloads[payload] = saved_path

        # Substitute the payload with the relative URL of the saved file
        parts.append(
            pathname2url(saved_path.relative_to(file.parent).as_posix()))

        # Continue after the old base64 data
        cursor = match.end(1)

    # Copy whatever follows the final payload
    parts.append(html[cursor:])

    return ''.join(parts)
Esempio n. 49
0
 def path2url(path):  # noqa: E303
     """Return a ``file:`` URL for the given filesystem *path*."""
     encoded = pathname2url(path)
     return urljoin('file:', encoded)
Esempio n. 50
0
def path2url(path):
    """Return a ``file:`` URL for the local *path* (Python 2 urllib API)."""
    encoded = urllib.pathname2url(path)
    return urlparse.urljoin('file:', encoded)
Esempio n. 51
0
 def on_open_clicked(self, button):
     """Open the selected file's directory with the system default handler."""
     folder = os.path.dirname(self.selected.fullpath)
     uri = 'file:///' + pathname2url(folder)
     Gio.AppInfo.launch_default_for_uri(uri, None)
    def __generate_access_token(configuration, okta_response_handler):
        """
        This function generates an access token by making a call to Okta
        using the resource-owner password grant, then wraps the result in a
        self-refreshing token object.

        :param ApiConfiguration configuration: The configuration to use
        :param typing.callable okta_response_handler: An optional function to handle the Okta response

        :return: RefreshingToken api_token: A refreshing API token
        :raises ValueError: if Okta responds with a non-200 status code
        """
        # Encode credentials that may contain special characters
        # NOTE(review): pathname2url percent-encodes for *paths* (it leaves
        # '/' unescaped and never uses '+' for spaces); urllib.parse.quote_plus
        # is the conventional encoder for x-www-form-urlencoded bodies.
        # Changing it would alter the bytes sent for passwords containing '/'
        # — confirm against Okta before touching this.
        encoded_password = pathname2url(configuration.password)
        encoded_client_id = pathname2url(configuration.client_id)
        encoded_client_secret = pathname2url(configuration.client_secret)

        # Prepare our authentication request
        # NOTE(review): the scope value contains literal spaces that are not
        # form-encoded here — presumably accepted by Okta; verify.
        token_request_body = f"grant_type=password&username={configuration.username}" \
            f"&password={encoded_password}&scope=openid client groups offline_access" \
            f"&client_id={encoded_client_id}&client_secret={encoded_client_secret}"

        headers = {
            "Accept": "application/json",
            "Content-Type": "application/x-www-form-urlencoded"
        }

        # extra request args
        kwargs = {"headers": headers}

        # Route through a proxy when one is configured
        if configuration.proxy_config is not None:
            kwargs["proxies"] = configuration.proxy_config.format_proxy_schema(
            )

        # use certificate if supplied
        if configuration.certificate_filename is not None:
            kwargs["verify"] = configuration.certificate_filename

        # make request to Okta to get an authentication token
        okta_response = requests.post(configuration.token_url,
                                      data=token_request_body,
                                      **kwargs)

        # Give the caller a chance to inspect/log the raw response first
        if okta_response_handler is not None:
            okta_response_handler(okta_response)

        # Ensure that we have a 200 response code
        if okta_response.status_code != 200:
            raise ValueError(okta_response.json())

        # convert the json encoded response to be able to extract the token values
        okta_json = okta_response.json()

        # Retrieve our api token from the authentication response; the
        # RefreshingToken re-authenticates itself using the refresh token
        # once the initial expiry passes.
        api_token = RefreshingToken(
            token_url=configuration.token_url,
            client_id=encoded_client_id,
            client_secret=encoded_client_secret,
            initial_access_token=okta_json["access_token"],
            initial_token_expiry=okta_json["expires_in"],
            refresh_token=okta_json["refresh_token"],
            proxies=kwargs.get("proxies", None),
            certificate_filename=kwargs.get("verify", None))

        return api_token
Esempio n. 53
0
    def convert(self, remove_executed=False):
        """
        Convert the executed notebook to a restructured text (RST) file or HTML.

        Parameters
        ----------
        remove_executed : bool, optional
            Controls whether to remove the executed notebook or not.

        Returns
        -------
        dict
            Page info with keys ``output_file_path``, ``name`` and ``title``,
            or the existing output path when it already exists and
            ``overwrite`` is not set.

        Raises
        ------
        IOError
            If the executed notebook file does not exist.
        ValueError
            If the output type is neither 'RST' nor 'HTML'.
        """

        if not path.exists(self._executed_nb_path):
            raise IOError(
                "Executed notebook file doesn't exist! Expected: {0}".format(
                    self._executed_nb_path))

        # Respect an existing converted file unless overwriting was requested
        if path.exists(self._output_path) and not self.overwrite:
            logger.debug(
                "{0} version of notebook already exists at {1}. Use "
                "overwrite=True or --overwrite (at cmd line) to re-run".format(
                    self._output_type, self._output_path))
            return self._output_path

        # Initialize the resources dict - see:
        # https://github.com/jupyter/nbconvert/blob/master/nbconvert/nbconvertapp.py#L327
        resources = {}
        resources['config_dir'] = ''  # we don't need to specify config
        resources['unique_key'] = self.nb_name

        # path to store extra files, like plots generated
        resources['output_files_dir'] = 'nboutput'

        # Relative URL from this page back to the site root, used by templates
        if self.base_path is None:
            path_to_root = ''
        else:
            path_to_root = path.relpath(self.base_path,
                                        start=path.split(self.nb_path)[0])
            path_to_root += path.sep
        resources['path_to_pages_root'] = request.pathname2url(path_to_root)

        # Exports the notebook to the output format
        logger.debug('Exporting notebook to {}...'.format(self._output_type))
        if self._output_type == 'RST':
            exporter = RSTExporter()
        elif self._output_type == 'HTML':
            exporter = HTMLExporter()
        else:
            raise ValueError('This should be impossible... output_type should '
                             'have been checked earlier, but it is '
                             'unrecognized')

        if self.template_file:
            exporter.template_file = self.template_file
        output, resources = exporter.from_filename(self._executed_nb_path,
                                                   resources=resources)

        # Write the output file
        writer = FilesWriter()
        output_file_path = writer.write(output,
                                        resources,
                                        notebook_name=self.nb_name)

        if self._output_type == 'RST':
            self._add_filter_keywords(output_file_path)

        if remove_executed:  # optionally, clean up the executed notebook file
            remove(self._executed_nb_path)

        # Derive the page title from the first markdown heading of the
        # original notebook's first cell; fall back to '' on any parse error.
        title = ''
        try:
            with open(self.nb_path) as f:
                nb = nbformat.reader.read(f)
                title = nb['cells'][0]['source'].split('#')[1].split(
                    "\n")[0].strip()
        except Exception:
            print(
                'Failed to parse notebook title from first cell, please check notebook.'
            )

        page_info = dict(output_file_path=output_file_path,
                         name=self.nb_name.replace("_", ' ').title(),
                         title=title)

        return page_info
 def build_cdx_request(self, request):
     """Build a CDX lookup request, stashing the original request in meta."""
     encoded_url = pathname2url(request.url)
     cdx_request = Request(self.cdx_url_template.format(url=encoded_url))
     cdx_request.meta['wayback_machine_original_request'] = request
     cdx_request.meta['wayback_machine_cdx_request'] = True
     return cdx_request
Esempio n. 55
0
class Highchart(WebView):
    """Create a Highcharts webview widget.

    Parameters
    ----------
    parent: QObject
        Qt parent object, if any.
    bridge: QObject
        Exposed as ``window.pybridge`` in JavaScript.
    options: dict
        Default options for this chart. See Highcharts docs. Some
        options are already set in the default theme.
    highchart: str
        One of `Chart`, `StockChart`, or `Map` Highcharts JS types.
    enable_zoom: bool
        Enables scroll wheel zooming and right-click zoom reset.
    enable_select: str
        If '+', allow series' points to be selected by clicking
        on the markers, bars or pie slices. Can also be one of
        'x', 'y', or 'xy' (all of which can also end with '+' for the
        above), in which case it indicates the axes on which
        to enable rectangle selection. The list of selected points
        for each input series (i.e. a list of arrays) is
        passed to the ``selection_callback``.
        Each selected point is represented as its index in the series.
        If the selection is empty, the callback parameter is a single
        empty list.
    javascript: str
        Additional JavaScript code to evaluate beforehand. If you
        need something exposed in the global namespace,
        assign it as an attribute to the ``window`` object.
    debug: bool
        Enables right-click context menu and inspector tools.
    **kwargs:
        The additional options. The underscores in argument names imply
        hierarchy, e.g., keyword argument such as ``chart_type='area'``
        results in the following object, in JavaScript::

            {
                chart: {
                    type: 'area'
                }
            }

        The original `options` argument is updated with options from
        these kwargs-derived objects.
    """

    # file: URL of the bundled scaffold page (_highcharts/chart.html) that
    # every instance loads into its web view.
    _HIGHCHARTS_HTML = urljoin(
        'file:',
        pathname2url(join(join(dirname(__file__), '_highcharts'),
                          'chart.html')))

    def __init__(self,
                 parent=None,
                 bridge=None,
                 options=None,
                 *,
                 highchart='Chart',
                 enable_zoom=False,
                 enable_select=False,
                 selection_callback=None,
                 javascript='',
                 debug=False,
                 **kwargs):
        # Normalize arguments: a False/empty enable_select becomes ''.
        options = (options or {}).copy()
        enable_select = enable_select or ''

        if not isinstance(options, dict):
            raise ValueError('options must be dict')
        if enable_select not in ('', '+', 'x', 'y', 'xy', 'x+', 'y+', 'xy+'):
            raise ValueError("enable_select must be '+', 'x', 'y', or 'xy'")
        if enable_select and not selection_callback:
            raise ValueError('enable_select requires selection_callback')

        super().__init__(parent,
                         bridge,
                         debug=debug,
                         url=QUrl(self._HIGHCHARTS_HTML))
        self.debug = debug
        self.highchart = highchart
        self.enable_zoom = enable_zoom
        # '+' anywhere in enable_select means point (marker) selection;
        # the remaining characters name the axes for rectangle selection.
        enable_point_select = '+' in enable_select
        enable_rect_select = enable_select.replace('+', '')
        if enable_zoom:
            _merge_dicts(
                options,
                _kwargs_options(
                    dict(mapNavigation_enableMouseWheelZoom=True,
                         mapNavigation_enableButtons=False)))
        if enable_select:
            self._selection_callback = selection_callback
            self.frame.addToJavaScriptWindowObject('__highchart', self)
            _merge_dicts(
                options,
                _kwargs_options(
                    dict(chart_events_click='/**/unselectAllPoints/**/')))
        if enable_point_select:
            _merge_dicts(
                options,
                _kwargs_options(
                    dict(plotOptions_series_allowPointSelect=True,
                         plotOptions_series_point_events_click=
                         '/**/clickedPointSelect/**/')))
        if enable_rect_select:
            _merge_dicts(
                options,
                _kwargs_options(
                    dict(chart_zoomType=enable_rect_select,
                         chart_events_selection='/**/rectSelectPoints/**/')))
        if kwargs:
            _merge_dicts(options, _kwargs_options(kwargs))

        # Bind the base-class evalJS now: this class's own evalJS wraps
        # code in setTimeout, which we don't want for the setup script.
        super_evalJS = super().evalJS

        def evalOptions():
            # Apply user javascript and the merged default options once the
            # scaffold page has finished loading.
            super_evalJS(javascript)
            self.evalJS('''
                var options = {options};
                fixupOptionsObject(options);
                Highcharts.setOptions(options);
            '''.format(options=json(options)))

        self.frame.loadFinished.connect(evalOptions)
        # Give above scripts time to load
        qApp.processEvents(QEventLoop.ExcludeUserInputEvents)
        qApp.processEvents(QEventLoop.ExcludeUserInputEvents)

    def contextMenuEvent(self, event):
        """ Zoom out on right click. Also disable context menu."""
        if self.enable_zoom:
            self.evalJS('chart.zoomOut();')
        if self.debug:
            super().contextMenuEvent(event)

    @staticmethod
    def _JSObject_factory(obj):
        # Map the Python container onto the Qt property type that crosses
        # the JS bridge as a structured object rather than a string.
        pyqt_type = type(obj).__mro__[-2]
        if isinstance(obj, (list, np.ndarray)):
            pyqt_type = 'QVariantList'
        elif isinstance(obj, Mapping):
            pyqt_type = 'QVariantMap'
        else:
            raise TypeError("Can't expose object of type {}. Too easy. Use "
                            "evalJS method instead.".format(type(obj)))

        class _JSObject(QObject):
            """ This class hopefully prevent options data from being marshalled
            into a string-like dumb (JSON) object when passed into JavaScript. """
            def __init__(self, parent, obj):
                super().__init__(parent)
                self._obj = obj

            @pyqtProperty(pyqt_type)
            def _options(self):
                return self._obj

        return _JSObject

    def exposeObject(self, name, obj):
        """Expose the object `obj` as ``window.<name>`` in JavaScript.

        If the object contains any string values that start and end with
        literal ``/**/``, those are evaluated as JS expressions the result
        value replaces the string in the object.

        The exposure, as defined here, represents a snapshot of object at
        the time of execution. Any future changes on the original Python
        object are not (necessarily) visible in its JavaScript counterpart.

        Parameters
        ----------
        name: str
            The global name the object is exposed as.
        obj: object
            The object to expose. Must contain only primitive types, such as:
            int, float, str, bool, list, dict, set, numpy.ndarray.
        """
        try:
            obj = _to_primitive_types(obj)
        except TypeError:
            raise TypeError('object must consist of primitive types '
                            '(allowed: int, float, str, bool, list, '
                            'dict, set, numpy.ndarray, ...)') from None

        pydata = self._JSObject_factory(obj)(self, obj)
        self.frame.addToJavaScriptWindowObject('_' + name, pydata)
        self.evalJS('''
            window.{0} = window._{0}._options;
            fixupOptionsObject({0});
        '''.format(name))

    def chart(self,
              options=None,
              *,
              highchart=None,
              javascript='',
              javascript_after='',
              **kwargs):
        """ Populate the webview with a new Highcharts JS chart.

        Parameters
        ----------
        options, highchart, javascript, **kwargs:
            The parameters are the same as for the object constructor.
        javascript_after: str
            Same as `javascript`, except that the code is evaluated
            after the chart, available as ``window.chart``, is created.

        Notes
        -----
        Passing ``{ series: [{ data: some_data }] }``, if ``some_data`` is
        a numpy array, it is **more efficient** to leave it as numpy array
        instead of converting it ``some_data.tolist()``, which is done
        implicitly.
        """
        # Give default options some time to apply
        qApp.processEvents(QEventLoop.ExcludeUserInputEvents)

        options = (options or {}).copy()
        if not isinstance(options, MutableMapping):
            raise ValueError('options must be dict')

        if kwargs:
            _merge_dicts(options, _kwargs_options(kwargs))
        self.exposeObject('pydata', options)
        highchart = highchart or self.highchart or 'Chart'
        self.evalJS('''
            {javascript};
            window.chart = new Highcharts.{highchart}(pydata);
            {javascript_after};
        '''.format(
            javascript=javascript,
            javascript_after=javascript_after,
            highchart=highchart,
        ))

    def evalJS(self, javascript):
        """ Asynchronously evaluate JavaScript code. """
        # Why do we need this async? I don't know. But performance of
        # loading/evaluating any JS code is greatly improved this way.
        _ASYNC = 'setTimeout(function() { %s; }, 10);'
        super().evalJS(_ASYNC % javascript)

    def clear(self):
        """Remove all series from the chart"""
        self.evalJS('''
            if (window.chart) {
                while(chart.series.length > 0) {
                    chart.series[0].remove(false);
                }
                chart.redraw();
            }
        ''')

    @pyqtSlot('QVariantList')
    def _on_selected_points(self, points):
        # Slot invoked from JavaScript with per-series lists of selected
        # point indices; forwards them (sorted, as int arrays) to the
        # user-supplied selection callback.
        self._selection_callback(
            [np.sort(selected).astype(int) for selected in points])
Esempio n. 56
0
 def rel_url(self):
     """Return this file's URL relative to the parent of MEDIA_ROOT."""
     media_parent = os.path.dirname(settings.MEDIA_ROOT)
     relative = os.path.relpath(self._path(), media_parent)
     return pathname2url(relative)
Esempio n. 57
0
 def as_uri(self, scheme='ssh'):
     """Render this remote path as a URI using *scheme* (default ``ssh``)."""
     encoded = urllib.pathname2url(str(self))
     return '{0}://{1}{2}'.format(scheme, self.remote._fqhost, encoded)
Esempio n. 58
0
def path_to_url(path):
    """Return *path* converted to URL (percent-encoded) notation."""
    # pathname2url on Python 2 expects a byte string.
    encoded = path.encode('utf-8') if six.PY2 else path
    return pathname2url(encoded)
Esempio n. 59
0
 def as_uri(self, scheme='file'):
     """Join '<scheme>:' with the URL form of this path."""
     base = str(scheme) + ':'
     return urlparse.urljoin(base, urllib.pathname2url(str(self)))
Esempio n. 60
0
File: adas.py Progetto: ukaea/Indica
    def get_adf15(
        self,
        element: str,
        charge: str,
        filetype: str,
        year="",
    ) -> DataArray:
        """Read data from the specified ADF15 ADAS file.

        Implementation is capable of reading files with compact and expanded formatting
        e.g. pec96][ne_pju][ne9.dat and pec40][ar_cl][ar16.dat respectively

        Parameters
        ----------
        element
            The atomic symbol for the element which will be retrieved.
        charge
            Charge state of the ion (e.g. 16 for Ar 16+), can also include
            other string for more complicated path (transport_llu][ar15ic.dat
            setting charge to "15ic")
        filetype
            The type of data to retrieve. Options: ic, cl, ca, ls, llu, ...
        year
            The two-digit year label for the data. = "transport" if special
            transport path


        Returns
        -------
        :
            The data in the specified file. Dimensions are density and
            temperature. Each members of the dataset correspond to a
            different charge state.

        """
        def explicit_reshape(data_to_reshape, nd, nt):
            # Reshape a flat, row-major sequence into an (nd, nt) array.
            # NOTE(review): loop variable `id` shadows the builtin `id`.
            data = np.empty((nd, nt))
            for id in range(nd):
                for it in range(nt):
                    data[id, it] = data_to_reshape[id * nt + it]

            return data

        def build_file_component(year, element):
            # First component of the file name: "pec<year>][<element>"
            # normally, or the literal "transport" for transport files.
            file_component = "transport"
            if year != "transport":
                file_component = f"pec{year}][{element.lower()}"

            return file_component

        def file_type(identifier):
            # Map the header marker character to the file layout:
            # '+' -> compact formatting, ':' -> expanded formatting.
            identifier_dict = {
                "+": "compact",
                ":": "expanded",
            }
            file_type = identifier_dict.get(identifier)
            if file_type is None:
                raise ValueError(
                    f"Unknown file header identified ({identifier}).")

            return file_type

        def transition_match(transition_line):
            # Decide how transitions are written in the file footer and
            # return (type, regex) for parsing them: tries the orbital
            # notation first, then falls back to n-level notation.
            # NOTE(review): the error message closes over `identifier`
            # from the enclosing scope (assigned before this is called).
            transition_type = "orbitals"
            match = (
                r"c\s+(\d+.)"  # isel
                r"\s+(\d+.\d+)"  # wavelength
                r"\s+(\d+)(\(\d\)\d\(.+\d?.\d\))-"  # transition upper level
                r".+(\d+)(\(\d\)\d\(.+\d?.\d\))"  # transition lower level
            )
            header_re = re.compile(match)
            m = header_re.search(transition_line)
            if not m:
                transition_type = "n_levels"
                match = r"c\s+(\d+.)\s+(\d+.\d+)\s+([n]\=.\d+.-.[n]\=.\d+)"
                header_re = re.compile(match)
                m = header_re.search(transition_line)
                if not m:
                    raise ValueError(
                        f"Unknown transition formatting ({identifier}).")

            return transition_type, match

        # Timestamp for the provenance record attached to the result.
        now = datetime.datetime.now()
        file_component = build_file_component(year, element)
        filename = Path(pathname2url(file_component)) / pathname2url(
            f"{file_component}_{filetype.lower()}]"
            f"[{element.lower()}{charge.lower()}.dat")

        # Header regexes capture (ntrans, element name, charge state);
        # compact and expanded headers differ only in the separator
        # before the charge state ('+' vs ':').
        header_match = {
            "compact": r"(\d+).+/(\S+).*\+(.*)photon",
            "expanded": r"(\d+).+/(\S+).*\:(.*)photon",
        }
        # Section-header regexes capture (wavelength, nd, nt, type, isel).
        section_header_match = {
            "compact":
            r"(\d+.\d+).+\s+(\d+)\s+(\d+).+type\s?"
            r"=\s?(\S+).+isel.+\s+(\d+)",
            "expanded":
            r"(\d+.\d+)\s+(\d+)\s+(\d+).+type\s?="
            r"\s?(\S+).+isel\s+?=\s+?(\d+)",
        }
        with self._get_file("adf15", filename) as f:
            header = f.readline().strip().lower()
            # Third character of the second '/'-separated header field
            # marks compact ('+') vs expanded (':') formatting.
            identifier = file_type(header.split("/")[1][2])

            match = header_match[identifier]
            m = re.search(match, header, re.I)
            assert isinstance(m, re.Match)  # narrows Optional[Match]
            ntrans = int(m.group(1))
            element_name = m.group(2).strip().lower()
            charge_state = int(m.group(3))
            assert element_name == element.lower()
            # Extract the numeric part of the requested charge (e.g. "15"
            # from "15ic") and cross-check it against the file header.
            m = re.search(r"(\d+)(\S*)", charge)
            assert isinstance(m, re.Match)
            extracted_charge = m.group(1)
            if charge_state != int(extracted_charge):
                raise ValueError(
                    f"Charge state in ADF15 file ({charge_state}) does not "
                    f"match argument ({charge}).")

            # Read first section header to build arrays outside of reading loop
            match = section_header_match[identifier]
            header_re = re.compile(match)
            m = None
            while not m:
                line = f.readline().strip().lower()
                m = header_re.search(line)
            assert isinstance(m, re.Match)
            nd = int(m.group(2))  # number of density points
            nt = int(m.group(3))  # number of temperature points
            ttype: List[str] = []  # transition type per transition
            tindex = np.empty(ntrans)  # 1-based transition indices
            wavelength = np.empty(ntrans)

            # Read Photon Emissivity Coefficient rates
            data = np.empty((ntrans, nd, nt))
            for i in range(ntrans):
                m = header_re.search(line)
                assert isinstance(m, re.Match)
                assert int(m.group(5)) - 1 == i  # isel must be sequential
                tindex[i] = i + 1
                ttype.append(m.group(4))
                wavelength[i] = float(m.group(1))  # (Angstroms)

                # Whitespace-separated ASCII reads: densities,
                # temperatures, then the flattened nd*nt block of PECs.
                densities = np.fromfile(f, float, nd, " ")
                temperatures = np.fromfile(f, float, nt, " ")
                data_tmp = np.fromfile(f, float, nd * nt, " ")
                data[i, :, :] = explicit_reshape(data_tmp, nd, nt)
                line = f.readline().strip().lower()

            # Reorder axes to (transition, temperature, density) to match
            # the coords declared below.
            data = np.transpose(np.array(data), (0, 2, 1))

            # Read Transition information from end of file
            file_end_re = re.compile(r"c\s+[isel].+\s+[transition].+\s+[type]")
            while not file_end_re.search(line):
                line = f.readline().strip().lower()
            # Discard separator line(s) before the transition table;
            # expanded files carry one extra line here.
            _ = f.readline()
            if identifier == "expanded":
                _ = f.readline()
            line = f.readline().strip().lower()
            transition_type, match = transition_match(line)
            transition_re = re.compile(match)

            # Format each transition as "upper-lower" with spaces removed.
            format_transition = {
                "orbitals":
                lambda m: f"{m.group(4)}-{m.group(6)}".replace(" ", ""),
                "n_levels": lambda m: m.group(3).replace(" ", ""),
            }
            transition = []
            for i in tindex:
                m = transition_re.search(line)
                assert isinstance(m, re.Match)
                assert int(m.group(1)[:-1]) == i  # isel matches tindex
                transition_tmp = format_transition[transition_type](m)
                transition.append(transition_tmp)
                line = f.readline().strip().lower()

        # Assemble the labelled DataArray.
        gen_type = ADF15_GENERAL_DATATYPES[filetype]
        spec_type = element
        name = f"{spec_type}_{gen_type}"
        attrs = {
            "datatype": (gen_type, spec_type),
            "provenance": self.create_provenance(filename, now),
        }

        coords = [
            ("index", tindex),
            ("electron_temperature", temperatures),  # eV
            ("electron_density", densities * 10**6),  # m**-3
        ]

        # PEC values scaled by 10**-6 — presumably cm**3 -> m**3; the
        # matching 10**6 on densities supports this. TODO confirm units.
        pecs = DataArray(
            data * 10**-6,
            coords=coords,
            name=name,
            attrs=attrs,
        )

        # Add extra dimensions attached to index
        pecs = pecs.assign_coords(wavelength=("index", wavelength))  # (A)
        pecs = pecs.assign_coords(
            transition=("index", transition)
        )  # (2S+1)L(w-1/2)-(2S+1)L(w-1/2) of upper-lower levels, no blank spaces
        pecs = pecs.assign_coords(type=("index", ttype))  # (excit, recomb, cx)

        return pecs