def _media(self):
    return forms.Media(
        css={'screen': (posixpath.join(self.miu_skin, 'style.css'),
                        posixpath.join(self.miu_set, 'style.css'))},
        js=(absolute_jquery_url(),
            absolute_url('markitup/jquery.markitup.js'),
            posixpath.join(self.miu_set, 'set.js')))
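
A note on the pattern above: these examples reach for posixpath.join rather than os.path.join because the joined paths end up in URLs, which must use forward slashes on every platform. A minimal, self-contained illustration (the path segments are made up):

import posixpath

# posixpath always joins with '/', regardless of the host OS;
# os.path.join would use '\\' on Windows and break the URL.
print(posixpath.join('markitup', 'skins', 'style.css'))  # markitup/skins/style.css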
Example #2
    def iter_hashes(self, name='RECORD'):
        '''Iterate over the files and hashes of a RECORD file.

        The RECORD file with the given name will be iterated over
        yielding a three tuple with each iteration: filename (relative
        to the package), computed hash (just calculated), and expected
        hash (from RECORD file).
        '''
        hashless = [posixpath.join(self.dist_info, name + ext)
                    for ext in ['', '.jws', '.p7s']]
        path = posixpath.join(self.dist_info, name)
        with closing(self.open(path)) as record_file:
            for row in csv.reader(record_file):
                filename, hashspec = row[:2]
                if not hashspec:
                    if filename not in hashless:
                        yield filename, None, None
                    continue
                algo, expected_hash = hashspec.split('=', 1)
                hash = hashlib.new(algo)
                with closing(self.open(filename, 'rb')) as file:
                    while True:
                        data = file.read(4096)
                        if not data:
                            break
                        hash.update(data)
                hash = base64.urlsafe_b64encode(hash.digest()).rstrip('=')
                yield filename, hash, expected_hash
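
For context, the hash strings this method compares come from PEP 376/427-style RECORD rows of the form algo=digest, where the digest is urlsafe base64 with the '=' padding stripped. A small standalone sketch of that encoding (illustrative, not part of the original code):

import base64
import hashlib

def record_hash(data, algo='sha256'):
    # urlsafe base64 digest with '=' padding stripped, as stored
    # after 'algo=' in a RECORD row
    digest = hashlib.new(algo, data).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')

print('sha256=' + record_hash(b'hello, world!'))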
Example #3
    def __init__(self, **kwargs):
        """:py:class:`~mrjob.hadoop.HadoopJobRunner` takes the same arguments
        as :py:class:`~mrjob.runner.MRJobRunner`, plus some additional options
        which can be defaulted in :ref:`mrjob.conf <mrjob.conf>`.
        """
        super(HadoopJobRunner, self).__init__(**kwargs)

        self._hdfs_tmp_dir = fully_qualify_hdfs_path(
            posixpath.join(
                self._opts['hdfs_scratch_dir'], self._job_name))

        # Keep track of local files to upload to HDFS. We'll add them
        # to this manager just before we need them.
        hdfs_files_dir = posixpath.join(self._hdfs_tmp_dir, 'files', '')
        self._upload_mgr = UploadDirManager(hdfs_files_dir)

        # Set output dir if it wasn't set explicitly
        self._output_dir = fully_qualify_hdfs_path(
            self._output_dir or
            posixpath.join(self._hdfs_tmp_dir, 'output'))

        self._hadoop_log_dir = hadoop_log_dir(self._opts['hadoop_home'])

        # Running jobs via hadoop assigns a new timestamp to each job.
        # Running jobs via mrjob only adds steps.
        # Store both of these values to enable log parsing.
        self._job_timestamp = None
        self._start_step_num = 0

        # init hadoop version cache
        self._hadoop_version = None
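
fully_qualify_hdfs_path is an mrjob helper that is not shown here; a minimal sketch of the behavior this constructor appears to rely on (my approximation, not mrjob's actual implementation):

def fully_qualify_hdfs_path(path):
    # Assumed behavior: leave URIs that already have a scheme alone,
    # and anchor bare paths under the hdfs:/// scheme.
    if '://' in path:
        return path
    return 'hdfs:///' + path.lstrip('/')

print(fully_qualify_hdfs_path('tmp/mrjob/my_job'))  # hdfs:///tmp/mrjob/my_job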
Example #4
    def upload(self, source, target):
        logger.debug("Copying '%s' -> '%s'", source, target)

        if self.isdir(target):
            target = posixpath.join(target, os.path.basename(source))

        source = os.path.expanduser(source)
        if not os.path.isdir(source):
            self._sftp.put(source, target)
            return

        for rootdir, _, files in os.walk(source):
            targetdir = os.path.normpath(
                os.path.join(
                    target,
                    os.path.relpath(rootdir, source))).replace("\\", "/")

            self.mkdir(targetdir)

            for entry in files:
                local_path = os.path.join(rootdir, entry)
                remote_path = posixpath.join(targetdir, entry)
                if self.exists(remote_path):
                    self._sftp.unlink(remote_path)
                self._sftp.put(local_path, remote_path)
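
The core of the directory branch is rebasing an os.walk path onto the remote POSIX tree; the same conversion can be sketched standalone (paths are hypothetical):

import os
import posixpath

def to_remote(local_root, local_file, remote_root):
    # os.path.relpath uses the local OS separator; rebuild with '/'.
    rel = os.path.relpath(local_file, local_root)
    return posixpath.join(remote_root, *rel.split(os.sep))

print(to_remote('/src', '/src/pkg/mod.py', '/srv/app'))  # /srv/app/pkg/mod.py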
Example #5
    def test_folder(self):
        self.client.write('hello', 'hello, world!')
        self.client.write('foo/hey', 'hey, world!')
        infos = list(self.client.walk(''))
        eq_(len(infos), 2)
        eq_(infos[0], (psp.join(self.client.root), ['foo'], ['hello']))
        eq_(infos[1], (psp.join(self.client.root, 'foo'), [], ['hey']))
Example #6
  def _Run(self):
    # Copy actual-results.json to skimage/actuals
    print '\n\n====Uploading skimage actual-results to Google Storage====\n\n'
    src_dir = os.path.abspath(os.path.join(self._skimage_out_dir,
                                           self._builder_name))
    bucket_url = gs_utils.GSUtils.with_gs_prefix(
        skia_vars.GetGlobalVariable('googlestorage_bucket'))
    dest_dir = posixpath.join(
        bucket_url, 'skimage', 'actuals', self._builder_name)
    http_header_lines = ['Cache-Control:public,max-age=3600']
    old_gs_utils.upload_dir_contents(local_src_dir=src_dir,
                                     remote_dest_dir=dest_dir,
                                     gs_acl='public-read',
                                     http_header_lines=http_header_lines)

    # Copy actual images to Google Storage at skimage/output. This will merge
    # with the existing files.
    print '\n\n========Uploading skimage results to Google Storage=======\n\n'
    src_dir = os.path.abspath(os.path.join(self._skimage_out_dir, 'images'))
    dest_dir = posixpath.join(
        bucket_url, 'skimage', 'output', 'images')
    if os.path.isdir(src_dir) and os.listdir(src_dir):
      old_gs_utils.upload_dir_contents(
          local_src_dir=src_dir, remote_dest_dir=dest_dir,
          gs_acl=gs_utils.GSUtils.PLAYBACK_CANNED_ACL)
Example #7
    def _hadoop_log_dirs(self, output_dir=None):
        """Yield all possible places to look for hadoop logs."""
        # hadoop_log_dirs opt overrides all this
        if self._opts['hadoop_log_dirs']:
            for path in self._opts['hadoop_log_dirs']:
                yield path
            return

        hadoop_log_dir = os.environ.get('HADOOP_LOG_DIR')
        if hadoop_log_dir:
            yield hadoop_log_dir

        yarn = uses_yarn(self.get_hadoop_version())

        if yarn:
            yarn_log_dir = os.environ.get('YARN_LOG_DIR')
            if yarn_log_dir:
                yield yarn_log_dir

            yield _DEFAULT_YARN_HDFS_LOG_DIR

        if output_dir:
            # Cloudera style of logging
            yield posixpath.join(output_dir, '_logs')

        for hadoop_dir in self._hadoop_dirs():
            yield posixpath.join(hadoop_dir, 'logs')

        # hard-coded fallback paths
        if yarn:
            for path in _FALLBACK_HADOOP_YARN_LOG_DIRS:
                yield path

        for path in _FALLBACK_HADOOP_LOG_DIRS:
            yield path
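
Because the generator yields candidates in priority order, a caller would typically take the first directory that actually exists. A hypothetical usage sketch (using a local-filesystem check purely for illustration):

import os

def first_existing_log_dir(runner, output_dir=None):
    # runner is assumed to provide the _hadoop_log_dirs() generator above
    for candidate in runner._hadoop_log_dirs(output_dir=output_dir):
        if os.path.isdir(candidate):
            return candidate
    return None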
Example #8
def create_environments(interpreters=INTERPRETERS,
                        conda_packages=REQUIRED_CONDA_PYTHON_PACKAGES,
                        pip_packages=REQUIRED_PIP_PYTHON_PACKAGES):
    """
    Task for virtual *Anaconda* environments.

    Parameters
    ----------
    interpreters : dict
        *Python* interpreters to create.
    conda_packages : array_like
        Required *Conda* *Python* packages to install.
    pip_packages : array_like
        Required *Pip* *Python* packages to install.
    """

    for interpreter, version in interpreters.items():
        conda_environment_directory = posixpath.join(
            CONDA_DIRECTORY, 'envs', interpreter)
        if not exists(conda_environment_directory):
            run('{0} create --yes -n {1} python={2} anaconda'.format(
                posixpath.join(HOME_DIRECTORY, 'miniconda', 'bin', 'conda'),
                interpreter, version))
            run('source activate {0} && conda install --yes {1}'.format(
                interpreter, " ".join(conda_packages)))
            run('source activate {0} && pip install {1}'.format(
                interpreter, " ".join(pip_packages)))
Example #9
    def runTest(self):
        """This tests copying a directory structure to the device.
        """
        dvroot = self.dm.deviceRoot
        dvpath = posixpath.join(dvroot, 'infratest')
        self.dm.removeDir(dvpath)
        self.dm.mkDir(dvpath)

        p1 = os.path.join('test-files', 'push1')
        # Set up local stuff
        try:
            os.rmdir(p1)
        except:
            pass

        if not os.path.exists(p1):
            os.makedirs(os.path.join(p1, 'sub.1', 'sub.2'))
        if not os.path.exists(os.path.join(p1, 'sub.1', 'sub.2', 'testfile')):
            file(os.path.join(p1, 'sub.1', 'sub.2', 'testfile'), 'w').close()

        self.dm.pushDir(p1, posixpath.join(dvpath, 'push1'))

        self.assertTrue(
            self.dm.dirExists(posixpath.join(dvpath, 'push1', 'sub.1')))
        self.assertTrue(self.dm.dirExists(
            posixpath.join(dvpath, 'push1', 'sub.1', 'sub.2')))
Example #10
    def cwd(self):
        """Return the 'current working directory', which this hierarchy
        represents"""
        if self.parent:
            return posixpath.join(self.parent.cwd(), self.name())
        else:
            return posixpath.join("/", self.name())
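
The recursion bottoms out at the root, so each level of the hierarchy contributes one posixpath.join. A toy hierarchy makes the result concrete (Node and its name() method are stand-ins, not the original classes):

import posixpath

class Node:
    def __init__(self, name, parent=None):
        self._name, self.parent = name, parent

    def name(self):
        return self._name

    def cwd(self):
        if self.parent:
            return posixpath.join(self.parent.cwd(), self.name())
        return posixpath.join('/', self.name())

print(Node('c', Node('b', Node('a'))).cwd())  # /a/b/c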
Example #11
def _GetAPISchemaFilename(api_name, file_system, version):
  '''Gets the name of the file which may contain the schema for |api_name| in
  |file_system|, or None if the API is not found. Note that this may be the
  single _EXTENSION_API file which all APIs share in older versions of Chrome,
  in which case it is unknown whether the API actually exists there.
  '''
  if version == 'trunk' or version > _ORIGINAL_FEATURES_MIN_VERSION:
    # API schema filenames switch format to unix_hacker_style.
    api_name = UnixName(api_name)

  # Devtools API names have 'devtools.' prepended to them.
  # The corresponding filenames do not.
  if 'devtools_' in api_name:
    api_name = api_name.replace('devtools_', '')

  for api_path in API_PATHS:
    try:
      for base, _, filenames in file_system.Walk(api_path):
        for ext in ('json', 'idl'):
          filename = '%s.%s' % (api_name, ext)
          if filename in filenames:
            return posixpath.join(api_path, base, filename)
          if _EXTENSION_API in filenames:
            return posixpath.join(api_path, base, _EXTENSION_API)
    except FileNotFoundError:
      continue
  return None
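
UnixName is assumed to convert API names such as 'inspectedWindow' into unix_hacker_style; a rough approximation of such a conversion (not Chromium's actual helper):

import re

def unix_name(name):
    # Approximation: dots become underscores, camelCase becomes snake_case.
    name = name.replace('.', '_')
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', name).lower()

print(unix_name('devtools.inspectedWindow'))  # devtools_inspected_window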
Example #12
    def __init__(self, **kwargs):
        """:py:class:`~mrjob.hadoop.HadoopJobRunner` takes the same arguments
        as :py:class:`~mrjob.runner.MRJobRunner`, plus some additional options
        which can be defaulted in :ref:`mrjob.conf <mrjob.conf>`.
        """
        super(HadoopJobRunner, self).__init__(**kwargs)

        self._hadoop_tmp_dir = fully_qualify_hdfs_path(
            posixpath.join(
                self._opts['hadoop_tmp_dir'], self._job_key))

        # Keep track of local files to upload to HDFS. We'll add them
        # to this manager just before we need them.
        hdfs_files_dir = posixpath.join(self._hadoop_tmp_dir, 'files', '')
        self._upload_mgr = UploadDirManager(hdfs_files_dir)

        # Set output dir if it wasn't set explicitly
        self._output_dir = fully_qualify_hdfs_path(
            self._output_dir or
            posixpath.join(self._hadoop_tmp_dir, 'output'))

        # Track job and (YARN) application ID to enable log parsing
        self._application_id = None
        self._job_id = None

        # Keep track of where the hadoop streaming jar is
        self._hadoop_streaming_jar = self._opts['hadoop_streaming_jar']
        self._searched_for_hadoop_streaming_jar = False

        # List of dicts (one for each step) potentially containing
        # the keys 'history', 'step', and 'task' ('step' will always
        # be filled because it comes from the hadoop jar command output,
        # others will be filled as needed)
        self._log_interpretations = []
Example #13
    def run(self):
        self.env = env = self.state.document.settings.env
        self.genopt = {}
        self.warnings = []

        names = [x.strip().split()[0] for x in self.content
                 if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
        items = self.get_items(names)
        nodes = self.get_table(items)

        if 'toctree' in self.options:
            suffix = env.config.source_suffix
            dirname = posixpath.dirname(env.docname)

            tree_prefix = self.options['toctree'].strip()
            docnames = []
            for name, sig, summary, real_name in items:
                docname = posixpath.join(tree_prefix, real_name)
                docname = posixpath.normpath(posixpath.join(dirname, docname))
                if docname not in env.found_docs:
                    self.warn('toctree references unknown document %r'
                              % docname)
                docnames.append(docname)

            tocnode = addnodes.toctree()
            tocnode['includefiles'] = docnames
            tocnode['entries'] = [(None, docname) for docname in docnames]
            tocnode['maxdepth'] = -1
            tocnode['glob'] = None

            tocnode = autosummary_toc('', '', tocnode)
            nodes.append(tocnode)

        return self.warnings + nodes
Example #14
    def cleanup(self):
        if not self.restore:
            return

        Runner.cleanup(self)

        self.dm.remount()
        # Restore the original profile
        for added_file in self.added_files:
            self.dm.removeFile(added_file)

        for backup_file in self.backup_files:
            if self.dm.fileExists('%s.orig' % backup_file):
                self.dm.shellCheckOutput(['dd', 'if=%s.orig' % backup_file, 'of=%s' % backup_file])
                self.dm.removeFile("%s.orig" % backup_file)

        # Delete any bundled extensions
        extension_dir = posixpath.join(self.remote_profile, 'extensions', 'staged')
        if self.dm.dirExists(extension_dir):
            for filename in self.dm.listFiles(extension_dir):
                try:
                    self.dm.removeDir(posixpath.join(self.bundles_dir, filename))
                except DMError:
                    pass
        # Remove the test profile
        self.dm.removeDir(self.remote_profile)
Example #15
def _Install(vm):
  """Install YCSB and HBase on 'vm'."""
  vm.Install('hbase')
  vm.Install('ycsb')
  vm.Install('curl')

  cluster_name = (FLAGS.google_bigtable_cluster_name or
                  'pkb-bigtable-{0}'.format(FLAGS.run_uri))
  hbase_lib = posixpath.join(hbase.HBASE_DIR, 'lib')
  for url in [FLAGS.google_bigtable_hbase_jar_url, TCNATIVE_BORINGSSL_URL]:
    jar_name = os.path.basename(url)
    jar_path = posixpath.join(YCSB_HBASE_LIB, jar_name)
    vm.RemoteCommand('curl -Lo {0} {1}'.format(jar_path, url))
    vm.RemoteCommand('cp {0} {1}'.format(jar_path, hbase_lib))

  vm.RemoteCommand('echo "export JAVA_HOME=/usr" >> {0}/hbase-env.sh'.format(
      hbase.HBASE_CONF_DIR))

  context = {
      'google_bigtable_endpoint': FLAGS.google_bigtable_endpoint,
      'google_bigtable_admin_endpoint': FLAGS.google_bigtable_admin_endpoint,
      'project': FLAGS.project or _GetDefaultProject(),
      'cluster': cluster_name,
      'zone': FLAGS.google_bigtable_zone_name,
      'hbase_version': HBASE_VERSION.replace('.', '_')
  }

  for file_name in HBASE_CONF_FILES:
    file_path = data.ResourcePath(file_name)
    remote_path = posixpath.join(hbase.HBASE_CONF_DIR,
                                 os.path.basename(file_name))
    if file_name.endswith('.j2'):
      vm.RenderTemplate(file_path, os.path.splitext(remote_path)[0], context)
    else:
      vm.RemoteCopy(file_path, remote_path)
Example #16
  def __init__(self, confentry):
    # Make sure the namespace parameter is there, even if it's empty
    if "namespace" not in confentry["rosbind"]:
      confentry["rosbind"]["namespace"] = ""

    self.confentry = confentry
    self.topic = rospy.Subscriber(
      posixpath.join(
        confentry["rosbind"]["namespace"],
        confentry["rosbind"]["eventtopic"]
      ),
      eva_behavior.msg.event,
      self._pend_msg(self._handle_event)
    )

    # A dictionary {face id: Face instance} of the faces pi_vision is currently tracking
    self.faces = {}

    def handle_camerainfo(msg):
      self.camerainfo = msg
      ci_sub.unregister()
    ci_sub = rospy.Subscriber(
      posixpath.join(confentry["rosbind"]["namespace"], "camera/camera_info"),
      sensor_msgs.msg.CameraInfo,
      handle_camerainfo
    )
Example #17
    def run(self):
        """Run substitutions on files."""
        super(checkbox_install_data, self).run()

        examplesfiles = [o for o in self.outfiles if "examples" in o]
        if not examplesfiles:
            return

        # Create etc directory
        if self.install_dir == "/usr":
            basedir = posixpath.sep
        else:
            basedir = self.install_dir
        etcdir = posixpath.join(basedir, "etc", "checkbox.d")
        self.mkpath(etcdir)

        # Create configs symbolic link
        dstdir = posixpath.dirname(examplesfiles[0]).replace("examples",
            "configs")
        if not os.path.exists(dstdir):
            os.symlink(etcdir, dstdir)

        # Substitute version in examplesfiles and etcfiles
        version = changelog_version()
        for examplesfile in examplesfiles:
            etcfile = posixpath.join(etcdir,
                posixpath.basename(examplesfile))
            infile = posixpath.join("examples",
                posixpath.basename(examplesfile))
            for outfile in examplesfile, etcfile:
                substitute_variables(infile, outfile, {
                    "version = dev": "version = %s" % version})
Example #18
    def parseCustomTemplateDir(self, template_dir):
        res = {}
        allowed_ext = [".html"]
        try:
            import pyjade
            allowed_ext.append(".jade")
        except ImportError:  # pragma: no cover
            log.msg("pyjade not installed. Ignoring .jade files from %s" % (template_dir,))
            pyjade = None
        for root, dirs, files in os.walk(template_dir):
            if root == template_dir:
                template_name = posixpath.join("views", "%s.html")
            else:
                # template_name is a url, so we really want '/'
                # root is an os.path, though
                template_name = posixpath.join(os.path.basename(root), "views", "%s.html")
            for f in files:
                fn = os.path.join(root, f)
                basename, ext = os.path.splitext(f)
                if ext not in allowed_ext:
                    continue
                if ext == ".html":
                    with open(fn) as f:
                        html = f.read().strip()
                elif ext == ".jade":
                    with open(fn) as f:
                        jade = f.read()
                        parser = pyjade.parser.Parser(jade)
                        block = parser.parse()
                        compiler = pyjade.ext.html.Compiler(block, pretty=False)
                        html = compiler.compile()
                res[template_name % (basename,)] = json.dumps(html)
        return res
Example #19
  def ContainerCopy(self, file_name, container_path='', copy_to=True):
    """Copies a file between the host's vm_util.VM_TMP_DIR and container_path.

    Args:
      file_name: Name of the file in the host's vm_util.VM_TMP_DIR.
      container_path: Optional path of where to copy file on container.
      copy_to: True to copy to container, False to copy from container.
    Raises:
      RemoteExceptionError: If the source container_path is blank.
    """
    if copy_to:
      if container_path == '':
        container_path = CONTAINER_WORK_DIR

      # Everything in vm_util.VM_TMP_DIR is directly accessible
      # both in the host and in the container
      source_path = posixpath.join(CONTAINER_MOUNT_DIR, file_name)
      command = 'cp %s %s' % (source_path, container_path)
      self.RemoteCommand(command)
    else:
      if container_path == '':
        raise errors.VirtualMachine.RemoteExceptionError('Cannot copy '
                                                         'from blank target')
      destination_path = posixpath.join(CONTAINER_MOUNT_DIR, file_name)
      command = 'cp %s %s' % (container_path, destination_path)
      self.RemoteCommand(command)
Example #20
    def get_icon_url(self):
        base = join(self.ICON_PATH, self.get_ext())
        for ext in self.ICON_EXTENSIONS:
            relative = "%s.%s" % (base, ext)
            if exists(join(settings.MEDIA_ROOT, relative)):
                return join(settings.MEDIA_URL, relative)
        return None
Example #21
    def put_dir(self, local_path, remote_path, use_sudo, mirror_local_mode,
        mode, temp_dir):
        if os.path.basename(local_path):
            strip = os.path.dirname(local_path)
        else:
            strip = os.path.dirname(os.path.dirname(local_path))

        remote_paths = []

        for context, dirs, files in os.walk(local_path):
            rcontext = context.replace(strip, '', 1)
            # normalize pathname separators with POSIX separator
            rcontext = rcontext.replace(os.sep, '/')
            rcontext = rcontext.lstrip('/')
            rcontext = posixpath.join(remote_path, rcontext)

            if not self.exists(rcontext):
                self.mkdir(rcontext, use_sudo)

            for d in dirs:
                n = posixpath.join(rcontext, d)
                if not self.exists(n):
                    self.mkdir(n, use_sudo)

            for f in files:
                local_path = os.path.join(context, f)
                n = posixpath.join(rcontext, f)
                p = self.put(local_path, n, use_sudo, mirror_local_mode, mode,
                    True, temp_dir)
                remote_paths.append(p)
        return remote_paths
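
The strip/rcontext computation is the heart of put_dir: it rebases each walked local directory onto remote_path with POSIX separators. A worked sketch with made-up values:

import os
import posixpath

local_path = '/home/me/site'          # the tree being uploaded
strip = os.path.dirname(local_path)   # '/home/me'
context = '/home/me/site/css'         # one directory yielded by os.walk

rcontext = context.replace(strip, '', 1).replace(os.sep, '/').lstrip('/')
print(posixpath.join('/var/www', rcontext))  # /var/www/site/css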
Example #22
    def retrieve_keyring(self, keyring_path):
        # ignore local
        if self.local:
            return

        keyring = Keyring(keyring_path)
        # prevent previously signed repos from going unsigned
        if not self.signed and keyring.exists():
            raise RepositoryUnavailable('Previously signed repository can not go unsigned')
        if not self.signed:
            return

        if not keyring.exists() or self.key_update > keyring.version:
            # This is a remote repository, download file
            browser = WeboobBrowser()
            try:
                keyring_data = browser.readurl(posixpath.join(self.url, self.KEYRING))
                sig_data = browser.readurl(posixpath.join(self.url, self.KEYRING + '.sig'))
            except BrowserUnavailable, e:
                raise RepositoryUnavailable(unicode(e))
            if keyring.exists():
                if not keyring.is_valid(keyring_data, sig_data):
                    raise InvalidSignature('the keyring itself')
                print 'The keyring was updated (and validated by the previous one).'
            else:
                print 'First time saving the keyring, blindly accepted.'
            keyring.save(keyring_data, self.key_update)
            print keyring
Example #23
def update_virtualenv(preindex=False):
    """
    update external dependencies on remote host

    assumes you've done a code update

    """
    _require_target()
    if preindex:
        root_to_use = env.code_root_preindex
        env_to_use = env.virtualenv_root_preindex
    else:
        root_to_use = env.code_root
        env_to_use = env.virtualenv_root
    requirements = posixpath.join(root_to_use, 'requirements')
    with cd(root_to_use):
        cmd_prefix = 'export HOME=/home/%s && source %s/bin/activate && ' % (
            env.sudo_user, env_to_use)
        # uninstall requirements in uninstall-requirements.txt
        # but only the ones that are actually installed (checks pip freeze)
        sudo("%s bash scripts/uninstall-requirements.sh" % cmd_prefix,
             user=env.sudo_user)
        sudo('%s pip install --requirement %s --requirement %s' % (
            cmd_prefix,
            posixpath.join(requirements, 'prod-requirements.txt'),
            posixpath.join(requirements, 'requirements.txt'),
        ), user=env.sudo_user)
Example #24
def main():
    global VERBOSE, INPUT_DIR, OUTPUT_DIR, INCLUDE, EXCLUDE, SOURCE_PREFIX, TARGET_PREFIX

    parser = OptionParser()
    (options, args) = parser.parse_args()

    if options.verbose != None:
        VERBOSE = options.verbose
    if options.input != None:
        INPUT_DIR = options.input
    if options.output != None:
        OUTPUT_DIR = options.output
    if options.include != None:
        INCLUDE = options.include
    if options.exclude != None:
        EXCLUDE = options.exclude
    if options.sourceprefix != None:
        SOURCE_PREFIX = options.sourceprefix
    if options.targetprefix != None:
        TARGET_PREFIX = options.targetprefix

    themes = lookup_themes(INPUT_DIR)
    for name, theme in themes.iteritems():
        theme.initialize()
        if theme.hidden():
            print "Generating: %s.iby" % name
            theme.write_iby(posixpath.join(OUTPUT_DIR, "%s.iby" % name))
        else:
            print "Generating: %s.thx" % name
            theme.write_thx(posixpath.join(OUTPUT_DIR, "%s.thx" % name))

    return EXIT_STATUS
Example #25
def india():
    """Our production server in India."""
    env.home = '/home/commcarehq/'
    env.environment = 'india'
    env.sudo_user = '******'
    env.hosts = ['220.226.209.82']
    env.user = prompt("Username: ", default=env.user)
    env.django_port = '8001'
    env.should_migrate = True

    _setup_path()
    env.virtualenv_root = posixpath.join(
        env.home, '.virtualenvs/commcarehq27')
    env.virtualenv_root_preindex = posixpath.join(
        env.home, '.virtualenvs/commcarehq27_preindex')

    env.roledefs = {
        'couch': [],
        'pg': [],
        'rabbitmq': [],
        'django_celery': [],
        'sms_queue': [],
        'pillow_retry_queue': [],
        'django_app': [],
        'django_pillowtop': [],
        'formsplayer': [],
        'staticfiles': [],
        'lb': [],
        'deploy': [],

        'django_monolith': ['220.226.209.82'],
    }
    env.roles = ['django_monolith']
    env.es_endpoint = 'localhost'
    env.flower_port = 5555
Example #26
def run_tests_remote(tests, prefix, options):
    # Setup device with everything needed to run our tests.
    from mozdevice import devicemanager, devicemanagerADB, devicemanagerSUT

    if options.device_transport == 'adb':
        if options.device_ip:
            dm = devicemanagerADB.DeviceManagerADB(options.device_ip, options.device_port, deviceSerial=options.device_serial, packageName=None, deviceRoot=options.remote_test_root)
        else:
            dm = devicemanagerADB.DeviceManagerADB(deviceSerial=options.device_serial, packageName=None, deviceRoot=options.remote_test_root)
    else:
        dm = devicemanagerSUT.DeviceManagerSUT(options.device_ip, options.device_port, deviceRoot=options.remote_test_root)
        if options.device_ip == None:
            print('Error: you must provide a device IP to connect to via the --device option')
            sys.exit(1)

    # Update the test root to point to our test directory.
    jit_tests_dir = posixpath.join(options.remote_test_root, 'jit-tests')
    options.remote_test_root = posixpath.join(jit_tests_dir, 'jit-tests')

    # Push js shell and libraries.
    if dm.dirExists(jit_tests_dir):
        dm.removeDir(jit_tests_dir)
    dm.mkDirs(options.remote_test_root)
    push_libs(options, dm)
    push_progs(options, dm, [prefix[0]])
    dm.chmodDir(options.remote_test_root)

    dm.pushDir(ECMA6_DIR, posixpath.join(jit_tests_dir, 'tests', 'ecma_6'), timeout=600)
    dm.pushDir(os.path.dirname(TEST_DIR), options.remote_test_root, timeout=600)
    prefix[0] = os.path.join(options.remote_test_root, 'js')

    # Run all tests.
    gen = get_remote_results(tests, dm, prefix, options)
    ok = process_test_results(gen, len(tests), options)
    return ok
Example #27
    def generate_code(self):
        union_types = self.info_provider.union_types
        if not union_types:
            return ()
        header_template = self.jinja_env.get_template('union.h')
        cpp_template = self.jinja_env.get_template('union.cpp')
        template_context = v8_union.union_context(
            union_types, self.info_provider.interfaces_info)
        template_context['code_generator'] = module_pyname
        capitalized_component = self.target_component.capitalize()
        template_context['header_filename'] = 'bindings/%s/v8/UnionTypes%s.h' % (
            self.target_component, capitalized_component)
        template_context['macro_guard'] = 'UnionType%s_h' % capitalized_component

        # Add UnionTypesCore.h as a dependency when we generate modules union types
        # because we only generate union type containers which are used by both
        # core and modules in UnionTypesCore.h.
        # FIXME: This is an ad hoc workaround and we need a general way to
        # handle core <-> modules dependency.
        if self.target_component == 'modules':
            template_context['header_includes'] = sorted(
                template_context['header_includes'] +
                ['bindings/core/v8/UnionTypesCore.h'])

        header_text = header_template.render(template_context)
        cpp_text = cpp_template.render(template_context)
        header_path = posixpath.join(self.output_dir,
                                     'UnionTypes%s.h' % capitalized_component)
        cpp_path = posixpath.join(self.output_dir,
                                  'UnionTypes%s.cpp' % capitalized_component)
        return (
            (header_path, header_text),
            (cpp_path, cpp_text),
        )
Example #28
    def __init__(self, **kwargs):
        """:py:class:`~mrjob.hadoop.HadoopJobRunner` takes the same arguments
        as :py:class:`~mrjob.runner.MRJobRunner`, plus some additional options
        which can be defaulted in :ref:`mrjob.conf <mrjob.conf>`.
        """
        super(HadoopJobRunner, self).__init__(**kwargs)

        self._hdfs_tmp_dir = fully_qualify_hdfs_path(
            posixpath.join(
                self._opts['hdfs_scratch_dir'], self._job_name))

        # Set output dir if it wasn't set explicitly
        self._output_dir = fully_qualify_hdfs_path(
            self._output_dir or
            posixpath.join(self._hdfs_tmp_dir, 'output'))

        # we'll set this up later
        self._hdfs_input_files = None
        # temp dir for input
        self._hdfs_input_dir = None

        self._hadoop_log_dir = hadoop_log_dir()

        # Running jobs via hadoop assigns a new timestamp to each job.
        # Running jobs via mrjob only adds steps.
        # Store both of these values to enable log parsing.
        self._job_timestamp = None
        self._start_step_num = 0

        # init hadoop version cache
        self._hadoop_version = None
Example #29
    def create_board_post(self, board_name, board_id, current_uid = -1):
        board_info = board.get_board_info(board_id)
        if not acl.is_allowed('board', board_id, current_uid, 'create'):
            return util.render().error(error_message = _('NO_PERMISSION'), help_context='error')
        user_data = web.input()
        comment = 1 if user_data.has_key('commentable') else 0
        write_by_other = 1 if user_data.has_key('writable') else 0
        indexable = 1 if user_data.has_key('indexable') else 0
        show_avatar = 1 if user_data.has_key('show_avatar') else 0

        owner_uid = user._get_uid_from_username(user_data.owner)
        if owner_uid < 0:
            return util.render().error(error_message=_('NO_SUCH_USER_FOR_BOARD_ADMIN'), help_context='error')
        if user_data.name.strip() == '':
            return util.render().error(error_message = _('NO_NAME_SPECIFIED'), help_context='error')
        if board_name == '^root':
            new_path = posixpath.join('/', user_data.name)
        else:
            new_path = posixpath.join('/', board_name, user_data.name)
        if board._get_board_id_from_path(new_path) > 0:
            return util.render().error(error_message = _('BOARD_EXISTS'), help_context='error')

        settings = dict(path=new_path, board_owner = owner_uid,
                cover = user_data.information,
                description = user_data.description,
                type = int(user_data.type),
                guest_write = write_by_other,
                can_comment = comment,
                indexable = indexable, show_avatar = show_avatar,
                current_uid = current_uid)
        ret = board.create_board(board_id, settings)
        if ret[0] == False:
            return util.render().error(error_message = ret[1] ,help_context = 'error')
        raise web.seeother(util.link('%s') % (new_path))
Example #30
def prepare_site_db_and_overrides():
    '''Prepare overrides and create _SITE_DB

    _SITE_DB.keys() need to be ready for filter_translations
    '''
    _SITE_DB.clear()
    _SITE_DB[_MAIN_LANG] = _MAIN_SITEURL
    # make sure it works for both root-relative and absolute
    main_siteurl = '/' if _MAIN_SITEURL == '' else _MAIN_SITEURL
    for lang, overrides in _SUBSITE_QUEUE.items():
        if 'SITEURL' not in overrides:
            overrides['SITEURL'] = posixpath.join(main_siteurl, lang)
        _SITE_DB[lang] = overrides['SITEURL']
        # default subsite hierarchy
        if 'OUTPUT_PATH' not in overrides:
            overrides['OUTPUT_PATH'] = os.path.join(
                _MAIN_SETTINGS['OUTPUT_PATH'], lang)
        if 'CACHE_PATH' not in overrides:
            overrides['CACHE_PATH'] = os.path.join(
                _MAIN_SETTINGS['CACHE_PATH'], lang)
        if 'STATIC_PATHS' not in overrides:
            overrides['STATIC_PATHS'] = []
        if ('THEME' not in overrides and 'THEME_STATIC_DIR' not in overrides and
                'THEME_STATIC_PATHS' not in overrides):
            relpath = relpath_to_site(lang, _MAIN_LANG)
            overrides['THEME_STATIC_DIR'] = posixpath.join(
                relpath, _MAIN_SETTINGS['THEME_STATIC_DIR'])
            overrides['THEME_STATIC_PATHS'] = []
        # to change what is perceived as translations
        overrides['DEFAULT_LANG'] = lang
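
posixpath.join handles both SITEURL forms mentioned in the comment, since a root-relative '/' and an absolute URL both use forward slashes; a quick check of both cases:

import posixpath

print(posixpath.join('/', 'fr'))                    # /fr
print(posixpath.join('https://example.com', 'fr'))  # https://example.com/fr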
Example #31
def scan_image_for_system_packages(project, image, detect_licenses=True):
    """
    Given a `project` and an `image`, scan the `image` layer by layer for
    installed system packages. Create a DiscoveredPackage for each.

    Then for each installed DiscoveredPackage installed file, check if it exists
    as a CodebaseResource and relate that CodebaseResource to its
    DiscoveredPackage or keep that as a missing file.
    """
    if not image.distro:
        raise rootfs.DistroNotFound("Distro not found.")

    distro_id = image.distro.identifier
    if distro_id not in rootfs.PACKAGE_GETTER_BY_DISTRO:
        raise rootfs.DistroNotSupported(
            f'Distro "{distro_id}" is not supported.')

    package_getter = partial(
        rootfs.PACKAGE_GETTER_BY_DISTRO[distro_id],
        distro=distro_id,
        detect_licenses=detect_licenses,
    )

    installed_packages = image.get_installed_packages(package_getter)

    for i, (purl, package, layer) in enumerate(installed_packages):
        logger.info(f"Creating package #{i}: {purl}")
        created_package = pipes.update_or_create_package(
            project, package.to_dict())

        # We have no files for this installed package, we cannot go further.
        if not package.installed_files:
            logger.info(f"  No installed_files for: {purl}")
            continue

        missing_resources = created_package.missing_resources[:]
        modified_resources = created_package.modified_resources[:]

        codebase_resources = CodebaseResource.objects.project(project)

        for install_file in package.installed_files:
            install_file_path = pipes.normalize_path(install_file.path)
            layer_rootfs_path = posixpath.join(
                layer.layer_id,
                install_file_path.strip("/"),
            )
            logger.info(f"   installed file rootfs_path: {install_file_path}")
            logger.info(f"   layer rootfs_path: {layer_rootfs_path}")
            cbr_qs = codebase_resources.filter(
                path__endswith=layer_rootfs_path,
                rootfs_path=install_file_path,
            )
            found_res = False
            for codebase_resource in cbr_qs:
                found_res = True
                if created_package not in codebase_resource.discovered_packages.all():
                    codebase_resource.discovered_packages.add(created_package)
                    codebase_resource.status = "system-package"
                    logger.info(f"      added as system-package to: {purl}")
                    codebase_resource.save()

                if ((install_file.sha512 and codebase_resource.sha512
                     and codebase_resource.sha512 != install_file.sha512) or
                    (install_file.sha256 and codebase_resource.sha256
                     and codebase_resource.sha256 != install_file.sha256)
                        or (install_file.sha1 and codebase_resource.sha1
                            and codebase_resource.sha1 != install_file.sha1)
                        or (install_file.md5 and codebase_resource.md5
                            and codebase_resource.md5 != install_file.md5)):
                    # Alpine uses SHA1 while Debian uses MD5; we prefer the
                    # strongest hash that's present
                    if install_file.path not in modified_resources:
                        modified_resources.append(install_file.path)

            if not found_res and install_file_path not in missing_resources:
                missing_resources.append(install_file_path)
                logger.info(
                    f"      installed file is missing: {install_file_path}")

        created_package.missing_resources = missing_resources
        created_package.modified_resources = modified_resources
        created_package.save()
Example #32
    try:
        #Delete the case from the database
        dbh.delete('meta',
                   DB.expand("property='flag_db' and value=%r", case),
                   _fast=True)
        dbh.execute("drop database if exists `%s`", case)
    except DB.DBError, e:
        pass

    ## Delete the temporary directory corresponding to this case and all its content
    try:
        temporary_dir = "%s/case_%s" % (config.RESULTDIR, case)
        for root, dirs, files in os.walk(temporary_dir, topdown=False):
            for name in files:
                os.remove(join(root, name))
            for name in dirs:
                os.rmdir(join(root, name))

        os.rmdir(temporary_dir)
    except Exception, e:
        print e

    ## Expire any caches we have relating to this case:
    key_re = "%s[/|]?.*" % case

    import pyflag.IO as IO
    import pyflag.Scanner as Scanner

    IO.IO_Cache.expire(key_re)
    DB.DBO.DBH.expire(key_re)
Example #33
    def record_url(self, record_id):
        """ Builds URL with record id """
        return posixpath.join(self.url_table, record_id)
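
Joining URL pieces with posixpath works here because no later component carries a leading slash; an illustrative check with placeholder values, including the main caveat:

import posixpath

url_table = 'https://api.airtable.com/v0/appXXXX/Contacts'
print(posixpath.join(url_table, 'recwPQIfs4wKPyc9D'))
# https://api.airtable.com/v0/appXXXX/Contacts/recwPQIfs4wKPyc9D
# Caveat: posixpath.join(url_table, '/rec...') would discard the base URL.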
Example #34
class Airtable():

    VERSION = 'v0'
    API_BASE_URL = 'https://api.airtable.com/'
    API_LIMIT = 1.0 / 5  # 5 per second
    API_URL = posixpath.join(API_BASE_URL, VERSION)

    def __init__(self, base_key, table_name, api_key=None):
        """
        If api_key is not provided, :any:`AirtableAuth` will attempt
        to use ``os.environ['AIRTABLE_API_KEY']``
        """
        session = requests.Session()
        session.auth = AirtableAuth(api_key=api_key)
        self.session = session
        self.table_name = table_name
        urlsafe_table_name = quote(table_name, safe='')
        self.url_table = posixpath.join(self.API_URL, base_key,
                                        urlsafe_table_name)
        self.is_authenticated = self.validate_session(self.url_table)

    def validate_session(self, url):
        response = self.session.get(url, params={'maxRecords': 1})
        if response.ok:
            return True
        elif response.status_code == 404:
            raise ValueError('Invalid base or table name: {}'.format(url))
        else:
            raise ValueError('Authentication failed: {}'.format(
                response.reason))

    def _process_params(self, params):
        """
        Process params names or values as needed using filters
        """
        for param_name, param_value in params.copy().items():
            param_value = params.pop(param_name)
            ParamClass = AirtableParams._get(param_name)
            new_param = ParamClass(param_value).to_param_dict()
            params.update(new_param)
        return params

    def _process_response(self, response):
        # Removed due to IronPython Bug
        # https://github.com/IronLanguages/ironpython2/issues/242
        # if response.status_code == 422:
        #     raise HTTPError('Unprocessable Entity for url(
        #                        decoded): {}'.format(unquote(response.url)))
        response.raise_for_status()
        return response.json()

    def record_url(self, record_id):
        """ Builds URL with record id """
        return posixpath.join(self.url_table, record_id)

    def _request(self, method, url, params=None, json_data=None):
        response = self.session.request(method,
                                        url,
                                        params=params,
                                        json=json_data)
        # self._dump_request_data(response)
        return self._process_response(response)

    # def _dump_request_data(self, response):
    #     """ For Debugging """
    #     timestamp = str(time.time()).split('.')[-1]
    #     url = response.request.url
    #     method = response.request.method
    #     response_json = response.json()
    #     status = response.status_code
    #     filepath = os.path.join('tests', 'dump', '{}-{}_{}.json'.format(
    #                                                                 method,
    #                                                                 status,
    #                                                                 timestamp))
    #     dump = {
    #             'url': url,
    #             'method': method,
    #             'response_json': response_json,
    #             }
    #     with open(filepath, 'w') as fp:
    #         json.dump(dump, fp, indent=4)

    def _get(self, url, **params):
        processed_params = self._process_params(params)
        return self._request('get', url, params=processed_params)

    def _post(self, url, json_data):
        return self._request('post', url, json_data=json_data)

    def _put(self, url, json_data):
        return self._request('put', url, json_data=json_data)

    def _patch(self, url, json_data):
        return self._request('patch', url, json_data=json_data)

    def _delete(self, url):
        return self._request('delete', url)

    def get(self, record_id):
        """
        Retrieves a record by its id

        >>> record = airtable.get('recwPQIfs4wKPyc9D')

        Args:
            record_id(``str``): Airtable record id

        Returns:
            record (``dict``): Record
        """
        record_url = self.record_url(record_id)
        return self._get(record_url)

    def get_iter(self, **options):
        """
        Record Retriever Iterator

        Returns iterator with lists in batches according to pageSize.
        To get all records at once use :any:`get_all`

        >>> for page in airtable.get_iter():
        ...     for record in page:
        ...         print(record)
        [{'fields': ... }, ...]

        Keyword Args:
            maxRecords (``int``, optional): The maximum total number of records
                that will be returned. See :any:`MaxRecordsParam`
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            pageSize (``int``, optional ): The number of records returned
                in each request. Must be less than or equal to 100.
                Default is 100. See :any:`PageSizeParam`.
            fields (``str``, ``list``, optional): Name of field or fields to
                be retrieved. Default is all fields. See :any:`FieldsParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.
            formula (``str``, optional): Airtable formula.
                See :any:`FormulaParam`.

        Returns:
            iterator (``list``): List of Records, grouped by pageSize

        """
        offset = None
        while True:
            data = self._get(self.url_table, offset=offset, **options)
            records = data.get('records', [])
            time.sleep(self.API_LIMIT)
            yield records
            offset = data.get('offset')
            if not offset:
                break

    def get_all(self, **options):
        """
        Retrieves all records repetitively and returns a single list.

        >>> airtable.get_all()
        >>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
        >>> airtable.get_all(maxRecords=50)
        [{'fields': ... }, ...]

        Keyword Args:
            maxRecords (``int``, optional): The maximum total number of records
                that will be returned. See :any:`MaxRecordsParam`
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            fields (``str``, ``list``, optional): Name of field or fields to
                be retrieved. Default is all fields. See :any:`FieldsParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.
            formula (``str``, optional): Airtable formula.
                See :any:`FormulaParam`.

        Returns:
            records (``list``): List of Records

        >>> records = get_all(maxRecords=3, view='All')

        """
        all_records = []
        for records in self.get_iter(**options):
            all_records.extend(records)
        return all_records

    def match(self, field_name, field_value, **options):
        """
        Returns first match found in :any:`get_all`

        >>> airtable.match('Name', 'John')
        {'fields': {'Name': 'John'} }

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.

        Keyword Args:
            maxRecords (``int``, optional): The maximum total number of records
                that will be returned. See :any:`MaxRecordsParam`
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            fields (``str``, ``list``, optional): Name of field or fields to
                be retrieved. Default is all fields. See :any:`FieldsParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.

        Returns:
            record (``dict``): First record to match the field_value provided
        """
        formula = self.formula_from_name_and_value(field_name, field_value)
        options['formula'] = formula
        for record in self.get_all(**options):
            return record
        else:
            return {}

    def search(self, field_name, field_value, record=None, **options):
        """
        Returns all matching records found in :any:`get_all`

        >>> airtable.search('Gender', 'Male')
        [{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.

        Keyword Args:
            maxRecords (``int``, optional): The maximum total number of records
                that will be returned. See :any:`MaxRecordsParam`
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            fields (``str``, ``list``, optional): Name of field or fields to
                be retrieved. Default is all fields. See :any:`FieldsParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.

        Returns:
            records (``list``): All records that matched ``field_value``

        """
        records = []
        formula = self.formula_from_name_and_value(field_name, field_value)
        options['formula'] = formula
        records = self.get_all(**options)
        return records

    def insert(self, fields):
        """
        Inserts a record

        >>> record = {'Name': 'John'}
        >>> airtable.insert(record)

        Args:
            fields(``dict``): Fields to insert.
                Must be dictionary with Column names as Key.

        Returns:
            record (``dict``): Inserted record

        """
        return self._post(self.url_table, json_data={"fields": fields})

    def _batch_request(self, func, iterable):
        """ Internal Function to limit batch calls to API limit """
        responses = []
        for item in iterable:
            responses.append(func(item))
            time.sleep(self.API_LIMIT)
        return responses

    def batch_insert(self, records):
        """
        Calls :any:`insert` repetitively, following the set API rate limit
        (5/sec). To change the rate limit, use ``airtable.API_LIMIT = 0.2``
        (5 per second).

        >>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
        >>> airtable.batch_insert(records)

        Args:
            records(``list``): Records to insert

        Returns:
            records (``list``): list of added records

        """
        return self._batch_request(self.insert, records)

    def update(self, record_id, fields):
        """
        Updates a record by its record id.
        Only Fields passed are updated, the rest are left as is.

        >>> record = airtable.match('Employee Id', 'DD13332454')
        >>> fields = {'Status': 'Fired'}
        >>> airtable.update(record['id'], fields)

        Args:
            record_id(``str``): Id of Record to update
            fields(``dict``): Fields to update.
                Must be dictionary with Column names as Key

        Returns:
            record (``dict``): Updated record
        """
        record_url = self.record_url(record_id)
        return self._patch(record_url, json_data={"fields": fields})

    def update_by_field(self, field_name, field_value, fields, **options):
        """
        Updates the first record to match field name and value.
        Only Fields passed are updated, the rest are left as is.

        >>> record = {'Name': 'John', 'Tel': '540-255-5522'}
        >>> airtable.update_by_field('Name', 'John', record)

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.
            fields(``dict``): Fields to update.
                Must be dictionary with Column names as Key

        Keyword Args:
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.

        Returns:
            record (``dict``): Updated record
        """
        record = self.match(field_name, field_value, **options)
        return {} if not record else self.update(record['id'], fields)

    def replace(self, record_id, fields):
        """
        Replaces a record by its record id.
        All Fields are updated to match the new ``fields`` provided.
        If a field is not included in ``fields``, its value will be set to null.
        To update only selected fields, use :any:`update`.

        >>> record = airtable.match('Seat Number', '22A')
        >>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
        >>> airtable.replace(record['id'], fields)

        Args:
            record_id(``str``): Id of Record to update
            fields(``dict``): Fields to replace with.
                Must be dictionary with Column names as Key.

        Returns:
            record (``dict``): New record
        """
        record_url = self.record_url(record_id)
        return self._put(record_url, json_data={"fields": fields})

    def replace_by_field(self, field_name, field_value, fields, **options):
        """
        Replaces the first record to match field name and value.
        All Fields are updated to match the new ``fields`` provided.
        If a field is not included in ``fields``, its value will be set to null.
        To update only selected fields, use :any:`update`.

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.
            fields(``dict``): Fields to replace with.
                Must be dictionary with Column names as Key.

        Keyword Args:
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.

        Returns:
            record (``dict``): New record
        """
        record = self.match(field_name, field_value, **options)
        return {} if not record else self.replace(record['id'], fields)

    def delete(self, record_id):
        """
        Deletes a record by its id

        >>> record = airtable.match('Employee Id', 'DD13332454')
        >>> airtable.delete(record['id'])

        Args:
            record_id(``str``): Airtable record id

        Returns:
            record (``dict``): Deleted Record
        """
        record_url = self.record_url(record_id)
        return self._delete(record_url)

    def delete_by_field(self, field_name, field_value, **options):
        """
        Deletes the first record to match the provided ``field_name`` and
        ``field_value``.

        >>> record = airtable.delete_by_field('Employee Id', 'DD13332454')

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.

        Keyword Args:
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.

        Returns:
            record (``dict``): Deleted Record
        """
        record = self.match(field_name, field_value, **options)
        record_url = self.record_url(record['id'])
        return self._delete(record_url)

    def batch_delete(self, record_ids):
        """
        Calls :any:`delete` repetitively, following the set API rate limit
        (5/sec). To change the rate limit, use ``airtable.API_LIMIT = 0.2``
        (5 per second).

        >>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
        >>> airtable.batch_delete(record_ids)

        Args:
            record_ids(``list``): Record ids to delete

        Returns:
            records (``list``): list of records deleted

        """
        return self._batch_request(self.delete, record_ids)

    def mirror(self, records, **options):
        """
        Deletes all records on table or view and replaces with records.

        >>> records = [{'Name': 'John'}, {'Name': 'Marc'}]

        >>> record = airtable.mirror(records)

        If view options are provided, only records visible on that view will
        be deleted.

        >>> record = airtable.mirror(records, view='View')
        ([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])

        Args:
            records(``list``): Records to insert

        Keyword Args:
            maxRecords (``int``, optional): The maximum total number of records
                that will be returned. See :any:`MaxRecordsParam`

        Returns:
            records (``tuple``): (new_records, deleted_records)
        """

        all_record_ids = [r['id'] for r in self.get_all(**options)]
        deleted_records = self.batch_delete(all_record_ids)
        new_records = self.batch_insert(records)
        return (new_records, deleted_records)

    @staticmethod
    def formula_from_name_and_value(field_name, field_value):
        """ Creates a formula to match cells from from field_name and value """
        if isinstance(field_value, str):
            field_value = "'{}'".format(field_value)

        formula = "{{{name}}}={value}".format(name=field_name,
                                              value=field_value)
        return formula
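
    # A hedged usage sketch (values hypothetical): text values are quoted,
    # other types are interpolated as-is, e.g.
    #   Airtable.formula_from_name_and_value('Name', 'John')  # "{Name}='John'"
    #   Airtable.formula_from_name_and_value('Age', 30)       # "{Age}=30"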

    def __repr__(self):
        return '<Airtable table:{}>'.format(self.table_name)
Example #35
0
    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
           self.__commands"""

        # The download URL has the format MAJOR.MINOR in the path and
        # the tarball contains MAJOR.MINOR.REVISION, so pull apart the
        # full version to get the individual components.
        match = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<revision>\d+)',
                         self.__version)
        major_minor = '{0}.{1}'.format(match.groupdict()['major'],
                                       match.groupdict()['minor'])

        if self.__inbox:
            # Use inbox OFED
            self.__label = 'hpcx-v{0}-gcc-inbox-{1}-{2}'.format(
                self.__version, self.__oslabel, self.__arch)
        else:
            # Use MLNX OFED
            self.__label = 'hpcx-v{0}-gcc-MLNX_OFED_LINUX-{1}-{2}-{3}'.format(
                self.__version, self.__mlnx_ofed, self.__oslabel, self.__arch)

        tarball = self.__label + '.tbz'
        url = '{0}/v{1}/{2}'.format(self.__baseurl, major_minor, tarball)

        # Download source from web
        self.__commands.append(self.download_step(url=url,
                                                  directory=self.__wd))

        # "Install"
        self.__commands.append(
            self.untar_step(tarball=posixpath.join(self.__wd, tarball),
                            directory=self.__wd))
        self.__commands.append('cp -a {0} {1}'.format(
            posixpath.join(self.__wd, self.__label), self.__prefix))

        # Set the environment
        if self.__hpcxinit:
            # Use hpcxinit script
            if self.__multi_thread:
                self.__commands.append('echo "source {0}" >> {1}'.format(
                    posixpath.join(self.__prefix, 'hpcx-mt-init-ompi.sh'),
                    self.__bashrc))
            else:
                self.__commands.append('echo "source {0}" >> {1}'.format(
                    posixpath.join(self.__prefix, 'hpcx-init-ompi.sh'),
                    self.__bashrc))
            self.__commands.append('echo "hpcx_load" >> {0}'.format(
                self.__bashrc))
        else:
            # Set environment manually
            hpcx_dir = self.__prefix
            if self.__multi_thread:
                hpcx_ucx_dir = posixpath.join(hpcx_dir, 'ucx', 'mt')
            else:
                hpcx_ucx_dir = posixpath.join(hpcx_dir, 'ucx')
            hpcx_sharp_dir = posixpath.join(hpcx_dir, 'sharp')
            hpcx_nccl_rdma_sharp_plugin_dir = posixpath.join(
                hpcx_dir, 'nccl_rdma_sharp_plugin')
            hpcx_hcoll_dir = posixpath.join(hpcx_dir, 'hcoll')
            hpcx_mpi_dir = posixpath.join(hpcx_dir, 'ompi')
            hpcx_oshmem_dir = hpcx_mpi_dir
            hpcx_mpi_tests_dir = posixpath.join(hpcx_mpi_dir, 'tests')
            if StrictVersion(self.__version) >= StrictVersion('2.7'):
                hpcx_osu_dir = posixpath.join(hpcx_mpi_tests_dir,
                                              'osu-micro-benchmarks-5.6.2')
                hpcx_osu_cuda_dir = posixpath.join(
                    hpcx_mpi_tests_dir, 'osu-micro-benchmarks-5.6.2-cuda')
            else:
                hpcx_osu_dir = posixpath.join(hpcx_mpi_tests_dir,
                                              'osu-micro-benchmarks-5.3.2')
                hpcx_osu_cuda_dir = posixpath.join(
                    hpcx_mpi_tests_dir, 'osu-micro-benchmarks-5.3.2-cuda')
            hpcx_ipm_dir = posixpath.join(hpcx_mpi_tests_dir, 'ipm-2.0.6')
            hpcx_ipm_lib = posixpath.join(hpcx_ipm_dir, 'lib', 'libipm.so')
            hpcx_clusterkit_dir = posixpath.join(hpcx_dir, 'clusterkit')

            self.environment_variables = {
                'CPATH':
                ':'.join([
                    posixpath.join(hpcx_hcoll_dir, 'include'),
                    posixpath.join(hpcx_mpi_dir, 'include'),
                    posixpath.join(hpcx_sharp_dir, 'include'),
                    posixpath.join(hpcx_ucx_dir, 'include'), '$CPATH'
                ]),
                'HPCX_CLUSTERKIT_DIR':
                hpcx_clusterkit_dir,
                'HPCX_DIR':
                hpcx_dir,
                'HPCX_HCOLL_DIR':
                hpcx_hcoll_dir,
                'HPCX_IPM_DIR':
                hpcx_ipm_dir,
                'HPCX_IPM_LIB':
                hpcx_ipm_lib,
                'HPCX_MPI_DIR':
                hpcx_mpi_dir,
                'HPCX_MPI_TESTS_DIR':
                hpcx_mpi_tests_dir,
                'HPCX_NCCL_RDMA_SHARP_PLUGIN_DIR':
                hpcx_nccl_rdma_sharp_plugin_dir,
                'HPCX_OSHMEM_DIR':
                hpcx_oshmem_dir,
                'HPCX_OSU_CUDA_DIR':
                hpcx_osu_cuda_dir,
                'HPCX_OSU_DIR':
                hpcx_osu_dir,
                'HPCX_SHARP_DIR':
                hpcx_sharp_dir,
                'HPCX_UCX_DIR':
                hpcx_ucx_dir,
                'LIBRARY_PATH':
                ':'.join([
                    posixpath.join(hpcx_hcoll_dir, 'lib'),
                    posixpath.join(hpcx_mpi_dir, 'lib'),
                    posixpath.join(hpcx_nccl_rdma_sharp_plugin_dir, 'lib'),
                    posixpath.join(hpcx_sharp_dir, 'lib'),
                    posixpath.join(hpcx_ucx_dir, 'lib'), '$LIBRARY_PATH'
                ]),
                'MPI_HOME':
                hpcx_mpi_dir,
                'OMPI_HOME':
                hpcx_mpi_dir,
                'OPAL_PREFIX':
                hpcx_mpi_dir,
                'OSHMEM_HOME':
                hpcx_mpi_dir,
                'PATH':
                ':'.join([
                    posixpath.join(hpcx_clusterkit_dir, 'bin'),
                    posixpath.join(hpcx_hcoll_dir, 'bin'),
                    posixpath.join(hpcx_mpi_dir, 'bin'),
                    posixpath.join(hpcx_ucx_dir, 'bin'), '$PATH'
                ]),
                'PKG_CONFIG_PATH':
                ':'.join([
                    posixpath.join(hpcx_hcoll_dir, 'lib', 'pkgconfig'),
                    posixpath.join(hpcx_mpi_dir, 'lib', 'pkgconfig'),
                    posixpath.join(hpcx_sharp_dir, 'lib', 'pkgconfig'),
                    posixpath.join(hpcx_ucx_dir, 'lib', 'pkgconfig'),
                    '$PKG_CONFIG_PATH'
                ]),
                'SHMEM_HOME':
                hpcx_mpi_dir
            }

            # Set library path
            if self.ldconfig:
                self.__commands.append(
                    self.ldcache_step(
                        directory=posixpath.join(hpcx_hcoll_dir, 'lib')))
                self.__commands.append(
                    self.ldcache_step(
                        directory=posixpath.join(hpcx_mpi_dir, 'lib')))
                self.__commands.append(
                    self.ldcache_step(directory=posixpath.join(
                        hpcx_nccl_rdma_sharp_plugin_dir, 'lib')))
                self.__commands.append(
                    self.ldcache_step(
                        directory=posixpath.join(hpcx_sharp_dir, 'lib')))
                self.__commands.append(
                    self.ldcache_step(
                        directory=posixpath.join(hpcx_ucx_dir, 'lib')))
                self.__commands.append(
                    self.ldcache_step(
                        directory=posixpath.join(hpcx_ucx_dir, 'lib', 'ucx')))
            else:
                self.environment_variables['LD_LIBRARY_PATH'] = ':'.join([
                    posixpath.join(hpcx_hcoll_dir, 'lib'),
                    posixpath.join(hpcx_mpi_dir, 'lib'),
                    posixpath.join(hpcx_nccl_rdma_sharp_plugin_dir, 'lib'),
                    posixpath.join(hpcx_sharp_dir, 'lib'),
                    posixpath.join(hpcx_ucx_dir, 'lib'),
                    posixpath.join(hpcx_ucx_dir, 'lib', 'ucx'),
                    '$LD_LIBRARY_PATH'
                ])

        # Cleanup tarball and directory
        self.__commands.append(
            self.cleanup_step(items=[
                posixpath.join(self.__wd, tarball),
                posixpath.join(self.__wd, self.__label)
            ]))
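
# A minimal, standalone sketch (hypothetical version string) of the
# version-splitting step at the top of __setup above: the download URL
# needs MAJOR.MINOR while the tarball name carries MAJOR.MINOR.REVISION.
import re

match = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<revision>\d+)', '2.7.4')
major_minor = '{0}.{1}'.format(match.group('major'), match.group('minor'))
assert major_minor == '2.7'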
Example #36
0
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = posixpath.join(*(BASE_DIR.split(os.path.sep) + ['static']))
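
# A hedged illustration of the STATIC_ROOT idiom above (hypothetical path):
# splitting BASE_DIR on the native separator and re-joining with posixpath
# turns a Windows-style path into a forward-slash one.
import posixpath

parts = 'C:\\projects\\mysite'.split('\\')    # ['C:', 'projects', 'mysite']
print(posixpath.join(*(parts + ['static'])))  # C:/projects/mysite/static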
Example #37
0
 def rooted_uri(self, *args):
     return posixpath.join(self._host_url, *args)
Example #38
0
def urljoin(base, *args):
    """Remove any leading slashes so no subpaths look absolute."""
    return posixpath.join(base, *[str(s).lstrip('/') for s in args])
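
# A short demonstration of why the leading slashes are stripped: a segment
# that looks absolute would otherwise discard everything joined before it.
import posixpath

print(posixpath.join('base', '/abs', 'x'))  # '/abs/x' -- 'base' is lost
print(urljoin('base', '/abs', 'x'))         # 'base/abs/x'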
Example #39
0
def save_model(
    pytorch_model,
    path,
    conda_env=None,
    mlflow_model=None,
    code_paths=None,
    pickle_module=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    requirements_file=None,
    extra_files=None,
    pip_requirements=None,
    extra_pip_requirements=None,
    **kwargs,
):
    """
    Save a PyTorch model to a path on the local file system.

    :param pytorch_model: PyTorch model to be saved. Can be either an eager model (subclass of
                          ``torch.nn.Module``) or scripted model prepared via ``torch.jit.script``
                          or ``torch.jit.trace``.

                          The model should accept a single ``torch.FloatTensor`` as
                          input and produce a single output tensor.

                          If saving an eager model, any code dependencies of the
                          model's class, including the class definition itself, should be
                          included in one of the following locations:

                          - The package(s) listed in the model's Conda environment, specified
                            by the ``conda_env`` parameter.
                          - One or more of the files specified by the ``code_paths`` parameter.

    :param path: Local path where the model is to be saved.
    :param conda_env: {{ conda_env }}
    :param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
    :param code_paths: A list of local filesystem paths to Python file dependencies (or directories
                       containing file dependencies). These files are *prepended* to the system
                       path when the model is loaded.
    :param pickle_module: The module that PyTorch should use to serialize ("pickle") the specified
                          ``pytorch_model``. This is passed as the ``pickle_module`` parameter
                          to ``torch.save()``. By default, this module is also used to
                          deserialize ("unpickle") the PyTorch model at load time.

    :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
                      describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
                      The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
                      from datasets with valid model input (e.g. the training dataset with target
                      column omitted) and valid model output (e.g. model predictions generated on
                      the training dataset), for example:

                      .. code-block:: python

                        from mlflow.models.signature import infer_signature
                        train = df.drop(columns=["target_label"])
                        predictions = ... # compute model predictions
                        signature = infer_signature(train, predictions)
    :param input_example: Input example provides one or several instances of valid
                          model input. The example can be used as a hint of what data to feed the
                          model. The given example can be a Pandas DataFrame where the given
                          example will be serialized to json using the Pandas split-oriented
                          format, or a numpy array where the example will be serialized to json
                          by converting it to a list. Bytes are base64-encoded.

    :param requirements_file:

        .. warning::

            ``requirements_file`` has been deprecated. Please use ``pip_requirements`` instead.

        A string containing the path to requirements file. Remote URIs are resolved to absolute
        filesystem paths. For example, consider the following ``requirements_file`` string:

        .. code-block:: python

            requirements_file = "s3://my-bucket/path/to/my_file"

        In this case, the ``"my_file"`` requirements file is downloaded from S3. If ``None``,
        no requirements file is added to the model.

    :param extra_files: A list containing the paths to corresponding extra files. Remote URIs
                      are resolved to absolute filesystem paths.
                      For example, consider the following ``extra_files`` list:

                      .. code-block:: python

                          extra_files = ["s3://my-bucket/path/to/my_file1",
                                         "s3://my-bucket/path/to/my_file2"]

                      In this case, the ``my_file1`` and ``my_file2`` extra files are
                      downloaded from S3. If ``None``, no extra files are added to the model.
    :param pip_requirements: {{ pip_requirements }}
    :param extra_pip_requirements: {{ extra_pip_requirements }}
    :param kwargs: kwargs to pass to ``torch.save`` method.

    .. code-block:: python
        :caption: Example

        import os

        import torch
        import mlflow.pytorch

        # Class defined here
        class LinearNNModel(torch.nn.Module):
            ...

        # Initialize our model, criterion and optimizer
        ...

        # Training loop
        ...

        # Save PyTorch models to current working directory
        with mlflow.start_run() as run:
            mlflow.pytorch.save_model(model, "model")

            # Convert to a scripted model and save it
            scripted_pytorch_model = torch.jit.script(model)
            mlflow.pytorch.save_model(scripted_pytorch_model, "scripted_model")

        # Load each saved model for inference
        for model_path in ["model", "scripted_model"]:
            model_uri = "{}/{}".format(os.getcwd(), model_path)
            loaded_model = mlflow.pytorch.load_model(model_uri)
            print("Loaded {}:".format(model_path))
            for x in [6.0, 8.0, 12.0, 30.0]:
                X = torch.Tensor([[x]])
                y_pred = loaded_model(X)
                print("predict X: {}, y_pred: {:.2f}".format(x, y_pred.data.item()))
            print("--")

    .. code-block:: text
        :caption: Output

        Loaded model:
        predict X: 6.0, y_pred: 11.90
        predict X: 8.0, y_pred: 15.92
        predict X: 12.0, y_pred: 23.96
        predict X: 30.0, y_pred: 60.13
        --
        Loaded scripted_model:
        predict X: 6.0, y_pred: 11.90
        predict X: 8.0, y_pred: 15.92
        predict X: 12.0, y_pred: 23.96
        predict X: 30.0, y_pred: 60.13
    """
    import torch

    _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)

    pickle_module = pickle_module or mlflow_pytorch_pickle_module

    if not isinstance(pytorch_model, torch.nn.Module):
        raise TypeError("Argument 'pytorch_model' should be a torch.nn.Module")
    path = os.path.abspath(path)
    if os.path.exists(path):
        raise RuntimeError("Path '{}' already exists".format(path))

    if mlflow_model is None:
        mlflow_model = Model()

    os.makedirs(path)
    if signature is not None:
        mlflow_model.signature = signature
    if input_example is not None:
        _save_example(mlflow_model, input_example, path)

    code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)

    model_data_subpath = "data"
    model_data_path = os.path.join(path, model_data_subpath)
    os.makedirs(model_data_path)

    # Persist the pickle module name as a file in the model's `data` directory. This is necessary
    # because the `data` directory is the only available parameter to `_load_pyfunc`, and it
    # does not contain the MLmodel configuration; therefore, it is not sufficient to place
    # the module name in the MLmodel
    #
    # TODO: Stop persisting this information to the filesystem once we have a mechanism for
    # supplying the MLmodel configuration to `mlflow.pytorch._load_pyfunc`
    pickle_module_path = os.path.join(model_data_path, _PICKLE_MODULE_INFO_FILE_NAME)
    with open(pickle_module_path, "w") as f:
        f.write(pickle_module.__name__)
    # Save pytorch model
    model_path = os.path.join(model_data_path, _SERIALIZED_TORCH_MODEL_FILE_NAME)
    if isinstance(pytorch_model, torch.jit.ScriptModule):
        torch.jit.ScriptModule.save(pytorch_model, model_path)
    else:
        torch.save(pytorch_model, model_path, pickle_module=pickle_module, **kwargs)

    torchserve_artifacts_config = {}

    if extra_files:
        torchserve_artifacts_config[_EXTRA_FILES_KEY] = []
        if not isinstance(extra_files, list):
            raise TypeError("Extra files argument should be a list")

        with TempDir() as tmp_extra_files_dir:
            for extra_file in extra_files:
                _download_artifact_from_uri(
                    artifact_uri=extra_file, output_path=tmp_extra_files_dir.path()
                )
                rel_path = posixpath.join(_EXTRA_FILES_KEY, os.path.basename(extra_file))
                torchserve_artifacts_config[_EXTRA_FILES_KEY].append({"path": rel_path})
            shutil.move(
                tmp_extra_files_dir.path(),
                posixpath.join(path, _EXTRA_FILES_KEY),
            )

    if requirements_file:

        warnings.warn(
            "`requirements_file` has been deprecated. Please use `pip_requirements` instead.",
            FutureWarning,
            stacklevel=2,
        )

        if not isinstance(requirements_file, str):
            raise TypeError("Path to requirements file should be a string")

        with TempDir() as tmp_requirements_dir:
            _download_artifact_from_uri(
                artifact_uri=requirements_file, output_path=tmp_requirements_dir.path()
            )
            rel_path = os.path.basename(requirements_file)
            torchserve_artifacts_config[_REQUIREMENTS_FILE_KEY] = {"path": rel_path}
            shutil.move(tmp_requirements_dir.path(rel_path), path)

    mlflow_model.add_flavor(
        FLAVOR_NAME,
        model_data=model_data_subpath,
        pytorch_version=str(torch.__version__),
        code=code_dir_subpath,
        **torchserve_artifacts_config,
    )
    pyfunc.add_to_model(
        mlflow_model,
        loader_module="mlflow.pytorch",
        data=model_data_subpath,
        pickle_module_name=pickle_module.__name__,
        code=code_dir_subpath,
        env=_CONDA_ENV_FILE_NAME,
    )
    mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))

    if conda_env is None:
        if pip_requirements is None:
            default_reqs = get_default_pip_requirements()
            # To ensure `_load_pyfunc` can successfully load the model during the dependency
            # inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
            inferred_reqs = mlflow.models.infer_pip_requirements(
                model_data_path,
                FLAVOR_NAME,
                fallback=default_reqs,
            )
            default_reqs = sorted(set(inferred_reqs).union(default_reqs))
        else:
            default_reqs = None
        conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
            default_reqs,
            pip_requirements,
            extra_pip_requirements,
        )
    else:
        conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)

    with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)

    # Save `constraints.txt` if necessary
    if pip_constraints:
        write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))

    if not requirements_file:
        # Save `requirements.txt`
        write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
Example #40
0
    def _GetSuccessResponse(self, request_path, server_instance):
        '''Returns the Response from trying to render |request_path| with
        |server_instance|. If |request_path| isn't found then a
        FileNotFoundError will be raised, such that the only responses
        returned from this method are Ok and Redirect.
        '''
        content_provider, serve_from, path = (
            server_instance.content_providers.GetByServeFrom(request_path))
        assert content_provider, 'No ContentProvider found for %s' % path

        redirect = Redirector(server_instance.compiled_fs_factory,
                              content_provider.file_system).Redirect(
                                  self._request.host, path)
        if redirect is not None:
            # Absolute redirects stay absolute, relative redirects are relative to
            # |serve_from|; all redirects eventually need to be *served* as absolute.
            if not redirect.startswith('/'):
                redirect = '/' + posixpath.join(serve_from, redirect)
            return Response.Redirect(redirect, permanent=False)

        canonical_path = content_provider.GetCanonicalPath(path)
        if canonical_path != path:
            redirect_path = posixpath.join(serve_from, canonical_path)
            return Response.Redirect('/' + redirect_path, permanent=False)

        if request_path.endswith('/'):
            # Directory request hasn't been redirected by now. Default behaviour is
            # to redirect as though it were a file.
            return Response.Redirect('/' + request_path.rstrip('/'),
                                     permanent=False)

        content_and_type = content_provider.GetContentAndType(path).Get()
        if not content_and_type.content:
            logging.error('%s had empty content' % path)

        content = content_and_type.content
        if isinstance(content, Handlebar):
            template_content, template_warnings = (
                server_instance.template_renderer.Render(
                    content, self._request))
            # HACK: the site verification file (google2ed...) doesn't have a title.
            content, doc_warnings = server_instance.document_renderer.Render(
                template_content,
                path,
                render_title=path != SITE_VERIFICATION_FILE)
            warnings = template_warnings + doc_warnings
            if warnings:
                sep = '\n - '
                logging.warning('Rendering %s:%s%s' %
                                (path, sep, sep.join(warnings)))
            # Content was dynamic. The new etag is a hash of the content.
            etag = None
        elif content_and_type.version is not None:
            # Content was static. The new etag is the version of the content. Hash it
            # to make sure it's valid.
            etag = '"%s"' % hashlib.md5(str(
                content_and_type.version)).hexdigest()
        else:
            # Sometimes non-dynamic content does not have a version, for example
            # .zip files. The new etag is a hash of the content.
            etag = None

        content_type = content_and_type.content_type
        if isinstance(content, unicode):
            content = content.encode('utf-8')
            content_type += '; charset=utf-8'

        if etag is None:
            # Note: we're using md5 as a convenient and fast-enough way to identify
            # content. It's not intended to be cryptographic in any way, and this
            # is *not* what etags is for. That's what SSL is for, this is unrelated.
            etag = '"%s"' % hashlib.md5(content).hexdigest()

        headers = _MakeHeaders(content_type, etag=etag)
        if etag == self._request.headers.get('If-None-Match'):
            return Response.NotModified('Not Modified', headers=headers)
        return Response.Ok(content, headers=headers)
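
# A minimal sketch (hypothetical values) of the etag scheme used above:
# hash the rendered content, and answer If-None-Match with a "not modified"
# response when the client already holds that version.
import hashlib

content = b'<html>rendered page</html>'
etag = '"%s"' % hashlib.md5(content).hexdigest()
if_none_match = etag  # pretend the client echoed this header back
print(etag == if_none_match)  # True -> respond 304 instead of resending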
Example #41
0
import jinja2

from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import units
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import fio
import six
from six.moves import range

PKB_FIO_LOG_FILE_NAME = 'pkb_fio_avg'
LOCAL_JOB_FILE_SUFFIX = '_fio.job'  # used with vm_util.PrependTempDir()
REMOTE_JOB_FILE_PATH = posixpath.join(vm_util.VM_TMP_DIR, 'fio.job')
DEFAULT_TEMP_FILE_NAME = 'fio-temp-file'
MOUNT_POINT = '/scratch'


# This dictionary maps scenario names to dictionaries of fio settings.
SCENARIOS = {
    'sequential_write': {
        'name': 'sequential_write',
        'rwkind': 'write',
        'blocksize': '512k'
    },
    'sequential_read': {
        'name': 'sequential_read',
        'rwkind': 'read',
        'blocksize': '512k'
Example #42
0
def get_repo_dirents(request, repo, commit, path, offset=-1, limit=-1):
    """List repo dirents based on commit id and path. Use ``offset`` and
    ``limit`` to do paginating.

    Returns: A tuple of (file_list, dir_list, dirent_more)

    TODO: Some unrelated parts(file sharing, stars, modified info, etc) need
    to be pulled out to multiple functions.
    """

    dir_list = []
    file_list = []
    dirent_more = False
    if commit.root_id == EMPTY_SHA1:
        return ([], [], False)
    else:
        try:
            dirs = seafile_api.list_dir_by_commit_and_path(commit.repo_id,
                                                           commit.id, path,
                                                           offset, limit)
            if not dirs:
                return ([], [], False)
        except SearpcError as e:
            logger.error(e)
            return ([], [], False)

        if limit != -1 and limit == len(dirs):
            dirent_more = True

        username = request.user.username
        starred_files = get_dir_starred_files(username, repo.id, path)
        fileshares = FileShare.objects.filter(repo_id=repo.id).filter(username=username)
        uploadlinks = UploadLinkShare.objects.filter(repo_id=repo.id).filter(username=username)


        view_dir_base = reverse("view_common_lib_dir", args=[repo.id, ''])
        dl_dir_base = reverse('repo_download_dir', args=[repo.id])
        file_history_base = reverse('file_revisions', args=[repo.id])
        for dirent in dirs:
            dirent.last_modified = dirent.mtime
            dirent.sharelink = ''
            dirent.uploadlink = ''
            if stat.S_ISDIR(dirent.props.mode):
                dpath = posixpath.join(path, dirent.obj_name)
                if dpath[-1] != '/':
                    dpath += '/'
                for share in fileshares:
                    if dpath == share.path:
                        dirent.sharelink = gen_dir_share_link(share.token)
                        dirent.sharetoken = share.token
                        break
                for link in uploadlinks:
                    if dpath == link.path:
                        dirent.uploadlink = gen_shared_upload_link(link.token)
                        dirent.uploadtoken = link.token
                        break
                p_dpath = posixpath.join(path, dirent.obj_name)
                dirent.view_link = view_dir_base + '?p=' + urlquote(p_dpath)
                dirent.dl_link = dl_dir_base + '?p=' + urlquote(p_dpath)
                dir_list.append(dirent)
            else:
                file_list.append(dirent)
                if repo.version == 0:
                    dirent.file_size = get_file_size(repo.store_id, repo.version, dirent.obj_id)
                else:
                    dirent.file_size = dirent.size
                dirent.starred = False
                fpath = posixpath.join(path, dirent.obj_name)
                p_fpath = posixpath.join(path, dirent.obj_name)
                dirent.view_link = reverse('view_lib_file', args=[repo.id, p_fpath])
                dirent.dl_link = get_file_download_link(repo.id, dirent.obj_id,
                                                        p_fpath)
                dirent.history_link = file_history_base + '?p=' + urlquote(p_fpath)
                if fpath in starred_files:
                    dirent.starred = True
                for share in fileshares:
                    if fpath == share.path:
                        dirent.sharelink = gen_file_share_link(share.token)
                        dirent.sharetoken = share.token
                        break

        return (file_list, dir_list, dirent_more)
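
# A hedged sketch of paginating with ``offset``/``limit`` and the
# ``dirent_more`` flag (request/repo/commit/path are assumed to exist).
def iter_all_dirents(request, repo, commit, path, page_size=100):
    offset = 0
    while True:
        files, dirs, more = get_repo_dirents(request, repo, commit, path,
                                             offset=offset, limit=page_size)
        for dirent in dirs + files:
            yield dirent
        if not more:
            break
        offset += page_size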
Example #43
0
def load_mappings(app):
    """Load all intersphinx mappings into the environment."""
    now = int(time.time())
    cache_time = now - app.config.intersphinx_cache_limit * 86400
    env = app.builder.env
    if not hasattr(env, 'intersphinx_cache'):
        env.intersphinx_cache = {}
        env.intersphinx_inventory = {}
        env.intersphinx_named_inventory = {}
    cache = env.intersphinx_cache
    update = False
    for key, value in iteritems(app.config.intersphinx_mapping):
        if isinstance(value, (list, tuple)):
            # new format
            name, (uri, inv) = key, value
            if not isinstance(name, string_types):
                app.warn('intersphinx identifier %r is not string. Ignored' % name)
                continue
        else:
            # old format, no name
            name, uri, inv = None, key, value
        # we can safely assume that the uri<->inv mapping is not changed
        # during partial rebuilds since a changed intersphinx_mapping
        # setting will cause a full environment reread
        if not isinstance(inv, tuple):
            invs = (inv, )
        else:
            invs = inv

        for inv in invs:
            if not inv:
                inv = posixpath.join(uri, INVENTORY_FILENAME)
            # decide whether the inventory must be read: always read local
            # files; remote ones only if the cache time is expired
            if '://' not in inv or uri not in cache \
                    or cache[uri][1] < cache_time:
                safe_inv_url = _get_safe_url(inv)
                app.info(
                    'loading intersphinx inventory from %s...' % safe_inv_url)
                invdata = fetch_inventory(app, uri, inv)
                if invdata:
                    cache[uri] = (name, now, invdata)
                    update = True
                    break

    if update:
        env.intersphinx_inventory = {}
        env.intersphinx_named_inventory = {}
        # Duplicate values in different inventories will shadow each
        # other; which one will override which can vary between builds
        # since they are specified using an unordered dict.  To make
        # it more consistent, we sort the named inventories and then
        # add the unnamed inventories last.  This means that the
        # unnamed inventories will shadow the named ones but the named
        # ones can still be accessed when the name is specified.
        cached_vals = list(cache.values())
        named_vals = sorted(v for v in cached_vals if v[0])
        unnamed_vals = [v for v in cached_vals if not v[0]]
        for name, _x, invdata in named_vals + unnamed_vals:
            if name:
                env.intersphinx_named_inventory[name] = invdata
            for type, objects in iteritems(invdata):
                env.intersphinx_inventory.setdefault(
                    type, {}).update(objects)
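
# A tiny illustration (hypothetical data) of the merge order described in
# the comment above: inventories merged later win on duplicate keys, so the
# unnamed inventories, added last, shadow the named ones.
inventory = {}
for invdata in ({'json': 'named-docs'}, {'json': 'unnamed-docs'}):
    inventory.update(invdata)
print(inventory['json'])  # 'unnamed-docs'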
Example #44
0
    def test(self):
        pass


# Configure logging
logging.basicConfig(
    stream=sys.stdout,
    level=logging.DEBUG,
    format="%(asctime)s %(levelname)7s %(name)34s | %(message)s")

logger = logging.getLogger(__name__)

# Initialize emulator
emulator = Emulator(
    vfs_root=posixpath.join(posixpath.dirname(__file__), "vfs"))

# Register Java class.
emulator.java_classloader.add_class(MainActivity)
emulator.mu.hook_add(UC_HOOK_CODE, hook_code, emulator)

emulator.mu.hook_add(UC_HOOK_MEM_WRITE, hook_mem_write)
emulator.mu.hook_add(UC_HOOK_MEM_READ, hook_mem_read)

# Load all libraries.
lib_module = emulator.load_library("tests/bin/libnative-lib_jni.so")

#androidemu.utils.debug_utils.dump_symbols(emulator, sys.stdout)

# Show loaded modules.
logger.info("Loaded modules:")
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

import posixpath

from dashboard.pinpoint.models.quest import run_telemetry_test

# Benchmarks that need a Daydream View headset paired instead of Cardboard.
DAYDREAM_BENCHMARKS = [
    'xr.browsing.static',
    'xr.browsing.wpr.static',
    'xr.browsing.wpr.smoothness',
]
CARDBOARD_PREFS = posixpath.join('chrome', 'android',
                                 'shared_preference_files', 'test',
                                 'vr_cardboard_skipdon_setupcomplete.json')
DAYDREAM_PREFS = posixpath.join('chrome', 'android', 'shared_preference_files',
                                'test', 'vr_ddview_skipdon_setupcomplete.json')
# We need to apply a profile containing the VR browsing environment asset files
# if the benchmark is a VR browsing benchmark. Currently, this is the same as
# the set of benchmarks that need Daydream View paired.
BROWSING_BENCHMARKS = DAYDREAM_BENCHMARKS
ASSET_PROFILE_PATH = posixpath.join('gen', 'tools', 'perf', 'contrib',
                                    'vr_benchmarks', 'vr_assets_profile')


class RunVrTelemetryTest(run_telemetry_test.RunTelemetryTest):
    @classmethod
    def _ExtraTestArgs(cls, arguments):
        extra_test_args = []
Example #46
0
 def _sge_path(self, path):
     return posixpath.join(self.SGE_ROOT, path)
Example #47
0
    def put_guest(self, local_path, remote_path, use_sudo, mirror_local_mode, mode,
        local_is_path):
        """
        Upload file to a guest container
        """
        pre = self.ftp.getcwd()
        pre = pre if pre else ''
        if local_is_path and self.isdir(remote_path):
            basename = os.path.basename(local_path)
            remote_path = posixpath.join(remote_path, basename)
        if output.running:
            print("[%s] put: %s -> %s" % (
                env.host_string,
                local_path if local_is_path else '<file obj>',
                posixpath.join(pre, remote_path)
            ))

        # Have to bounce off FS if doing file-like objects
        fd, real_local_path = None, local_path
        if not local_is_path:
            fd, real_local_path = tempfile.mkstemp()
            old_pointer = local_path.tell()
            local_path.seek(0)
            file_obj = os.fdopen(fd, 'wb')
            file_obj.write(local_path.read())
            file_obj.close()
            local_path.seek(old_pointer)

        # Use temporary file with a unique name on the host machine
        guest_path = remote_path
        hasher = hashlib.sha1()
        hasher.update(env.host_string)
        hasher.update(name_or_ctid)  # container name/CTID from the enclosing module
        hasher.update(guest_path)
        host_path = hasher.hexdigest()

        # Upload the file to host machine
        rattrs = self.ftp.put(real_local_path, host_path)

        # Copy file to the guest container
        with settings(hide('everything'), cwd=""):
            _orig_run_command("cat \"%s\" | vzctl exec \"%s\" 'cat - > \"%s\"'"
                % (host_path, name_or_ctid, guest_path), sudo=True)

        # Revert to original remote_path for return value's sake
        remote_path = guest_path

        # Clean up
        if not local_is_path:
            os.remove(real_local_path)

        # Handle modes if necessary
        if (local_is_path and mirror_local_mode) or (mode is not None):
            lmode = os.stat(local_path).st_mode if mirror_local_mode else mode
            lmode = lmode & 07777
            rmode = rattrs.st_mode & 07777
            if lmode != rmode:
                with hide('everything'):
                    sudo('chmod %o \"%s\"' % (lmode, remote_path))

        return remote_path
Example #48
0
def GetArchiveUrl(host_os, version):
    basename = 'naclsdk_%s.tar.bz2' % (host_os, )
    return urlparse.urljoin(HTTPS_BASE_URL, posixpath.join(version, basename))
Example #49
0
def get_share_link_thumbnail_src(token, size, path):
    return posixpath.join("thumbnail", token, str(size), path.lstrip('/'))
Example #50
0
import re
import os
import posixpath

from funcy import retry
from funcy.py3 import cat
from pydrive2.files import ApiRequestError
from shutil import copyfile

newline_pattern = re.compile(r"[\r\n]")

GDRIVE_USER_CREDENTIALS_DATA = "GDRIVE_USER_CREDENTIALS_DATA"
DEFAULT_USER_CREDENTIALS_FILE = "credentials/default.dat"

TESTS_ROOTDIR = os.path.dirname(__file__)
SETTINGS_PATH = posixpath.join(TESTS_ROOTDIR, "settings/")
LOCAL_PATH = posixpath.join(TESTS_ROOTDIR, "settings/local/")


def setup_credentials(credentials_path=DEFAULT_USER_CREDENTIALS_FILE):
    os.chdir(TESTS_ROOTDIR)
    if os.getenv(GDRIVE_USER_CREDENTIALS_DATA):
        if not os.path.exists(os.path.dirname(credentials_path)):
            os.makedirs(os.path.dirname(credentials_path), exist_ok=True)
        with open(credentials_path, "w") as credentials_file:
            credentials_file.write(os.getenv(GDRIVE_USER_CREDENTIALS_DATA))


def settings_file_path(settings_file):
    template_path = SETTINGS_PATH + settings_file
    local_path = LOCAL_PATH + settings_file
Example #51
0
def create():
    """
    Creates the environment needed to host the project.
    The environment consists of: system locales, virtualenv, database, project
    files, SSL certificate, and project-specific Python requirements.
    """
    # Generate project locale
    locale = env.locale.replace("UTF-8", "utf8")
    with hide("stdout"):
        if locale not in run("locale -a"):
            sudo("locale-gen %s" % env.locale)
            sudo("update-locale %s" % env.locale)
            sudo("service postgresql restart")
            run("exit")

    # Create project path
    run("mkdir -p %s" % env.proj_path)

    # Set up virtual env
    run("mkdir -p %s" % env.venv_home)
    with cd(env.venv_home):
        if exists(env.proj_name):
            if confirm("Virtualenv already exists in host server: %s"
                       "\nWould you like to replace it?" % env.proj_name):
                run("rm -rf %s" % env.proj_name)
            else:
                abort()
        run("virtualenv %s" % env.proj_name)

    # Upload project files
    if env.deploy_tool in env.vcs_tools:
        vcs_upload()
    else:
        rsync_upload()

    # Create DB and DB user
    pw = db_pass()
    user_sql_args = (env.proj_name, pw.replace("'", "''"))  # escape quotes for SQL
    user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args
    psql(user_sql, show=False)
    shadowed = "*" * len(pw)
    print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed))
    psql("CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' "
         "LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;" %
         (env.proj_name, env.proj_name, env.locale, env.locale))

    # Set up SSL certificate
    if not env.ssl_disabled:
        conf_path = "/etc/nginx/conf"
        if not exists(conf_path):
            sudo("mkdir %s" % conf_path)
        with cd(conf_path):
            crt_file = env.proj_name + ".crt"
            key_file = env.proj_name + ".key"
            if not exists(crt_file) and not exists(key_file):
                try:
                    crt_local, = glob(join("deploy", "*.crt"))
                    key_local, = glob(join("deploy", "*.key"))
                except ValueError:
                    parts = (crt_file, key_file, env.domains[0])
                    sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
                         "-subj '/CN=%s' -days 3650" % parts)
                else:
                    upload_template(crt_local, crt_file, use_sudo=True)
                    upload_template(key_local, key_file, use_sudo=True)

    # Install project-specific requirements
    upload_template_and_reload("settings")
    with project():
        if env.reqs_path:
            pip("-r %s/%s" % (env.proj_path, env.reqs_path))
        pip("gunicorn setproctitle psycopg2 "
            "django-compressor python-memcached")
        # Bootstrap the DB
        manage("createdb --noinput --nodata")
        python(
            "from django.conf import settings;"
            "from django.contrib.sites.models import Site;"
            "Site.objects.filter(id=settings.SITE_ID).update(domain='%s');" %
            env.domains[0])
        for domain in env.domains:
            python("from django.contrib.sites.models import Site;"
                   "Site.objects.get_or_create(domain='%s');" % domain)
        if env.admin_pass:
            pw = env.admin_pass
            user_py = ("from django.contrib.auth import get_user_model;"
                       "User = get_user_model();"
                       "u, _ = User.objects.get_or_create(username='******');"
                       "u.is_staff = u.is_superuser = True;"
                       "u.set_password('%s');"
                       "u.save();" % pw)
            python(user_py, show=False)
            shadowed = "*" * len(pw)
            print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))

    return True
Example #52
0
def SmiPath():
  return posixpath.join(flags.cuda_toolkit_installation_dir,
                        'nvidia-smi')
Example #53
0
def _setup_path():
    # using posixpath to ensure unix style slashes.
    # See bug-ticket: http://code.fabfile.org/attachments/61/posixpath.patch
    env.root = posixpath.join(env.home, 'www', env.deploy_env)
    env.log_dir = posixpath.join(env.home, 'www', env.deploy_env, 'log')
    env.releases = posixpath.join(env.root, 'releases')
    env.code_current = posixpath.join(env.root, 'current')
    env.code_root = posixpath.join(env.releases, env.deploy_metadata.timestamp)
    env.project_root = posixpath.join(env.code_root, env.project)
    env.project_media = posixpath.join(env.code_root, 'media')
    env.virtualenv_current = posixpath.join(env.code_current, 'python_env')
    env.virtualenv_root = posixpath.join(env.code_root, 'python_env')
    env.services = posixpath.join(env.code_root, 'services')
    env.jython_home = '/usr/local/lib/jython'
    env.db = '%s_%s' % (env.project, env.deploy_env)
    env.offline_releases = posixpath.join('/home/{}/releases'.format(env.user))
    env.offline_code_dir = posixpath.join('{}/{}'.format(
        env.offline_releases, 'offline'))
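
# A brief illustration of the comment above: os.path.join follows the local
# OS (backslashes on Windows), while posixpath.join always emits the forward
# slashes a remote unix host expects (paths are hypothetical).
import ntpath
import posixpath

print(ntpath.join('home', 'www', 'production'))     # home\www\production
print(posixpath.join('home', 'www', 'production'))  # home/www/production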
Example #54
0
def get_thumbnail_src(repo_id, size, path):
    return posixpath.join("thumbnail", repo_id, str(size), path.lstrip('/'))
Example #55
0
def collect_pages(
        app: Sphinx) -> Generator[Tuple[str, Dict[str, Any], str], None, None]:
    env = app.builder.env
    if not hasattr(env, '_viewcode_modules'):
        return
    if not is_supported_builder(app.builder):
        return
    highlighter = app.builder.highlighter  # type: ignore
    urito = app.builder.get_relative_uri

    modnames = set(env._viewcode_modules)  # type: ignore

    for modname, entry in status_iterator(
            sorted(env._viewcode_modules.items()),  # type: ignore
            __('highlighting module code... '),
            "blue",
            len(env._viewcode_modules),  # type: ignore
            app.verbosity,
            lambda x: x[0]):
        if not entry:
            continue
        if not should_generate_module_page(app, modname):
            continue

        code, tags, used, refname = entry
        # construct a page name for the highlighted source
        pagename = posixpath.join(OUTPUT_DIRNAME, modname.replace('.', '/'))
        # highlight the source using the builder's highlighter
        if env.config.highlight_language in ('python3', 'default', 'none'):
            lexer = env.config.highlight_language
        else:
            lexer = 'python'
        highlighted = highlighter.highlight_block(code, lexer, linenos=False)
        # split the code into lines
        lines = highlighted.splitlines()
        # split off wrap markup from the first line of the actual code
        before, after = lines[0].split('<pre>')
        lines[0:1] = [before + '<pre>', after]
        # nothing to do for the last line; it always starts with </pre> anyway
        # now that we have code lines (starting at index 1), insert anchors for
        # the collected tags (HACK: this only works if the tag boundaries are
        # properly nested!)
        maxindex = len(lines) - 1
        for name, docname in used.items():
            type, start, end = tags[name]
            backlink = urito(pagename, docname) + '#' + refname + '.' + name
            lines[start] = (
                '<div class="viewcode-block" id="%s"><a class="viewcode-back" '
                'href="%s">%s</a>' % (name, backlink, _('[docs]')) +
                lines[start])
            lines[min(end, maxindex)] += '</div>'
        # try to find parents (for submodules)
        parents = []
        parent = modname
        while '.' in parent:
            parent = parent.rsplit('.', 1)[0]
            if parent in modnames:
                parents.append({
                    'link':
                    urito(
                        pagename,
                        posixpath.join(OUTPUT_DIRNAME,
                                       parent.replace('.', '/'))),
                    'title':
                    parent
                })
        parents.append({
            'link':
            urito(pagename, posixpath.join(OUTPUT_DIRNAME, 'index')),
            'title':
            _('Module code')
        })
        parents.reverse()
        # putting it all together
        context = {
            'parents':
            parents,
            'title':
            modname,
            'body':
            (_('<h1>Source code for %s</h1>') % modname + '\n'.join(lines)),
        }
        yield (pagename, context, 'page.html')

    if not modnames:
        return

    html = ['\n']
    # the stack logic is needed for using nested lists for submodules
    stack = ['']
    for modname in sorted(modnames):
        if modname.startswith(stack[-1]):
            stack.append(modname + '.')
            html.append('<ul>')
        else:
            stack.pop()
            while not modname.startswith(stack[-1]):
                stack.pop()
                html.append('</ul>')
            stack.append(modname + '.')
        html.append(
            '<li><a href="%s">%s</a></li>\n' %
            (urito(posixpath.join(OUTPUT_DIRNAME, 'index'),
                   posixpath.join(OUTPUT_DIRNAME, modname.replace(
                       '.', '/'))), modname))
    html.append('</ul>' * (len(stack) - 1))
    context = {
        'title':
        _('Overview: module code'),
        'body': (_('<h1>All modules for which code is available</h1>') +
                 ''.join(html)),
    }

    yield (posixpath.join(OUTPUT_DIRNAME, 'index'), context, 'page.html')
Example #56
0
        except (KeyError, ValueError):
            raise ImportError
    except (ImportError, AttributeError):
        print("Aborting, no hosts defined.")
        exit()

env.db_pass = conf.get("DB_PASS", None)
env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [""])

env.proj_name = conf.get("PROJECT_NAME", env.proj_app)
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s/.virtualenvs" % env.user)
env.venv_path = join(env.venv_home, env.proj_name)
env.proj_path = "/home/%s/mezzanine/%s" % (env.user, env.proj_name)
env.manage = "%s/bin/python %s/manage.py" % (env.venv_path, env.proj_path)
env.domains = conf.get("DOMAINS", [conf.get("LIVE_HOSTNAME", env.hosts[0])])
env.domains_nginx = " ".join(env.domains)
env.domains_regex = "|".join(env.domains)
env.domains_python = ", ".join(["'%s'" % s for s in env.domains])
env.ssl_disabled = "#" if len(env.domains) > 1 else ""
env.vcs_tools = ["git", "hg"]
env.deploy_tool = conf.get("DEPLOY_TOOL", "rsync")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.num_workers = conf.get("NUM_WORKERS",
                           "multiprocessing.cpu_count() * 2 + 1")

env.secret_key = conf.get("SECRET_KEY", "")
Example #57
0
 def __div__(self, other):
     return self.replace(path=posixpath.join(self._spath, other))
Example #58
0
def doctree_read(app: Sphinx, doctree: Node) -> None:
    env = app.builder.env
    if not hasattr(env, '_viewcode_modules'):
        env._viewcode_modules = {}  # type: ignore

    def has_tag(modname: str, fullname: str, docname: str,
                refname: str) -> bool:
        entry = env._viewcode_modules.get(modname, None)  # type: ignore
        if entry is False:
            return False

        code_tags = app.emit_firstresult('viewcode-find-source', modname)
        if code_tags is None:
            try:
                analyzer = ModuleAnalyzer.for_module(modname)
                analyzer.find_tags()
            except Exception:
                env._viewcode_modules[modname] = False  # type: ignore
                return False

            code = analyzer.code
            tags = analyzer.tags
        else:
            code, tags = code_tags

        if entry is None or entry[0] != code:
            entry = code, tags, {}, refname
            env._viewcode_modules[modname] = entry  # type: ignore
        _, tags, used, _ = entry
        if fullname in tags:
            used[fullname] = docname
            return True

        return False

    for objnode in doctree.traverse(addnodes.desc):
        if objnode.get('domain') != 'py':
            continue
        names = set()  # type: Set[str]
        for signode in objnode:
            if not isinstance(signode, addnodes.desc_signature):
                continue
            modname = signode.get('module')
            fullname = signode.get('fullname')
            refname = modname
            if env.config.viewcode_follow_imported_members:
                new_modname = app.emit_firstresult(
                    'viewcode-follow-imported',
                    modname,
                    fullname,
                )
                if not new_modname:
                    new_modname = _get_full_modname(app, modname, fullname)
                modname = new_modname
            if not modname:
                continue
            fullname = signode.get('fullname')
            if not has_tag(modname, fullname, env.docname, refname):
                continue
            if fullname in names:
                # only one link per name, please
                continue
            names.add(fullname)
            pagename = posixpath.join(OUTPUT_DIRNAME,
                                      modname.replace('.', '/'))
            signode += viewcode_anchor(reftarget=pagename,
                                       refid=fullname,
                                       refdoc=env.docname)
Example #59
0
def upimg_save(**kwargs):
    res = dict(code=1)
    try:
        filename = kwargs["filename"]
        stream = kwargs["stream"]
        upload_path = kwargs.get("upload_path") or ""
        if not filename or not stream:
            raise ValueError
    except (KeyError, ValueError):
        res.update(msg="Parameter error")
    else:
        token = g.cfg.github_token
        repo = g.cfg.github_repo
        branch = g.cfg.github_branch
        dn = g.cfg.github_dn
        github_basedir = g.cfg.github_basedir or '/'
        if not token or not repo or "/" not in repo:
            res.update(msg="The github parameter error")
            return res
        if isinstance(upload_path, string_types):
            if upload_path.startswith("/"):
                upload_path = upload_path.lstrip('/')
            if github_basedir.startswith("/"):
                github_basedir = github_basedir.lstrip('/')
            saveto = join(github_basedir, upload_path)
            filepath = join(saveto, filename)
            #: Upload the image via the GitHub API
            data = dict(
                message="Create %s by sapic" % filepath,
                content=b64encode(stream).decode("utf-8"),
            )
            if branch:
                data["branch"] = branch
            headers = {
                "User-Agent": "sapic-up2github/%s" % __version__,
                "Authorization": "token %s" % token
            }
            try:
                r = try_proxy_request(
                    "https://api.github.com/repos/%s/contents/%s" %
                    (repo, filepath),
                    data=json.dumps(data),
                    headers=headers,
                    timeout=30,
                    method="put")
            except requests.exceptions.RequestException as e:
                res.update(msg=e)
            else:
                result = r.json()
                if r.status_code == 201:
                    content = result["content"]
                    src = content["download_url"]
                    if dn:
                        src = slash_join(dn, filepath)
                    elif is_true(g.cfg.github_jsdelivr):
                        src = slash_join("https://cdn.jsdelivr.net/gh/", repo,
                                         filepath)
                    res.update(
                        code=0,
                        src=src,
                        basedir=github_basedir,
                        branch=branch,
                        size=content["size"],
                        content_sha=content["sha"],
                        download_url=content["download_url"],
                        repo=repo,
                    )
                else:
                    res.update(
                        code=r.status_code,
                        msg=result.get("message",
                                       "").replace('"', '\'').replace(
                                           '\n\n',
                                           ' ').replace('\n',
                                                        '').replace('\\', ''),
                    )
        else:
            res.update(msg="The upload_path type error")
    return res
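
# A minimal, hedged sketch of the GitHub contents-API upload performed above,
# stripped of the application plumbing (token/repo/filepath are placeholders;
# plain ``requests`` stands in for the proxy-aware helper used in the example).
import json
from base64 import b64encode

import requests

def github_upload(token, repo, filepath, data, branch=None):
    """PUT a file to https://api.github.com/repos/{repo}/contents/{filepath}."""
    payload = {
        "message": "Create %s" % filepath,
        "content": b64encode(data).decode("utf-8"),
    }
    if branch:
        payload["branch"] = branch
    r = requests.put(
        "https://api.github.com/repos/%s/contents/%s" % (repo, filepath),
        data=json.dumps(payload),
        headers={"Authorization": "token %s" % token},
        timeout=30,
    )
    return r.status_code == 201, r.json()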
Example #60
0
def remoteJoin(path1, path2):
    return posixpath.join(path1, path2)