Code example #1
    def run(self):
        directory = os.path.dirname(os.path.realpath(__file__))

        # Relationship
        print("Load Relationships type")

        # user
        print("Load Users")
        with open(directory + '/users.csv', 'r') as csv_file:
            spam_reader = csv.reader(csv_file, delimiter=',', quotechar='|')
            for row in spam_reader:
                UserActions.create_user_from_csv(row)

        # id,owner_id,friend_id,relation_type,active
        print("Load Friend relationships")
        with open(directory + '/friend_relationships.csv', 'r') as csv_file:
            spam_reader = csv.reader(csv_file, delimiter=',', quotechar='|')
            for row in spam_reader:
                FriendRelationshipActions.create_from_csv(row)

        # id;story;created;user_id
        print("Load Posts")
        with open(directory + '/posts.csv', 'r') as csv_file:
            spam_reader = csv.reader(csv_file, delimiter=',', quotechar='|')
            for row in spam_reader:
                PostActions.create_from_csv(row)

        # id}name}description}start_time}user_id
        print("Load Events")
        with open(directory + '/events.csv', 'r') as csv_file:
            spam_reader = csv.reader(csv_file, delimiter='}', quotechar='|')
            for row in spam_reader:
                EventActions.create_from_csv(row)

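
A minimal sketch (the sample row is invented) of how csv.reader treats the delimiter/quotechar combination used above:

    import csv
    import io

    # '|' is the quote character, so '|Alice Smith|' parses as one field.
    sample = io.StringIO('1,|Alice Smith|,alice@example.com\n')
    for row in csv.reader(sample, delimiter=',', quotechar='|'):
        print(row)  # ['1', 'Alice Smith', 'alice@example.com']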
Code example #2
File: test_base.py Project: onaio/onadata
    def _make_submission_w_attachment(self, path, attachment_path):
        with open(path, encoding='utf-8') as f:
            data = {'xml_submission_file': f}
            if attachment_path is not None:
                if isinstance(attachment_path, list):
                    for c in range(len(attachment_path)):
                        data['media_file_{}'.format(c)] = open(
                            attachment_path[c], 'rb')
                else:
                    data['media_file'] = open(
                        attachment_path, 'rb')

            url = '/%s/submission' % self.user.username
            auth = DigestAuth('bob', 'bob')
            self.factory = APIRequestFactory()
            request = self.factory.post(url, data)
            request.user = authenticate(username='******',
                                        password='******')
            self.response = submission(request,
                                       username=self.user.username)

            if auth and self.response.status_code == 401:
                request.META.update(auth(request.META, self.response))
                self.response = submission(request,
                                           username=self.user.username)
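
One caveat in the snippet above: the attachment handles placed into data are never closed. A hedged alternative (attachment_paths is a hypothetical input list) that closes them even if the request fails:

    import contextlib

    attachment_paths = ['a.jpg', 'b.jpg']  # hypothetical inputs
    with contextlib.ExitStack() as stack:
        data = {'media_file_{}'.format(c): stack.enter_context(open(p, 'rb'))
                for c, p in enumerate(attachment_paths)}
        # ... build and post the submission request as above ...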
Code example #3
File: ice_simulator.py Project: mbus/m3-python
def create_fake_serial(
        endpoint1=_FAKE_SERIAL_CONNECTTO_ENDPOINT,
        endpoint2=_FAKE_SERIAL_SIMULATOR_ENDPOINT,
        ):
    global _socat_fpre
    global _socat_proc
    global _socat_devnull

    _socat_devnull = open(os.devnull, 'w')
    _socat_proc = subprocess.Popen(
                "socat -x pty,link={},raw,echo=0 pty,link={},raw,echo=0".format(endpoint1, endpoint2),
                stdout=open(_socat_fpre + 'socat-stdout', 'w'),
                stderr=open(_socat_fpre + 'socat-stderr', 'w'),
                shell=True,
                )

    # Hack: socat doesn't exit, but we do need to wait for the pipes to be set up
    limit = time.time() + 5
    while not (os.path.exists(endpoint1) and os.path.exists(endpoint2)):
        time.sleep(.1)
        if time.time() > limit:
            _socat_proc.kill()
            for l in open(_socat_fpre + 'socat-stdout'):
                logger.debug(l)
            for l in open(_socat_fpre + 'socat-stderr'):
                logger.debug(l)
            raise NotImplementedError("socat endpoint never appeared?")

    logger.debug("Fake serial bridge created.")

    atexit.register(destroy_fake_serial)
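
A hedged usage sketch; pyserial is an assumed consumer of the linked PTYs, not something this module requires:

    import serial  # assumption: any serial client would do

    create_fake_serial()
    with serial.Serial(_FAKE_SERIAL_SIMULATOR_ENDPOINT, timeout=1) as ser:
        ser.write(b'ping')
    # destroy_fake_serial() runs automatically via the atexit hook above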
Code example #4
 def encode_multipart_formdata(self, fields, files, baseName, verbose=False):
     """
     fields is a sequence of (name, value) elements for regular form fields.
     files is a sequence of (name, filename, value) elements for data
     to be uploaded as files
     Return (content_type, body) ready for httplib.HTTP instance
     """
     BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
     CRLF = '\r\n'
     content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
     if verbose is True:
         print('Encoding ' + baseName + ' for upload...')
     fin = open(files, 'rb')
     fout = open(files + '.b64', 'wb')
     fout.write(bytearray('--' + BOUNDARY + CRLF, 'utf-8'))
     fout.write(bytearray('Content-Disposition: form-data'
                          '; name="file"; filename="' +
                          baseName + '"' + CRLF, "utf-8"))
     fout.write(bytearray('Content-Type: application/octet-stream' + CRLF,
                          'utf-8'))
     fout.write(bytearray(CRLF, 'utf-8'))
     shutil.copyfileobj(fin, fout)
     fout.write(bytearray(CRLF, 'utf-8'))
     fout.write(bytearray('--' + BOUNDARY + '--' + CRLF, 'utf-8'))
     fout.write(bytearray(CRLF, 'utf-8'))
     fout.close()
     fin.close()
     return content_type
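
A hedged usage sketch (the uploader object and paths are hypothetical); note that the body lands on disk rather than being returned:

    content_type = uploader.encode_multipart_formdata(
        [], '/tmp/payload.bin', 'payload.bin', verbose=True)
    with open('/tmp/payload.bin.b64', 'rb') as body:
        pass  # stream `body` with the returned content_type header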
Code example #5
  def test_generating_report_from_fetch(self):
    # Ensure the ivy report file gets generated and populated.
    with self.temporary_workdir() as workdir, temporary_dir() as cache_dir:
      config = {'cache': {'write_to': [cache_dir], 'read_from': [cache_dir]}}

      def run_pants(command):
        return self.run_pants_with_workdir(command, workdir, config=config)
      with temporary_dir() as ivy_report_dir:
        first_run = run_pants(['resolve', '3rdparty:junit', '--resolve-ivy-report',
                               '--resolve-ivy-outdir={reportdir}'.format(reportdir=ivy_report_dir)])
        self.assert_success(first_run)

        html_report_file, listdir = self._find_html_report(ivy_report_dir)
        self.assertIsNotNone(html_report_file,
                        msg="Couldn't find ivy report in {report_dir} containing files {listdir}"
                        .format(report_dir=ivy_report_dir, listdir=listdir))

        with open(os.path.join(ivy_report_dir, html_report_file), 'r') as file:
          self.assertIn('junit', file.read())

      run_pants(['clean-all'])

      with temporary_dir() as ivy_report_dir:
        fetch_run = run_pants(['resolve', '3rdparty:junit', '--resolve-ivy-report',
                               '--resolve-ivy-outdir={reportdir}'.format(reportdir=ivy_report_dir)])
        self.assert_success(fetch_run)

        # Find the ivy report.
        html_report_file, listdir = self._find_html_report(ivy_report_dir)
        self.assertIsNotNone(html_report_file,
                        msg="Couldn't find ivy report in {report_dir} containing files {listdir}"
                        .format(report_dir=ivy_report_dir, listdir=listdir))

        with open(os.path.join(ivy_report_dir, html_report_file), 'r') as file:
          self.assertIn('junit', file.read())
Code example #6
File: __init__.py Project: erikdejonge/pycodequality
def main():
    """
    main
    """
    arguments = IArguments(__doc__)
    confilepathath = os.path.expanduser("~/.pylint.conf")
    pylintconf = get_pylint_conf()

    pylintconf += "\n# Regexp for a line that is allowed to be longer than the limit.\n"
    pylintconf += r"ignore-long-lines=^\s*(# )?<?https?://\S+>?$\n\n"

    print("\033[91mRating your code:", arguments.folder, "\033[0m")
    with open(confilepathath, "w") as conf_fh:
        conf_fh.write(pylintconf)
    checkfiles = set()

    if os.path.isfile(arguments.folder):
        checkfiles = [os.path.expanduser(arguments.folder)]
        arguments.showhints = True
    else:
        for root, _, files in os.walk(arguments.folder):
            check_files(checkfiles, files, root)

    checkfiles = list(checkfiles)
    checkfiles.sort(key=lambda x: (os.path.dirname(x), os.path.basename(x)))
    cnt = 0
    totalscore = 0.0

    for filepath in checkfiles:
        cnt += 1
        totalscore += rate_code(cnt, filepath, arguments.showhints, len(checkfiles))

    if cnt > 0:
        print("\033[34m---\nstotalscore:\033[34m {:.2f}".format(old_div(totalscore, cnt)), "\033[0m")
Code example #7
File: m3_common.py Project: mbus/m3-python
    def read_binfile_static(binfile):
        def guess_type_is_hex(binfile):
            try:
                for line in open(binfile):
                    for c in line.strip():
                        c = ord(c)
                        if c < 0x20 or c > 0x7a:
                            return False
                return True
            except UnicodeDecodeError:
                return False

        if guess_type_is_hex(binfile):
            binfd = open(binfile, 'r')
            hexencoded = ""
            for line in binfd:
                hexencoded += line[0:2].upper()
        else:
            binfd = open(binfile, 'rb')
            hexencoded = binascii.hexlify(binfd.read()).upper()

        if (len(hexencoded) % 4 == 0) and (len(hexencoded) % 8 != 0):
            # Image is halfword-aligned. Some tools generate these, but our
            # system assumes word alignment, so we pad a nop onto the end to
            # fix it up.
            hexencoded += '46C0'  # nop (mov r8, r8)

        if (len(hexencoded) % 8) != 0:
            logger.warn("Binfile is not word-aligned. This is not a valid image")
            return None

        return hexencoded
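
A hedged usage sketch (the enclosing class name and the image path are hypothetical):

    hexencoded = M3Common.read_binfile_static('firmware.bin')
    if hexencoded is None:
        raise SystemExit('firmware.bin is not a word-aligned image')
    print('image is {} words long'.format(len(hexencoded) // 8))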
Code example #8
File: test_preprocess.py Project: Conxz/nipype
def setup_infile():
    global tmp_infile, tmp_dir
    ext = Info.output_type_to_ext(Info.output_type())
    tmp_dir = tempfile.mkdtemp()
    tmp_infile = os.path.join(tmp_dir, 'foo' + ext)
    open(tmp_infile, 'w').close()
    return tmp_infile, tmp_dir
Code example #9
def load_index( dataDirectory ):
    global sKgsUrl, urls, fileInfos
    try:
        indexpagefile = open( 'cached_indexpage.html', 'r' )
        indexpagecontents = indexpagefile.read()
        indexpagefile.close()
        print('reading index page from cache')
#        import zip_urls.py
    except:
        #print('no cached_indexpage.py found')
        print('downloading index page...')
        indexpagecontents = downloadPage( sKgsUrl )
        print( indexpagecontents )
        print( type( indexpagecontents ) )
        indexpagefile = open( 'cached_indexpage.~html', 'w')
        indexpagefile.write( indexpagecontents )
        indexpagefile.close()
        os.rename( 'cached_indexpage.~html', dataDirectory + 'cached_indexpage.html' )
#    print page
    splitpage = indexpagecontents.split('<a href="')
    urls = []
    for downloadUrlBit in splitpage:
        if downloadUrlBit.startswith( "http://" ):
            downloadUrl = downloadUrlBit.split('">Download')[0]
            if downloadUrl.endswith('.zip'):
                urls.append( downloadUrl )
    for url in urls:
        filename = os.path.basename( url )
        splitFilename = filename.split('-')
        numGamesString = splitFilename[len(splitFilename)-2]
        numGames = int( numGamesString )
        print( filename + ' ' + str( numGames ) )
        fileInfos.append( { 'url': url, 'filename': filename, 'numGames': numGames } )
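
A hedged usage sketch; the module-level globals (sKgsUrl, urls, fileInfos) are assumed to be initialised elsewhere in the script:

    fileInfos = []
    load_index('data/')  # populates the module-level urls and fileInfos
    print('found {} zip archives'.format(len(urls)))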
Code example #10
File: bgzip.py Project: sein-tao/pyBioUtil
    def __new__(cls, filename, mode='r'):
        "Constructor, program indecates the path to excutable file"
        raw_mode = mode
        if 't' in mode:
            warnings.warn("'t' in BGzipFile: not competable mode.")
            mode = mode.replace('t','')
        if 'b' not in mode:
            mode += 'b'

        if 'r' in mode:
            pipe = subprocess.Popen([cls._cmd, '-d'], 
                    stdin = builtins.open(filename, mode), 
                    stdout=subprocess.PIPE)
            fh = pipe.stdout
        elif 'w' in mode:
            pipe = subprocess.Popen([cls._cmd,],
                    stdin = subprocess.PIPE,
                    stdout = builtins.open(filename, mode))
            fh = pipe.stdin
        else:
            raise ValueError("Invalid mode: {}".format(raw_mode))

        def close(fh):
            fh._close()
            if pipe.wait() != 0:
                warnings.warn("file close error:{}".format(filename))
            # else:
            #     print("pipe closed correctly")
        fh._close = fh.close 
        fh.close = types.MethodType(close, fh)
        return fh
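
A hedged usage sketch ('calls.vcf.gz' is a hypothetical path); the returned pipe handle behaves like an ordinary binary file object, and the patched close() also reaps the bgzip subprocess:

    fh = BGzipFile('calls.vcf.gz', 'w')
    fh.write(b'hello world\n')
    fh.close()  # waits for bgzip and warns if it exited non-zero

    fh = BGzipFile('calls.vcf.gz', 'r')
    print(fh.read())  # b'hello world\n'
    fh.close()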
Code example #11
File: test_io.py Project: amoliu/nipype
def test_datasink_substitutions():
    indir = mkdtemp(prefix='-Tmp-nipype_ds_subs_in')
    outdir = mkdtemp(prefix='-Tmp-nipype_ds_subs_out')
    files = []
    for n in ['ababab.n', 'xabababyz.n']:
        f = os.path.join(indir, n)
        files.append(f)
        open(f, 'w').close()
    ds = nio.DataSink(
        parametrization=False,
        base_directory=outdir,
        substitutions=[('ababab', 'ABABAB')],
        # end anchoring ($) is used to ensure the pattern operates on the
        # filename rather than matching possible temporary directory names.
        # Patterns should be more comprehensible in real-world usage, where
        # the paths involved are more sensible.
        regexp_substitutions=[(r'xABABAB(\w*)\.n$', r'a-\1-b.n'),
                              ('(.*%s)[-a]([^%s]*)$' % ((os.path.sep,) * 2),
                               r'\1!\2')])
    setattr(ds.inputs, '@outdir', files)
    ds.run()
    yield assert_equal, \
        sorted([os.path.basename(x) for
            x in glob.glob(os.path.join(outdir, '*'))]), \
        ['!-yz-b.n', 'ABABAB.n']  # the regexp substitutions ran second, and both patterns applied
    shutil.rmtree(indir)
    shutil.rmtree(outdir)
Code example #12
    def test_static_path_helper_refuses_to_write_to_non_empty_paths(self):
        # The static path helper should only write to paths that are empty
        # or that include a version file.
        # Only such paths are considered safe to write to.
        import os

        def try_init(path, should_succeed):
            success = False
            try:
                init_static_path(path)
                success = True
            except SijaxError:
                pass

            self.assertEqual(should_succeed, success)

        # New empty temporary dir should work
        with temporary_dir() as static_path:
            try_init(static_path, True)

        # A directory with some files (but not version file) should fail
        with temporary_dir() as static_path:
            with open(os.path.join(static_path, 'some.file'), 'w') as fp:
                fp.write('blah')
            try_init(static_path, False)

        # A directory with some files, but also a version file
        with temporary_dir() as static_path:
            with open(os.path.join(static_path, 'some.file'), 'w') as fp:
                fp.write('blah')
            with open(os.path.join(static_path, 'sijax_version'), 'w') as fp:
                fp.write('version_string')
            try_init(static_path, True)
Code example #13
File: test_join.py Project: Conxz/nipype
def test_set_join_node_file_input():
    """Test collecting join inputs to a set."""
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    open('test.nii', 'w+').close()
    open('test2.nii', 'w+').close()

    # Make the workflow.
    wf = pe.Workflow(name='test')
    # the iterated input node
    inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec')
    inputspec.iterables = [('n', [os.path.join(wd, 'test.nii'), os.path.join(wd, 'test2.nii')])]
    # a pre-join node in the iterated path
    pre_join1 = pe.Node(IdentityInterface(fields=['n']), name='pre_join1')
    wf.connect(inputspec, 'n', pre_join1, 'n')
    # the set join node
    join = pe.JoinNode(PickFirst(), joinsource='inputspec',
                       joinfield='in_files', name='join')
    wf.connect(pre_join1, 'n', join, 'in_files')

    wf.run()

    os.chdir(cwd)
    rmtree(wd)
Code example #14
File: test_io.py Project: amoliu/nipype
def test_jsonsink():
    import simplejson
    import os

    ds = nio.JSONFileSink()
    yield assert_equal, ds.inputs._outputs, {}
    ds = nio.JSONFileSink(in_dict={'foo': 'var'})
    yield assert_equal, ds.inputs.in_dict, {'foo': 'var'}
    ds = nio.JSONFileSink(infields=['test'])
    yield assert_true, 'test' in ds.inputs.copyable_trait_names()

    curdir = os.getcwd()
    outdir = mkdtemp()
    os.chdir(outdir)
    js = nio.JSONFileSink(infields=['test'], in_dict={'foo': 'var'})
    js.inputs.new_entry = 'someValue'
    setattr(js.inputs, 'contrasts.alt', 'someNestedValue')
    res = js.run()

    with open(res.outputs.out_file, 'r') as f:
        data = simplejson.load(f)
    yield assert_true, data == {"contrasts": {"alt": "someNestedValue"}, "foo": "var", "new_entry": "someValue"}

    js = nio.JSONFileSink(infields=['test'], in_dict={'foo': 'var'})
    js.inputs.new_entry = 'someValue'
    js.inputs.test = 'testInfields'
    setattr(js.inputs, 'contrasts.alt', 'someNestedValue')
    res = js.run()

    with open(res.outputs.out_file, 'r') as f:
        data = simplejson.load(f)
    yield assert_true, data == {"test": "testInfields", "contrasts": {"alt": "someNestedValue"}, "foo": "var", "new_entry": "someValue"}

    os.chdir(curdir)
    shutil.rmtree(outdir)
Code example #15
  def test_pydist_invalidation(self):
    """Test that the current version of a python_dist() is resolved after modifying its sources."""
    hello_run = '{}:main_with_no_conflict'.format(self.hello_install_requires_dir)

    with self.mock_buildroot(
        dirs_to_copy=[self.hello_install_requires_dir]) as buildroot, buildroot.pushd():
      run_target = lambda: self.run_pants_with_workdir(
        command=['--quiet', 'run', hello_run],
        workdir=os.path.join(buildroot.new_buildroot, '.pants.d'),
        build_root=buildroot.new_buildroot,
      )

      unmodified_pants_run = run_target()
      self.assert_success(unmodified_pants_run)
      self._assert_nation_and_greeting(unmodified_pants_run.stdout_data)

      # Modify one of the source files for this target so that the output is different.
      py_source_file = os.path.join(
        self.hello_install_requires_dir, 'hello_package/hello.py')
      with open(py_source_file, 'r') as f:
        orig_contents = f.read()
      # Replace hello! with hello?
      modified_contents = re.sub('!', '?', orig_contents)
      with open(py_source_file, 'w') as f:
        f.write(modified_contents)

      modified_pants_run = run_target()
      self.assert_success(modified_pants_run)
      self._assert_nation_and_greeting(modified_pants_run.stdout_data, punctuation='?')
Code example #16
File: test_io.py Project: mfalkiewicz/nipype
def test_datasink_localcopy(dummy_input, tmpdir):
    '''
    Function to validate DataSink will make local copy via local_copy
    attribute
    '''

    # Init variables
    local_dir = tmpdir.strpath
    container = 'outputs'
    attr_folder = 'text_file'

    # Make dummy input file and datasink
    input_path = dummy_input

    ds = nio.DataSink()

    # Set up datasink
    ds.inputs.container = container
    ds.inputs.local_copy = local_dir

    setattr(ds.inputs, attr_folder, input_path)

    # Expected local copy path
    local_copy = os.path.join(local_dir, container, attr_folder,
                              os.path.basename(input_path))

    # Run the datasink
    ds.run()

    # Check md5sums of both
    src_md5 = hashlib.md5(open(input_path, 'rb').read()).hexdigest()
    dst_md5 = hashlib.md5(open(local_copy, 'rb').read()).hexdigest()

    # Perform test
    assert src_md5 == dst_md5
Code example #17
    def _convert_dir(self, notes):
        if self.output_dir is None:
            sys.stdout.write(json.dumps(notes))
        else:
            if (os.path.exists(self.output_dir) and
                    not os.path.isdir(self.output_dir)):
                print('"{}" exists but is not a directory.'.format(
                  self.output_dir))
                sys.exit(1)
            elif not os.path.exists(self.output_dir):
                os.makedirs(self.output_dir)
            for i, note in enumerate(notes):
                if self.preserve_title:
                    # (nicholaskuechler) try to preserve the title, but replace
                    # spaces with underscores, replace forward slash with dash,
                    # and preserve the note number in case of duplicate titles.
                    note_title = note['title']
                    note_title = self.sanitize_note_title(note_title)
                    note_title = "%s-%s" % (note_title, i)
                else:
                    note_title = str(i)

                try:
                    output_file_path = \
                        os.path.join(self.output_dir, note_title + '.txt')
                    with open(output_file_path, 'w') as output_file:
                        output_file.write(note['content'])
                except Exception as e:
                    output_file_path = os.path.join(
                        self.output_dir,
                        "title_fail" + '-' + str(i) + '.txt')
                    print("failed to use title for filename: {}".format(e))
                    with open(output_file_path, 'w') as output_file:
                        output_file.write(note['content'])
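
sanitize_note_title is referenced above but not shown; a hedged sketch of what it plausibly does, following the comment in the loop (spaces to underscores, forward slashes to dashes):

    def sanitize_note_title(self, note_title):
        # Hypothetical implementation inferred from the comment above.
        return note_title.replace(' ', '_').replace('/', '-')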
Code example #18
  def test_pantsd_lifecycle_non_invalidation_on_config_string(self):
    with temporary_dir() as dist_dir_root, temporary_dir() as config_dir:
      config_files = [
        os.path.abspath(os.path.join(config_dir, 'pants.ini.{}'.format(i))) for i in range(2)
      ]
      for config_file in config_files:
        print('writing {}'.format(config_file))
        with open(config_file, 'w') as fh:
          fh.write('[GLOBAL]\npants_distdir: {}\n'.format(os.path.join(dist_dir_root, 'v1')))

      invalidating_config = os.path.join(config_dir, 'pants.ini.invalidates')
      with open(invalidating_config, 'w') as fh:
        fh.write('[GLOBAL]\npants_distdir: {}\n'.format(os.path.join(dist_dir_root, 'v2')))

      with self.pantsd_successful_run_context() as (pantsd_run, checker, _, _):
        variants = [['--pants-config-files={}'.format(f), 'help'] for f in config_files]
        pantsd_pid = None
        for cmd in itertools.chain(*itertools.repeat(variants, 2)):
          pantsd_run(cmd)
          if not pantsd_pid:
            pantsd_pid = checker.assert_started()
          else:
            checker.assert_running()

        pantsd_run(['--pants-config-files={}'.format(invalidating_config), 'help'])
        self.assertNotEqual(pantsd_pid, checker.assert_started())
Code example #19
    def test_post_submission_uuid_other_user_username_not_provided(self):
        """
        Test submission without formhub/uuid done by a different user who has
        no permission to the form fails.
        """
        alice_data = {'username': '******', 'email': '*****@*****.**'}
        self._create_user_profile(alice_data)
        s = self.surveys[0]
        media_file = "1335783522563.jpg"
        path = os.path.join(self.main_directory, 'fixtures',
                            'transportation', 'instances', s, media_file)
        with open(path, 'rb') as f:
            f = InMemoryUploadedFile(f, 'media_file', media_file, 'image/jpg',
                                     os.path.getsize(path), None)
            path = os.path.join(
                self.main_directory, 'fixtures',
                'transportation', 'instances', s, s + '.xml')
            path = self._add_uuid_to_submission_xml(path, self.xform)

            with open(path, 'rb') as sf:
                data = {'xml_submission_file': sf, 'media_file': f}
                request = self.factory.post('/submission', data)
                response = self.view(request)
                self.assertEqual(response.status_code, 401)
                auth = DigestAuth('alice', 'bobbob')
                request.META.update(auth(request.META, response))
                response = self.view(request)
                self.assertEqual(response.status_code, 403)
Code example #20
    def update_statistics(self, sm, energy, member_energies=[], change=""):
        """
        Add a new structure to the statistics.

        :param sm: The spatial model.
        :param energy: The energy used for sampling of the structure, or None.
        :param member_energies: A list of tuples `(energy_shortname, value)`
        :param change: A short free-form string describing the accepted move.
        """
        self.step += 1
        line = ["{:6d}\t{:10.3f}".format(self.step, energy)]
        if self.options["constituing_energies"] == "no_clash":
            ignore_names = [fbe.RoughJunctionClosureEnergy().shortname(),
                            fbe.StemVirtualResClashEnergy().shortname()]
        else:
            ignore_names = []
        line.append("( "+" ".join("{} {:10.3f}".format(*x) for x in member_energies
                                      if x[0] not in ignore_names)+" )")
        line.append(self.collector.update(sm, self.step))
        line.append(change)
        self.printline("\t".join(line))

        if self.best_cgs.can_insert_right((None, energy)):
            self.best_cgs.insert_right((sm.bg.to_cg_string(), energy))

        if self.step % 10 == 0:
            for i, cg_stri in enumerate(self.best_cgs):
                with open(os.path.join(conf.Configuration.sampling_output_dir,
                                      'best{:d}.coord'.format(i)), 'w') as f:
                    f.write(cg_stri[0])

        if self.options["step_save"]>0 and self.step % self.options["step_save"] ==0:
            cg_stri = sm.bg.to_cg_string()
            with open(os.path.join(conf.Configuration.sampling_output_dir,
                              'step{:06d}.coord'.format(self.step)), "w") as f:
                f.write(cg_stri)
Code example #21
 def test_post_submission_authenticated(self):
     """
     Test authenticated user can make a submission.
     """
     s = self.surveys[0]
     media_file = "1335783522563.jpg"
     path = os.path.join(self.main_directory, 'fixtures',
                         'transportation', 'instances', s, media_file)
     with open(path, 'rb') as f:
         f = InMemoryUploadedFile(f, 'media_file', media_file, 'image/jpg',
                                  os.path.getsize(path), None)
         submission_path = os.path.join(
             self.main_directory, 'fixtures',
             'transportation', 'instances', s, s + '.xml')
         with open(submission_path, 'rb') as sf:
             data = {'xml_submission_file': sf, 'media_file': f}
             request = self.factory.post('/submission', data)
             response = self.view(request)
             self.assertEqual(response.status_code, 401)
             auth = DigestAuth('bob', 'bobbob')
             request.META.update(auth(request.META, response))
             response = self.view(request, username=self.user.username)
             self.assertContains(response, 'Successful submission',
                                 status_code=201)
             self.assertTrue(response.has_header('X-OpenRosa-Version'))
             self.assertTrue(
                 response.has_header('X-OpenRosa-Accept-Content-Length'))
             self.assertTrue(response.has_header('Date'))
             self.assertEqual(response['Content-Type'],
                              'text/xml; charset=utf-8')
             self.assertEqual(response['Location'],
                              'http://testserver/submission')
Code example #22
 def test_post_submission_require_auth_anonymous_user(self):
     """
     Test anonymous user cannot make a submission if the form requires
     authentication.
     """
     self.user.profile.require_auth = True
     self.user.profile.save()
     count = Attachment.objects.count()
     s = self.surveys[0]
     media_file = "1335783522563.jpg"
     path = os.path.join(self.main_directory, 'fixtures',
                         'transportation', 'instances', s, media_file)
     with open(path, 'rb') as f:
         f = InMemoryUploadedFile(f, 'media_file', media_file, 'image/jpg',
                                  os.path.getsize(path), None)
         submission_path = os.path.join(
             self.main_directory, 'fixtures',
             'transportation', 'instances', s, s + '.xml')
         with open(submission_path, 'rb') as sf:
             data = {'xml_submission_file': sf, 'media_file': f}
             request = self.factory.post('/submission', data)
             response = self.view(request)
             self.assertEqual(response.status_code, 401)
             response = self.view(request, username=self.user.username)
             self.assertEqual(response.status_code, 401)
             self.assertEqual(count, Attachment.objects.count())
Code example #23
File: security.py Project: pccsei/adit
def secure(path, force=False):
    assert force, 'Do not run this unless you know what you are doing!'
    engine = me.Encrypter(KEY, PRIMER)
    with builtins.open(path, 'rb') as file:
        data = engine.process(file.read())
    with builtins.open(path, 'wb') as file:
        file.write(data)
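
A hedged usage sketch ('secrets.db' is a hypothetical path); the assert makes the destructive in-place rewrite strictly opt-in:

    secure('secrets.db', force=True)  # rewrites the file in place
    # secure('secrets.db')            # raises AssertionError by design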
Code example #24
File: test_engine.py Project: djarecka/nipype
def test_write_graph_dotfile_iterables(tmpdir, graph_type, simple):
    """ checking dot files for a workflow with iterables"""
    tmpdir.chdir()

    pipe = pe.Workflow(name='pipe')
    mod1 = pe.Node(interface=EngineTestInterface(), name='mod1')
    mod1.iterables = ('input1', [1, 2])
    mod2 = pe.Node(interface=EngineTestInterface(), name='mod2')
    pipe.connect([(mod1, mod2, [('output1', 'input1')])])
    pipe.write_graph(
        graph2use=graph_type, simple_form=simple, format='dot')

    with open("graph.dot") as f:
        graph_str = f.read()

    if simple:
        for line in dotfiles_iter[graph_type]:
            assert line in graph_str
    else:
        # if simple=False graph.dot uses longer names
        for line in dotfiles_iter[graph_type]:
            if graph_type in ["hierarchical", "colored"]:
                assert line.replace("mod1 (engine)", "mod1.EngineTestInterface.engine").replace(
                    "mod2 (engine)", "mod2.EngineTestInterface.engine") in graph_str
            else:
                assert line.replace(
                    "mod1 (engine)", "pipe.mod1.EngineTestInterface.engine").replace(
                    "mod2 (engine)", "pipe.mod2.EngineTestInterface.engine") in graph_str

    # graph_detailed is not created for hierarchical or colored
    if graph_type not in ["hierarchical", "colored"]:
        with open("graph_detailed.dot") as f:
            graph_str = f.read()
        for line in dotfiles_detailed_iter[graph_type]:
            assert line in graph_str
Code example #25
File: pbsgraph.py Project: mfalkiewicz/nipype
    def _submit_graph(self, pyfiles, dependencies, nodes):
        batch_dir, _ = os.path.split(pyfiles[0])
        submitjobsfile = os.path.join(batch_dir, 'submit_jobs.sh')
        with open(submitjobsfile, 'wt') as fp:
            fp.writelines('#!/usr/bin/env sh\n')
            for idx, pyscript in enumerate(pyfiles):
                node = nodes[idx]
                template, qsub_args = self._get_args(
                    node, ["template", "qsub_args"])

                batch_dir, name = os.path.split(pyscript)
                name = '.'.join(name.split('.')[:-1])
                batchscript = '\n'.join((template,
                                         '%s %s' % (sys.executable, pyscript)))
                batchscriptfile = os.path.join(batch_dir,
                                               'batchscript_%s.sh' % name)
                with open(batchscriptfile, 'wt') as batchfp:
                    batchfp.writelines(batchscript)
                deps = ''
                if idx in dependencies:
                    values = ['$job%05d' %
                              jobid for jobid in dependencies[idx]]
                    if len(values):
                        deps = '-W depend=afterok:%s' % ':'.join(values)
                fp.writelines('job%05d=`qsub %s %s %s`\n' % (idx, deps,
                                                             qsub_args,
                                                             batchscriptfile))
        cmd = CommandLine('sh', environ=dict(os.environ),
                          resource_monitor=False,
                          terminal_output='allatonce')
        cmd.inputs.args = '%s' % submitjobsfile
        cmd.run()
        logger.info('submitted all jobs to queue')
Code example #26
    def write_pddl_files(self, domain_file, problem_file, mdd_actions):
        """ Writes the domain and problem pddl files for a sdac compilation.
        """
        #TODO A better design would be to take an action compilation as input and
        # generate the pddl description for the compilation. This way we could
        # use this method for the exponential compilation as well
        domain_tokens = lisp_parser.parse_nested_list(builtins.open(domain_file))
        problem_tokens = lisp_parser.parse_nested_list(builtins.open(problem_file))
        # delete actions in token list
        domain_tokens = [x for x in domain_tokens if not (':action' in x[0])]
        # TODO decouple add_predicates and mdd_action_to_pddl
        actions = [self._mdd_action_to_pddl(mdd_action) for mdd_action in mdd_actions]
        self._add_requirements(domain_tokens)
        self._add_aux_predicates(domain_tokens)
        self._add_total_cost_function(domain_tokens)
        self._prob_obj_to_constants(domain_tokens, problem_tokens)
        # Remove last ')' as we have to append actions
        output = self._get_pddl_string(domain_tokens)[:-1] + "\n"

        #TODO handle indent in output
        output += '\n'.join(str(self._action_to_pddl_string(action)) for action in
                actions)
        output += ')'
        with open("domain-out.pddl", "w") as output_file:
            print(output, file=output_file)
    
        self._add_metric(problem_tokens)
        #TODO Prettier output for objects (no newlines)
        with open("problem-out.pddl", "w") as output_file:
            print(self._get_pddl_string(problem_tokens), file=output_file)
Code example #27
File: publish_release.py Project: cdiener/cobrapy
def build_hugo_md(filename, tag, bump):
    """
    Build the markdown release notes for Hugo.

    Inserts the required TOML header with specific values and adds a break
    for long release notes.

    Parameters
    ----------
    filename : str, path
        The release notes file.
    tag : str
        The tag, following semantic versioning, of the current release.
    bump : {"major", "minor", "patch", "alpha", "beta"}
        The type of release.

    """
    header = [
        '+++\n',
        'date = "{}"\n'.format(date.today().isoformat()),
        'title = "{}"\n'.format(tag),
        'author = "The COBRApy Team"\n',
        'release = "{}"\n'.format(bump),
        '+++\n',
        '\n'
    ]
    with open(filename, "r") as file_h:
        content = insert_break(file_h.readlines())
    header.extend(content)
    with open(filename, "w") as file_h:
        file_h.writelines(header)
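
A hedged usage sketch (the release-notes path is hypothetical):

    build_hugo_md('release-notes/0.13.0.md', '0.13.0', 'minor')
    # the file now starts with the generated TOML +++ header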
Code example #28
  def _test_compile(self, target_level, class_name, source_contents, platform_args=None):
    with temporary_dir(root_dir=os.path.abspath('.')) as tmpdir:
      with open(os.path.join(tmpdir, 'BUILD'), 'w') as f:
        f.write(dedent('''
        java_library(name='{target_name}',
          sources=['{class_name}.java'],
          platform='{target_level}',
        )
        '''.format(target_name=os.path.basename(tmpdir),
                   class_name=class_name,
                   target_level=target_level)))
      with open(os.path.join(tmpdir, '{}.java'.format(class_name)), 'w') as f:
        f.write(source_contents)
      platforms = str({
        str(target_level): {
          'source': str(target_level),
          'target': str(target_level),
          'args': platform_args or [],
        }
      })
      command = []
      command.extend(['--jvm-platform-platforms={}'.format(platforms),
                      '--jvm-platform-default-platform={}'.format(target_level)])
      command.extend(self.get_pants_compile_args())
      command.extend([tmpdir])

      pants_run = self.run_pants(command)
      return pants_run
Code example #29
File: gzip.py Project: johndpope/sims4-ai-engine
def _test():
    args = sys.argv[1:]
    decompress = args and args[0] == '-d'
    if decompress:
        args = args[1:]
    if not args:
        args = ['-']
    for arg in args:
        if decompress:
            if arg == '-':
                f = GzipFile(filename='', mode='rb', fileobj=sys.stdin.buffer)
                g = sys.stdout.buffer
            else:
                if arg[-3:] != '.gz':
                    print("filename doesn't end in .gz:", repr(arg))
                    continue
                f = open(arg, 'rb')
                g = builtins.open(arg[:-3], 'wb')
        elif arg == '-':
            f = sys.stdin.buffer
            g = GzipFile(filename='', mode='wb', fileobj=sys.stdout.buffer)
        else:
            f = builtins.open(arg, 'rb')
            g = open(arg + '.gz', 'wb')
        while True:
            chunk = f.read(1024)
            if not chunk:
                break
            g.write(chunk)
        if g is not sys.stdout.buffer:
            g.close()
        if f is not sys.stdin.buffer:
            f.close()
Code example #30
File: test_preprocess.py Project: mick-d/nipype
def setup_infile(tmpdir):
    ext = Info.output_type_to_ext(Info.output_type())
    tmp_dir = str(tmpdir)
    tmp_infile = os.path.join(tmp_dir, 'foo' + ext)
    open(tmp_infile, 'w').close()

    return (tmp_infile, tmp_dir)
Code example #31
            return
        (tv_sec, tv_usec, caplen,
         length) = struct.unpack(self._endian + 'IIII', hdr)
        datum = self.stream.read(caplen)
        return ((tv_sec, tv_usec, length), datum)

    def write(self, packet):
        (header, datum) = packet
        (tv_sec, tv_usec, length) = header
        hdr = struct.pack(self._endian + 'IIII', tv_sec, tv_usec, length,
                          len(datum))
        self.stream.write(hdr)
        self.stream.write(datum)

    def __iter__(self):
        return iter(self.read, None)


open = pcap
open_offline = pcap

if __name__ == '__main__':
    p = open('test.pcap', 'wb')  # Create a new file
    p.write(((0, 0, 3), b'foo'))  # Add a packet
    p.write(((0, 0, 3), b'bar'))
    del p
    p = open(builtins.open('test.pcap', 'rb'))  # Also takes file objects
    assert ((p.version, p.thiszone, p.sigfigs, p.snaplen,
             p.linktype) == ((2, 4), 0, 0, 65535, 1))
    assert ([i for i in p] == [((0, 0, 3), b'foo'), ((0, 0, 3), b'bar')])
Code example #32
from builtins import open
from future import standard_library
standard_library.install_aliases()
import json
import requests

import boto3
from urllib.parse import urlparse
import sys

from elasticsearch import Elasticsearch

from grq2 import app
from grq2.lib.utils import parse_config

file = open(sys.argv[1], 'w')
client = boto3.client('s3')


def move_s3_files(url, target_path):
    parsed_url = urlparse(url)
    bucket = parsed_url.hostname.split('.', 1)[0]
    results = client.list_objects(Bucket=bucket,
                                  Delimiter='/',
                                  Prefix=parsed_url.path[1:] + '/')

    if results.get('Contents'):
        for result in results.get('Contents'):
            file_url = parsed_url.scheme + "://" + \
                parsed_url.hostname + '/' + result.get('Key')
            filename = result.get('Key').split('/')[-1]
Code example #33
    def test_get_xform_list_with_shared_forms(self, mock_send_mail):
        # create user alice
        alice_data = {
            'username': '******',
            'email': '*****@*****.**',
            'password1': 'alice',
            'password2': 'alice'
        }
        alice_profile = self._create_user_profile(alice_data)

        # check that she can authenticate successfully
        request = self.factory.get('/')
        response = self.view(request)
        self.assertEqual(response.status_code, 401)
        auth = DigestAuth('alice', 'alice')
        request.META.update(auth(request.META, response))
        response = self.view(request)
        self.assertEqual(response.status_code, 200)

        self.assertFalse(
            ReadOnlyRole.user_has_role(alice_profile.user, self.project))
        # share bob's project with her
        data = {
            'username': '******',
            'role': ReadOnlyRole.name,
            'email_msg': 'I have shared the project with you'
        }
        request = self.factory.post('/', data=data, **self.extra)
        share_view = ProjectViewSet.as_view({'post': 'share'})
        projectid = self.project.pk
        response = share_view(request, pk=projectid)
        self.assertEqual(response.status_code, 204)
        self.assertTrue(mock_send_mail.called)
        self.assertTrue(
            ReadOnlyRole.user_has_role(alice_profile.user, self.project))

        request = self.factory.get('/')
        response = self.view(request)
        self.assertEqual(response.status_code, 401)
        auth = DigestAuth('alice', 'alice')
        request.META.update(auth(request.META, response))
        response = self.view(request, username='******')
        self.assertEqual(response.status_code, 200)

        path = os.path.join(os.path.dirname(__file__), '..', 'fixtures',
                            'formList.xml')

        with open(path, encoding='utf-8') as f:
            form_list_xml = f.read().strip()
            data = {"hash": self.xform.hash, "pk": self.xform.pk}
            content = response.render().content.decode('utf-8')
            self.assertEqual(content, form_list_xml % data)
            download_url = ('<downloadUrl>http://testserver/%s/'
                            'forms/%s/form.xml</downloadUrl>') % (
                                self.user.username, self.xform.id)
            # check that bob's form exists in alice's formList
            self.assertTrue(download_url in content)
            self.assertTrue(response.has_header('X-OpenRosa-Version'))
            self.assertTrue(
                response.has_header('X-OpenRosa-Accept-Content-Length'))
            self.assertTrue(response.has_header('Date'))
            self.assertEqual(response['Content-Type'],
                             'text/xml; charset=utf-8')
Code example #34
    def __init__(self,
                 filename=None,
                 mode=None,
                 compresslevel=9,
                 fileobj=None,
                 mtime=None):
        """Constructor for the GzipFile class.

        At least one of fileobj and filename must be given a
        non-trivial value.

        The new class instance is based on fileobj, which can be a regular
        file, an io.BytesIO object, or any other object which simulates a file.
        It defaults to None, in which case filename is opened to provide
        a file object.

        When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may include the original
        filename of the uncompressed file.  It defaults to the filename of
        fileobj, if discernible; otherwise, it defaults to the empty string,
        and in this case the original filename is not included in the header.

        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', 'wb', 'x', or
        'xb' depending on whether the file will be read or written.  The default
        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
        A mode of 'r' is equivalent to one of 'rb', and similarly for 'w' and
        'wb', 'a' and 'ab', and 'x' and 'xb'.

        The compresslevel argument is an integer from 0 to 9 controlling the
        level of compression; 1 is fastest and produces the least compression,
        and 9 is slowest and produces the most compression. 0 is no compression
        at all. The default is 9.

        The mtime argument is an optional numeric timestamp to be written
        to the stream when compressing.  All gzip compressed streams
        are required to contain a timestamp.  If omitted or None, the
        current time is used.  This module ignores the timestamp when
        decompressing; however, some programs, such as gunzip, make use
        of it.  The format of the timestamp is the same as that of the
        return value of time.time() and of the st_mtime member of the
        object returned by os.stat().

        """

        if mode and ('t' in mode or 'U' in mode):
            raise ValueError("Invalid mode: {!r}".format(mode))
        if mode and 'b' not in mode:
            mode += 'b'
        if fileobj is None:
            fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
        if filename is None:
            filename = getattr(fileobj, 'name', '')
            if not isinstance(filename, (str, bytes)):
                filename = ''
        if mode is None:
            mode = getattr(fileobj, 'mode', 'rb')

        if mode.startswith('r'):
            self.mode = READ
            # Set flag indicating start of a new member
            self._new_member = True
            # Buffer data read from gzip file. extrastart is offset in
            # stream where buffer starts. extrasize is number of
            # bytes remaining in buffer from current stream position.
            self.extrabuf = b""
            self.extrasize = 0
            self.extrastart = 0
            self.name = filename
            # Starts small, scales exponentially
            self.min_readsize = 100
            fileobj = _PaddedFile(fileobj)

        elif mode.startswith(('w', 'a', 'x')):
            self.mode = WRITE
            self._init_write(filename)
            self.compress = zlib.compressobj(compresslevel, zlib.DEFLATED,
                                             -zlib.MAX_WBITS,
                                             zlib.DEF_MEM_LEVEL, 0)
        else:
            raise ValueError("Invalid mode: {!r}".format(mode))

        self.fileobj = fileobj
        self.offset = 0
        self.mtime = mtime

        if self.mode == WRITE:
            self._write_gzip_header()
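
A hedged usage sketch of the constructor documented above: wrap an in-memory buffer, so the filename is recorded only in the gzip header:

    import io

    buf = io.BytesIO()
    gz = GzipFile(filename='example.txt', mode='wb', fileobj=buf)
    gz.write(b'hello')
    gz.close()
    # buf.getvalue() now holds a complete gzip stream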
Code example #35
File: reporting.py Project: thoward/pants
    def update_reporting(self, global_options, is_quiet, run_tracker):
        """Updates reporting config once we've parsed cmd-line flags."""

        # Get any output silently buffered in the old console reporter, and remove it.
        removed_reporter = run_tracker.report.remove_reporter('capturing')
        buffered_out = self._consume_stringio(
            removed_reporter.settings.outfile)
        buffered_err = self._consume_stringio(
            removed_reporter.settings.errfile)

        log_level = Report.log_level_from_string(global_options.level
                                                 or 'info')
        # Ideally, we'd use terminfo or somesuch to discover whether a
        # terminal truly supports color, but most that don't set TERM=dumb.
        color = global_options.colors and (os.getenv('TERM') != 'dumb')
        timing = global_options.time
        cache_stats = global_options.time  # TODO: Separate flag for this?

        if is_quiet:
            console_reporter = QuietReporter(
                run_tracker,
                QuietReporter.Settings(log_level=log_level,
                                       color=color,
                                       timing=timing,
                                       cache_stats=cache_stats))
        else:
            # Set up the new console reporter.
            stdout = sys.stdout.buffer if PY3 else sys.stdout
            stderr = sys.stderr.buffer if PY3 else sys.stderr
            settings = PlainTextReporter.Settings(
                log_level=log_level,
                outfile=stdout,
                errfile=stderr,
                color=color,
                indent=True,
                timing=timing,
                cache_stats=cache_stats,
                label_format=self.get_options().console_label_format,
                tool_output_format=self.get_options(
                ).console_tool_output_format)
            console_reporter = PlainTextReporter(run_tracker, settings)
            console_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
            console_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
            console_reporter.flush()
        run_tracker.report.add_reporter('console', console_reporter)

        if global_options.logdir:
            # Also write plaintext logs to a file. This is completely separate from the html reports.
            safe_mkdir(global_options.logdir)
            run_id = run_tracker.run_info.get_info('id')
            outfile = open(
                os.path.join(global_options.logdir, '{}.log'.format(run_id)),
                'wb')
            errfile = open(
                os.path.join(global_options.logdir,
                             '{}.err.log'.format(run_id)), 'wb')
            settings = PlainTextReporter.Settings(
                log_level=log_level,
                outfile=outfile,
                errfile=errfile,
                color=False,
                indent=True,
                timing=True,
                cache_stats=True,
                label_format=self.get_options().console_label_format,
                tool_output_format=self.get_options(
                ).console_tool_output_format)
            logfile_reporter = PlainTextReporter(run_tracker, settings)
            logfile_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
            logfile_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
            logfile_reporter.flush()
            run_tracker.report.add_reporter('logfile', logfile_reporter)

        invalidation_report = self._get_invalidation_report()
        if invalidation_report:
            run_id = run_tracker.run_info.get_info('id')
            outfile = os.path.join(self.get_options().reports_dir, run_id,
                                   'invalidation-report.csv')
            invalidation_report.set_filename(outfile)

        return invalidation_report
Code example #36
def open(readonly):
    # Parse out the username from the output_conn URL.
    parsed = urlparse(params['output_conn'])
    username = parsed.username or "admin@internal"

    # Read the password from file.
    with builtins.open(params['output_password'], 'r') as fp:
        password = fp.read()
    password = password.rstrip()

    # Connect to the server.
    connection = sdk.Connection(
        url = params['output_conn'],
        username = username,
        password = password,
        ca_file = params['rhv_cafile'],
        log = logging.getLogger(),
        insecure = params['insecure'],
    )

    system_service = connection.system_service()

    # Create the disk.
    disks_service = system_service.disks_service()
    if params['disk_format'] == "raw":
        disk_format = types.DiskFormat.RAW
    else:
        disk_format = types.DiskFormat.COW
    disk = disks_service.add(
        disk = types.Disk(
            # The ID is optional.
            id = params.get('rhv_disk_uuid'),
            name = params['disk_name'],
            description = "Uploaded by virt-v2v",
            format = disk_format,
            initial_size = params['disk_size'],
            provisioned_size = params['disk_size'],
            # XXX Ignores params['output_sparse'].
            # Handling this properly will be complex, see:
            # https://www.redhat.com/archives/libguestfs/2018-March/msg00177.html
            sparse = True,
            storage_domains = [
                types.StorageDomain(
                    name = params['output_storage'],
                )
            ],
        )
    )

    # Wait till the disk is up, as the transfer can't start if the
    # disk is locked:
    disk_service = disks_service.disk_service(disk.id)
    debug("disk.id = %r" % disk.id)

    endt = time.time() + timeout
    while True:
        time.sleep(5)
        disk = disk_service.get()
        if disk.status == types.DiskStatus.OK:
            break
        if time.time() > endt:
            raise RuntimeError("timed out waiting for disk to become unlocked")

    # Get a reference to the transfer service.
    transfers_service = system_service.image_transfers_service()

    # Create a new image transfer, using the local host if possible.
    host = find_host(connection) if params['rhv_direct'] else None
    transfer = transfers_service.add(
        types.ImageTransfer(
            disk = types.Disk(id = disk.id),
            host = host,
            inactivity_timeout = 3600,
        )
    )
    debug("transfer.id = %r" % transfer.id)

    # Get a reference to the created transfer service.
    transfer_service = transfers_service.image_transfer_service(transfer.id)

    # After adding a new transfer for the disk, the transfer's status
    # will be INITIALIZING.  Wait until the init phase is over. The
    # actual transfer can start when its status is "Transferring".
    endt = time.time() + timeout
    while True:
        transfer = transfer_service.get()
        if transfer.phase != types.ImageTransferPhase.INITIALIZING:
            break
        if time.time() > endt:
            transfer_service.cancel()
            raise RuntimeError("timed out waiting for transfer status "
                               "!= INITIALIZING")
        time.sleep(5)

    # Now we have permission to start the transfer.
    if params['rhv_direct']:
        if transfer.transfer_url is None:
            transfer_service.cancel()
            raise RuntimeError("direct upload to host not supported, "
                               "requires ovirt-engine >= 4.2 and only works "
                               "when virt-v2v is run within the oVirt/RHV "
                               "environment, eg. on an oVirt node.")
        destination_url = urlparse(transfer.transfer_url)
    else:
        destination_url = urlparse(transfer.proxy_url)

    if destination_url.scheme == "https":
        context = \
            ssl.create_default_context(purpose = ssl.Purpose.SERVER_AUTH,
                                       cafile = params['rhv_cafile'])
        if params['insecure']:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        http = HTTPSConnection(
            destination_url.hostname,
            destination_url.port,
            context = context
        )
    elif destination_url.scheme == "http":
        http = HTTPConnection(
            destination_url.hostname,
            destination_url.port,
        )
    else:
        transfer_service.cancel()
        raise RuntimeError("unknown URL scheme (%s)" % destination_url.scheme)

    # The first request is to fetch the features of the server.

    # Authentication was needed only for GET and PUT requests when
    # communicating with old imageio-proxy.
    needs_auth = not params['rhv_direct']

    can_flush = False
    can_trim = False
    can_zero = False
    unix_socket = None

    http.request("OPTIONS", destination_url.path)
    r = http.getresponse()
    data = r.read()

    if r.status == 200:
        # New imageio never needs authentication.
        needs_auth = False

        j = json.loads(data)
        can_flush = "flush" in j['features']
        can_trim = "trim" in j['features']
        can_zero = "zero" in j['features']
        unix_socket = j.get('unix_socket')

    # Old imageio servers returned either 405 Method Not Allowed or
    # 204 No Content (with an empty body).  If we see that we leave
    # all the features as False and they will be emulated.
    elif r.status == 405 or r.status == 204:
        pass

    else:
        transfer_service.cancel()
        raise RuntimeError("could not use OPTIONS request: %d: %s" %
                           (r.status, r.reason))

    debug("imageio features: flush=%r trim=%r zero=%r unix_socket=%r" %
          (can_flush, can_trim, can_zero, unix_socket))

    # If we are connected to imageio on the local host and the
    # transfer features a unix_socket then we can reconnect to that.
    if host is not None and unix_socket is not None:
        try:
            http = UnixHTTPConnection(unix_socket)
        except Exception as e:
            # Very unlikely failure, but we can recover by using the https
            # connection.
            debug("cannot create unix socket connection, using https: %s" % e)
        else:
            debug("optimizing connection using unix socket %r" % unix_socket)

    # Save everything we need to make requests in the handle.
    return {
        'can_flush': can_flush,
        'can_trim': can_trim,
        'can_zero': can_zero,
        'connection': connection,
        'disk': disk,
        'disk_service': disk_service,
        'failed': False,
        'highestwrite': 0,
        'http': http,
        'needs_auth': needs_auth,
        'path': destination_url.path,
        'transfer': transfer,
        'transfer_service': transfer_service,
    }
Code example #37
File: restqa.py Project: 0bugz/zb-restqa
    def run_tests(self):
        test_suite_files_expr = "{}/*_test_suite.yml".format(
            self.test_suite_dir)
        suites = glob.glob(test_suite_files_expr)
        for suite in suites:
            with open(suite, 'r') as yaml_in:
                suite_settings = yaml.safe_load(yaml_in)
                suite_settings = suite_settings["suite"]
                suite_base_url = suite_settings.get("base_url", None)
                print("suite_base_url: {}".format(suite_base_url))
                suite_name = suite_settings.get("name", None)
                suite_setup = suite_settings.get("setup", None)
                suite_teardown = suite_settings.get("teardown", None)
                print("suite: {}::setup_function: {}::teardown_function: {}".
                      format(suite_name, suite_setup, suite_teardown))
                try:
                    ctxt = {}
                    setup_function = self.function_dict.get(
                        suite_setup, none_function)
                    ctxt = setup_function(ctxt)
                    tests = suite_settings.get("tests", [])
                    for test_settings in tests:
                        try:
                            test_name = test_settings.get("name", "None")
                            test_json_payload = test_settings.get(
                                "json", False)
                            test_http_headers = test_settings.get(
                                "headers", [])
                            test_http_method = test_settings.get(
                                "method", "GET")
                            test_http_path = test_settings.get("path", None)
                            test_http_payload = test_settings.get(
                                "payload", None)
                            test_pre = test_settings.get("pre_test", None)
                            test_post = test_settings.get("post_test", None)
                            test_assertions = test_settings.get(
                                "assertions", None)
                            headers = {}
                            for test_http_header in test_http_headers:
                                for header_name, header_value in test_http_header.items():
                                    headers[header_name] = header_value
                            optional_params = {"headers": headers}
                            if test_http_method == "GET" and test_http_payload is not None:
                                optional_params["params"] = test_http_payload
                            if test_http_method == "POST":
                                if test_http_payload is not None:
                                    if test_json_payload:
                                        # requests serializes the `json` kwarg
                                        # itself, so pass the object directly.
                                        optional_params["json"] = test_http_payload
                                    else:
                                        optional_params["data"] = test_http_payload
                            request_url = "{}{}".format(
                                suite_base_url, test_http_path)
                            print("request_url: {}".format(request_url))
                            test_pre_function = self.function_dict.get(
                                test_pre, none_function)
                            ctxt = test_pre_function(ctxt)
                            try:
                                resp = requests.request(
                                    test_http_method, request_url,
                                    **optional_params)
                                resp = resp.json()
                                local_vars = {"ctxt": ctxt, "resp": resp}
                                for expression in test_assertions:
                                    matches = re.findall(
                                        JSON_SELECTOR_REGEX, expression)
                                    print(
                                        "expression: {}/properties: {}".format(
                                            expression, matches))
                                    for match in matches:
                                        # Strip the leading "resp." so the
                                        # selector can be handed to jmespath.
                                        replace_expr = match[0].replace(
                                            'resp.', '', 1)
                                        replace_expr = "jmespath.search('{}', resp)".format(
                                            replace_expr)
                                        expression = expression.replace(
                                            match[0], replace_expr)
                                    print("New expression: {}".format(
                                        expression))
                                    eval(expression, None, local_vars)
                            except Exception as e2:
                                print(
                                    "Exception occurred while executing suite: {} / test: {} / {}"
                                    .format(suite_name, test_name, e2))
                            test_post_function = self.function_dict.get(
                                test_post, none_function)
                            ctxt = test_post_function(ctxt)
                        except Exception as e1:
                            print(
                                "Exception occurred while executing suite: {} / test: {} / {}"
                                .format(suite_name, test_name, e1))
                    teardown_function = self.function_dict.get(
                        suite_teardown, none_function)
                    ctxt = teardown_function(ctxt)
                except Exception as e:
                    print(
                        "Exception occurred while executing test suite: {}/{}".
                        format(suite_name, e))
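
The interesting step above is the assertion rewriting: selectors like `resp.a.b` are swapped for jmespath lookups before `eval`. Below is a minimal standalone sketch of that step, assuming a selector regex similar to the module's JSON_SELECTOR_REGEX (the real pattern is not shown in this snippet, so the one here is an assumption):

import re
import jmespath

# Assumed stand-in for the module's JSON_SELECTOR_REGEX.
JSON_SELECTOR_REGEX = r"(resp\.[\w.]+)"

def rewrite_and_eval(expression, resp):
    # Swap each `resp.a.b` selector for a jmespath lookup, then evaluate.
    for match in re.findall(JSON_SELECTOR_REGEX, expression):
        selector = match.replace("resp.", "", 1)
        expression = expression.replace(
            match, "jmespath.search('{}', resp)".format(selector))
    return eval(expression, None, {"resp": resp, "jmespath": jmespath})

assert rewrite_and_eval("resp.user.name == 'bob'", {"user": {"name": "bob"}})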
Code example #38
File: python_eval.py Project: hrfuller/pants
    def _compile_target(self, vt):
        """'Compiles' a python target.

        'Compiling' means forming an isolated chroot of its sources and
        transitive deps and then attempting to import each of the target's
        sources in the case of a python library, or else the entry point in
        the case of a python binary.

        For a library with sources lib/core.py and lib/util.py a "compiler"
        main file would look like:

          if __name__ == '__main__':
            import lib.core
            import lib.util

        For a binary with entry point lib.bin:main the "compiler" main file
        would look like:

          if __name__ == '__main__':
            from lib.bin import main

        In either case the main file is executed within the target chroot to
        reveal missing BUILD dependencies.
        """
        target = vt.target
        with self.context.new_workunit(name=target.address.spec):
            modules = self._get_modules(target)
            if not modules:
                # Nothing to eval, so a trivial compile success.
                return 0

            interpreter = self._get_interpreter_for_target_closure(target)
            reqs_pex = self._resolve_requirements_for_versioned_target_closure(
                interpreter, vt)
            srcs_pex = self._source_pex_for_versioned_target_closure(
                interpreter, vt)

            # Create the executable pex.
            exec_pex_parent = os.path.join(self.workdir, 'executable_pex')
            executable_file_content = self._get_executable_file_content(
                exec_pex_parent, modules)

            hasher = hashlib.sha1()
            hasher.update(reqs_pex.path().encode('utf-8'))
            hasher.update(srcs_pex.path().encode('utf-8'))
            hasher.update(executable_file_content.encode('utf-8'))
            exec_file_hash = hasher.hexdigest()
            exec_pex_path = os.path.realpath(
                os.path.join(exec_pex_parent, exec_file_hash))
            if not os.path.isdir(exec_pex_path):
                with safe_concurrent_creation(exec_pex_path) as safe_path:
                    # Write the entry point.
                    safe_mkdir(safe_path)
                    with open(
                            os.path.join(safe_path,
                                         '{}.py'.format(self._EXEC_NAME)),
                            'w') as outfile:
                        outfile.write(executable_file_content)
                    pex_info = (target.pexinfo if isinstance(
                        target, PythonBinary) else None) or PexInfo()
                    # Override any user-specified entry point, under the assumption that the
                    # executable_file_content does what the user intends (including, probably, calling that
                    # underlying entry point).
                    pex_info.entry_point = self._EXEC_NAME
                    pex_info.pex_path = ':'.join(pex.path()
                                                 for pex in (reqs_pex,
                                                             srcs_pex) if pex)
                    builder = PEXBuilder(safe_path,
                                         interpreter,
                                         pex_info=pex_info)
                    builder.freeze(bytecode_compile=False)

            pex = PEX(exec_pex_path, interpreter)

            with self.context.new_workunit(
                    name='eval',
                    labels=[
                        WorkUnitLabel.COMPILER, WorkUnitLabel.RUN,
                        WorkUnitLabel.TOOL
                    ],
                    cmd=' '.join(pex.cmdline())) as workunit:
                returncode = pex.run(stdout=workunit.output('stdout'),
                                     stderr=workunit.output('stderr'))
                workunit.set_outcome(WorkUnit.SUCCESS if returncode ==
                                     0 else WorkUnit.FAILURE)
                if returncode != 0:
                    self.context.log.error('Failed to eval {}'.format(
                        target.address.spec))
                return returncode
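
For illustration, the "compiler" main file described in the docstring can be generated from a list of module names; this is a hedged sketch, not pants' actual `_get_executable_file_content`:

def compiler_main_source(modules):
    # One import per module; any missing BUILD dependency surfaces as an
    # ImportError when this file runs inside the target chroot.
    lines = ["if __name__ == '__main__':"]
    lines.extend("    import {}".format(m) for m in modules)
    return "\n".join(lines) + "\n"

print(compiler_main_source(["lib.core", "lib.util"]))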
Code example #39
File: data.py Project: song-heng/where-image2
    def minimal_save(self, data_save_dir):
        with open(data_save_dir + '/vocab.json', 'w', encoding='utf-8') as f:
            print(json.dumps(self.vocab), file=f)
Code example #40
    def _write_text(self, text):
        with open(str(self), 'w') as f:
            f.write(text)
Code example #41
File: objdump_wrapper.py Project: razaina/hexag00n
    def disasm_packet_raw(self, packet_bytes, addr):
        """Disassemble a Hexagon packet.

        As each call to objdump is made in isolation, there's no point in disassembling
        each instruction of a packet separately, because information such as constant extension
        would be lost, therefore entire packets are handled.

        Packets can have 1-4 instructions, therefore the length of `packet_bytes` should
        be between 4-16 bytes (multiple of 4).

        Args:
            packet_bytes (str): bytes of the instruction packet.
            addr (int): address of the start of the packet.

        Returns:
            str: text representation of the disassembled instruction.

        """
        if (addr not in self.inst_cache):

            if len(packet_bytes) < 4:
                raise Exception("Received less than 4 bytes: {:d}".format(
                    len(packet_bytes)))

            # For some reason objdump in binary mode is not correctly processing an
            # all zeros instruction, so this special case is handled here as an
            # unknown instruction (which is the behavior of objdump in non-binary mode).

            if struct.unpack('<I', packet_bytes[0:4])[0] == 0:
                return "{ <unknown> }"

            # Write temporary file with the packet instructions bytes.

            with open(self.dump_binary_path, 'wb') as f:
                f.write(packet_bytes)

            si = subprocess.STARTUPINFO()
            si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            detached_process_flag = 0x00000008

            stdoutdata = subprocess.Popen(
                [
                    self.objdump_path,
                    "--disassemble-all",  # Display assembler contents of all sections
                    "-b",
                    "binary",  # Specify the target object format as BFDNAME
                    "-mhexagon",  # Specify the target architecture
                    "--adjust-vma=0x{:x}".format(
                        addr),  # Add OFFSET to all displayed section addresses
                    "--no-show-raw-insn",  # Don't display hex alongside symbolic disassembly
                    self.dump_binary_path,
                ],
                stderr=subprocess.STDOUT,
                stdout=subprocess.PIPE,
                startupinfo=si,
                creationflags=detached_process_flag).communicate()[0]

            self.populate_inst_cache(stdoutdata)

        return self.inst_cache[addr]
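
The 4-16 byte rule from the docstring can be checked up front; here is a small sketch (a hypothetical helper, not part of the wrapper) that validates a packet and splits it into 32-bit little-endian instruction words:

import struct

def split_packet(packet_bytes):
    # Packets hold 1-4 instructions of 4 bytes each.
    n = len(packet_bytes)
    if n < 4 or n > 16 or n % 4 != 0:
        raise ValueError("expected 4-16 bytes (multiple of 4), got {:d}".format(n))
    return [struct.unpack_from("<I", packet_bytes, i)[0] for i in range(0, n, 4)]

print([hex(w) for w in split_packet(struct.pack("<II", 0x7800C020, 0x7800C040))])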
Code example #42
    def __init__(self, file, header_fields=(), dtype=None):
        """Initialize a new instance.

        Parameters
        ----------
        file : file-like or str
            Stream or filename from which to read the data. A stream
            must be open in ``'rb'`` mode.
        header_fields : sequence of dicts, optional
            Definition of the fields in the header (per row), each
            containing key-value pairs for the following keys:

            - ``'name'`` : Label for the field.
            - ``'offset'`` : Start of the field in bytes.
            - ``'size'`` : Size of the field in bytes.
            - ``'dtype'`` : Data type in Numpy- or Numpy-readable format.
            - ``'dshape'`` (optional) : The array of values is reshaped to
              this shape.
            - ``'description'`` (optional) : A human-readable description
              of the field.

            If empty, the header size is set to 0, and the data type is
            assumed to be ``dtype``. Use this in conjunction with
            parametrized `read_data` to bypass the header and read
            arbitrary data portions.

        dtype : optional
            Data type of the file's data block. It must be understood by
            the `numpy.dtype` constructor. By default, the data type
            is determined from the file header, or, if no information
            is available there, it is set to ``np.dtype(float)``.

        See Also
        --------
        header_fields_from_table :
            Function to parse a specification table, returning a field
            sequence usable as ``header_fields`` parameter.
        """
        # Pick out the file object from temp files
        file = getattr(file, 'file', file)
        # Need those attrs in subsequent code
        file_attrs = ('mode', 'seek', 'read', 'readinto', 'close')
        is_file = all(hasattr(file, attr) for attr in file_attrs)
        if is_file:
            self.__file = file
            self.__owns_file = False
        else:
            self.__file = open(file, 'rb', buffering=0)
            self.__owns_file = True

        if 'b' not in self.file.mode:
            raise ValueError("`file` must be opened in binary mode, "
                             "but mode is '{}'".format(self.file.mode))

        try:
            iter(header_fields)
        except TypeError:
            raise TypeError('`header_fields` must be iterable, got '
                            '{!r}'.format(header_fields))
        self.__header_fields = header_fields

        # Set default values for some attributes
        self._init_data_dtype = np.dtype(dtype)
        self.__header = OrderedDict()
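
For reference, a `header_fields` sequence matching the keys documented above might look like the following; the field names, offsets and types are made-up example values, and the reader class name is whatever class this `__init__` belongs to:

import numpy as np

header_fields = [
    {'name': 'magic', 'offset': 0, 'size': 4, 'dtype': 'S4',
     'description': 'file magic number'},
    {'name': 'shape', 'offset': 4, 'size': 12, 'dtype': '<i4',
     'dshape': (3,)},
    {'name': 'scale', 'offset': 16, 'size': 8, 'dtype': np.dtype('<f8')},
]
# reader = SomeFileReader('data.bin', header_fields=header_fields)  # hypothetical class name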
Code example #43
    def test_hermetic_binary_cache_with_dependencies(self):
        file_abs_path = os.path.join(
            get_buildroot(),
            'examples/src/scala/org/pantsbuild/example/hello/exe/Exe.scala')

        with temporary_dir() as cache_dir:
            config = {
                'cache.compile.zinc': {
                    'write_to': [cache_dir]
                },
                'compile.zinc': {
                    'execution_strategy': 'hermetic',
                    'use_classpath_jars': False,
                    'incremental': False,
                }
            }

            with self.temporary_workdir() as workdir:
                pants_run = self.run_pants_with_workdir(
                    [
                        '-q',
                        'run',
                        'examples/src/scala/org/pantsbuild/example/hello/exe',
                    ],
                    workdir,
                    config,
                )
                self.assert_success(pants_run)
                self.assertIn(
                    'Num args passed: 0. Stand by for welcome...\nHello, Resource World!',
                    pants_run.stdout_data,
                )

                compile_dir = os.path.join(workdir, 'compile', 'zinc',
                                           'current')

                for path_suffix in [
                        'examples.src.scala.org.pantsbuild.example.hello.exe.exe/current/classes/org/pantsbuild/example/hello/exe/Exe.class',
                        'examples.src.scala.org.pantsbuild.example.hello.welcome.welcome/current/classes/org/pantsbuild/example/hello/welcome/WelcomeEverybody.class',
                ]:
                    path = os.path.join(compile_dir, path_suffix)
                    self.assertTrue(os.path.exists(path),
                                    "Want path {} to exist".format(path))

                with with_overwritten_file_content(file_abs_path):

                    new_temp_test = '''package org.pantsbuild.example.hello.exe
                              
                              import java.io.{BufferedReader, InputStreamReader}
                              
                              import org.pantsbuild.example.hello.welcome
                              
                              // A simple jvm binary to illustrate Scala BUILD targets
                              
                              object Exe {
                                /** Test that resources are properly namespaced. */
                                def getWorld: String = {
                                  val is =
                                    this.getClass.getClassLoader.getResourceAsStream(
                                      "org/pantsbuild/example/hello/world.txt"
                                    )
                                  try {
                                    new BufferedReader(new InputStreamReader(is)).readLine()
                                  } finally {
                                    is.close()
                                  }
                                }
                              
                                def main(args: Array[String]) {
                                  println("Num args passed: " + args.size + ". Stand by for welcome...")
                                  if (args.size <= 0) {
                                    println("Hello, and welcome to " + getWorld + "!")
                                  } else {
                                    val w = welcome.WelcomeEverybody(args)
                                    w.foreach(s => println(s))
                                  }
                                }
                              }'''

                    with open(file_abs_path, 'w') as f:
                        f.write(new_temp_test)

                    pants_run = self.run_pants_with_workdir(
                        [
                            '-q',
                            'run',
                            'examples/src/scala/org/pantsbuild/example/hello/exe',
                        ],
                        workdir,
                        config,
                    )
                    self.assert_success(pants_run)
                    self.assertIn(
                        'Num args passed: 0. Stand by for welcome...\nHello, and welcome to Resource World!',
                        pants_run.stdout_data,
                    )

                    compile_dir = os.path.join(workdir, 'compile', 'zinc',
                                               'current')

                    for path_suffix in [
                            'examples.src.scala.org.pantsbuild.example.hello.exe.exe/current/classes/org/pantsbuild/example/hello/exe/Exe.class',
                            'examples.src.scala.org.pantsbuild.example.hello.welcome.welcome/current/classes/org/pantsbuild/example/hello/welcome/WelcomeEverybody.class',
                    ]:
                        path = os.path.join(compile_dir, path_suffix)
                        self.assertTrue(os.path.exists(path),
                                        "Want path {} to exist".format(path))
Code example #44
def processing_std_vgt(res_queue,
                       pipeline_run_level=0,
                       pipeline_printout_level=0,
                       pipeline_printout_graph_level=0,
                       prod='',
                       starting_sprod='',
                       mapset='',
                       version='',
                       starting_dates=None,
                       update_stats=False,
                       nrt_products=True,
                       write2file=None,
                       logfile=None,
                       touch_only=False,
                       upsert_db=False):
    spec_logger = log.my_logger(logfile)
    spec_logger.info("Entering routine %s" % 'processing_std_vgt')

    proc_lists = None

    proc_lists = create_pipeline(prod=prod,
                                 starting_sprod=starting_sprod,
                                 mapset=mapset,
                                 version=version,
                                 starting_dates=starting_dates,
                                 proc_lists=proc_lists,
                                 update_stats=update_stats,
                                 nrt_products=nrt_products)

    if write2file is not None:
        fwrite_id = open(write2file, 'w')
    else:
        fwrite_id = None

    if upsert_db:
        tasks = pipeline_get_task_names()
        spec_logger.info("Updating DB for the pipeline %s" % tasks[0])
        # Get input product info
        input_product_info = querydb.get_product_out_info(
            allrecs=False,
            productcode=prod,
            subproductcode=starting_sprod,
            version=version)

        for my_sprod in proc_lists.list_subprods:
            # my_sprod.print_out()
            status = querydb.update_processing_chain_products(
                prod, version, my_sprod, input_product_info)

        spec_logger.info("Updating DB Done - Exit")
        # return proc_lists

    if pipeline_run_level > 0:
        spec_logger.info("Run the pipeline %s" % 'processing_std_vgt')
        pipeline_run(
            touch_files_only=touch_only,
            verbose=pipeline_run_level,
            logger=spec_logger,
            log_exceptions=spec_logger,
            history_file='/eStation2/log/.ruffus_history_{0}_{1}.sqlite'.
            format(prod, starting_sprod))
        tasks = pipeline_get_task_names()
        spec_logger.info("Run the pipeline %s" % tasks[0])
        spec_logger.info("After running the pipeline %s" %
                         'processing_std_vgt')

    if pipeline_printout_level > 0:
        pipeline_printout(
            verbose=pipeline_printout_level,
            output_stream=fwrite_id,
            history_file='/eStation2/log/.ruffus_history_{0}_{1}.sqlite'.
            format(prod, starting_sprod))

    if pipeline_printout_graph_level > 0:
        pipeline_printout_graph('flowchart.jpg')

    if write2file is not None:
        fwrite_id.close()

    # res_queue.put(proc_lists)
    return True
Code example #45
def test_fnirt(setup_flirt):

    tmpdir, infile, reffile = setup_flirt
    os.chdir(tmpdir)
    fnirt = fsl.FNIRT()
    assert fnirt.cmd == 'fnirt'

    # Test list parameters
    params = [('subsampling_scheme', '--subsamp', [4, 2, 2, 1], '4,2,2,1'),
              ('max_nonlin_iter', '--miter', [4, 4, 4, 2], '4,4,4,2'),
              ('ref_fwhm', '--reffwhm', [4, 2, 2, 0], '4,2,2,0'),
              ('in_fwhm', '--infwhm', [4, 2, 2, 0], '4,2,2,0'),
              ('apply_refmask', '--applyrefmask', [0, 0, 1, 1], '0,0,1,1'),
              ('apply_inmask', '--applyinmask', [0, 0, 0, 1], '0,0,0,1'),
              ('regularization_lambda', '--lambda', [0.5, 0.75], '0.5,0.75'),
              ('intensity_mapping_model', '--intmod', 'global_non_linear',
               'global_non_linear')]
    for item, flag, val, strval in params:
        fnirt = fsl.FNIRT(in_file=infile, ref_file=reffile, **{item: val})
        log = fnirt._gen_fname(infile, suffix='_log.txt', change_ext=False)
        iout = fnirt._gen_fname(infile, suffix='_warped')
        if item == 'max_nonlin_iter':
            cmd = 'fnirt --in=%s '\
                  '--logout=%s'\
                  ' %s=%s --ref=%s'\
                  ' --iout=%s' % (infile, log,
                                  flag, strval, reffile, iout)
        elif item in ('in_fwhm', 'intensity_mapping_model'):
            cmd = 'fnirt --in=%s %s=%s --logout=%s '\
                  '--ref=%s --iout=%s' % (infile, flag,
                                          strval, log, reffile, iout)
        elif item.startswith('apply'):
            cmd = 'fnirt %s=%s '\
                  '--in=%s '\
                  '--logout=%s '\
                  '--ref=%s --iout=%s' % (flag, strval,
                                          infile, log,
                                          reffile,
                                          iout)

        else:
            cmd = 'fnirt '\
                  '--in=%s --logout=%s '\
                  '--ref=%s %s=%s --iout=%s' % (infile, log,
                                                reffile,
                                                flag, strval,
                                                iout)
        assert fnirt.cmdline == cmd

    # Test ValueError is raised when missing mandatory args
    fnirt = fsl.FNIRT()
    with pytest.raises(ValueError):
        fnirt.run()
    fnirt.inputs.in_file = infile
    fnirt.inputs.ref_file = reffile
    intmap_basename = '%s_intmap' % fsl.FNIRT.intensitymap_file_basename(
        infile)
    intmap_image = fsl_name(fnirt, intmap_basename)
    intmap_txt = '%s.txt' % intmap_basename
    # create these files so that the file-existence checks pass
    with open(intmap_image, 'w'):
        pass
    with open(intmap_txt, 'w'):
        pass

    # test files
    opt_map = [('affine_file', '--aff=%s' % infile, infile),
               ('inwarp_file', '--inwarp=%s' % infile, infile),
               ('in_intensitymap_file', '--intin=%s' % intmap_basename,
                [intmap_image]),
               ('in_intensitymap_file', '--intin=%s' % intmap_basename,
                [intmap_image, intmap_txt]),
               ('config_file', '--config=%s' % infile, infile),
               ('refmask_file', '--refmask=%s' % infile, infile),
               ('inmask_file', '--inmask=%s' % infile, infile),
               ('field_file', '--fout=%s' % infile, infile),
               ('jacobian_file', '--jout=%s' % infile, infile),
               ('modulatedref_file', '--refout=%s' % infile, infile),
               ('out_intensitymap_file', '--intout=%s' % intmap_basename,
                True),
               ('out_intensitymap_file',
                '--intout=%s' % intmap_basename, intmap_image),
               ('fieldcoeff_file', '--cout=%s' % infile, infile),
               ('log_file', '--logout=%s' % infile, infile)]

    for (name, settings, arg) in opt_map:
        fnirt = fsl.FNIRT(in_file=infile, ref_file=reffile, **{name: arg})

        if name in ('config_file', 'affine_file', 'field_file',
                    'fieldcoeff_file'):
            cmd = 'fnirt %s --in=%s '\
                  '--logout=%s '\
                  '--ref=%s --iout=%s' % (settings, infile, log,
                                          reffile, iout)
        elif name == 'refmask_file':
            cmd = 'fnirt --in=%s '\
                  '--logout=%s --ref=%s '\
                  '%s '\
                  '--iout=%s' % (infile, log,
                                 reffile,
                                 settings,
                                 iout)
        elif name in ('in_intensitymap_file', 'inwarp_file', 'inmask_file',
                      'jacobian_file'):
            cmd = 'fnirt --in=%s '\
                  '%s '\
                  '--logout=%s --ref=%s '\
                  '--iout=%s' % (infile,
                                 settings,
                                 log,
                                 reffile,
                                 iout)
        elif name == 'log_file':
            cmd = 'fnirt --in=%s '\
                  '%s --ref=%s '\
                  '--iout=%s' % (infile,
                                 settings,
                                 reffile,
                                 iout)
        else:
            cmd = 'fnirt --in=%s '\
                  '--logout=%s %s '\
                  '--ref=%s --iout=%s' % (infile, log,
                                          settings,
                                          reffile, iout)

        assert fnirt.cmdline == cmd

        if name == 'out_intensitymap_file':
            assert fnirt._list_outputs()['out_intensitymap_file'] == [
                intmap_image, intmap_txt
            ]
Code example #46
File: __init__.py Project: stinos/pycopy-lib
def urandom(n):
    import builtins
    with builtins.open("/dev/urandom", "rb") as f:
        return f.read(n)
Code example #47
    def _parseStructureFile(self, structfilePath=None, structfileString=None):
        if structfilePath is None and structfileString is None:
            raise ValueError("No structure file path or string given")

        indent = None
        patternsStack = []
        endpoints = {}
        lastIndent = 0

        # TODO: maybe use a custom exception class?
        def error(msg, line, linenum):
            print('Error in structure file "{}", line {}:\n{}\n{}'.format(
                structfilePath, linenum + 1, line, msg),
                  file=sys.stderr)
            raise ValueError('Error on line {} in file "{}"'.format(
                linenum, structfilePath))

        linePattern = re.compile(r"(\s*)(.*)")  # groups: indent, content
        importLinePattern = re.compile(
            r"^(?:from\s+.+\s+)?(?:import\s+.+\s*)(?:as\s+.+\s*)?$")
        # groups: endpointName, endpointPattern
        contentPattern = re.compile(r"(?:([A-Za-z]\w*):\s?)?(.+)")
        with (open(structfilePath, encoding="utf-8")
              if structfilePath else io.StringIO(structfileString)) as f:
            # TODO: more descriptive errors?
            # TODO: show neighboring lines and highlight error
            # TODO: ensure endpoint names are valid Python identifiers, and don't conflict with iyore terms (path)

            for linenum, line in enumerate(f):
                # split indentation and content
                try:
                    ind, content = linePattern.match(line).groups()
                except AttributeError:
                    error("Unparseable line", line, linenum)

                if content == "":
                    # skip blank lines
                    # TODO: maybe only allow if ind == "" as well?
                    continue

                if content[0] == "#":
                    # allow comments on their own line
                    # TODO: allow inline comments
                    continue

                # split (possible) endpoint name, pattern
                try:
                    name, pattern = contentPattern.match(content).groups()
                except AttributeError:
                    error("Unparseable entry", line, linenum)

                # parse pattern
                try:
                    pattern = Pattern(pattern)
                except ValueError as e:
                    error(e.args[0], line, linenum)

                # determine indentation format from first lines
                if indent is None:
                    if ind == "":
                        currentIndent = 0
                    else:
                        if patternsStack == []:
                            error("Unexpected indent in first entry", line,
                                  linenum)
                        else:
                            indent = ind
                            currentIndent = 1
                # check indentation and figure out indentation level
                else:
                    currentIndent = len(ind) / len(indent)
                    currentIndentInt = int(currentIndent)
                    if currentIndent != currentIndentInt:
                        error(
                            "Inconsistent indent width: expected a multiple of {} whitespace characters, instead found {}"
                            .format(len(indent), len(ind)), line, linenum)
                    else:
                        currentIndent = currentIndentInt
                    if currentIndent * indent != ind:
                        error(
                            "Inconsistent indent characters: expected {}s, found a {}"
                            .format("space" if indent[0] == " " else "tab",
                                    "tab" if indent[0] == " " else "space"),
                            line, linenum)

                # based on indentation, modify pattern stack
                if currentIndent == 0:
                    patternsStack[:] = [pattern]
                elif currentIndent == lastIndent + 1:
                    patternsStack.append(pattern)
                elif currentIndent == lastIndent:
                    patternsStack[-1] = pattern
                elif currentIndent < lastIndent:
                    patternsStack = patternsStack[:currentIndent]
                    patternsStack.append(pattern)
                else:
                    error(
                        "Too many indents: previous line was indented {} times, so this can be indented at most {} times, but found {} indents"
                        .format(lastIndent, lastIndent + 1,
                                currentIndent), line, linenum)

                lastIndent = currentIndent

                # if a name is given, register (a copy of) the current pattern stack as an endpoint
                # TODO: multiple leaf patterns
                if name is not None:
                    if name in endpoints:
                        error(
                            "The endpoint '{}' already exists, try a different name"
                            .format(name), line, linenum)
                    else:
                        endpoints[name] = Endpoint(list(patternsStack),
                                                   self.base)

        return endpoints
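
For reference, here is an example structure-file string in the format this parser accepts: indentation expresses nesting, a leading `name:` marks an endpoint, and `#` begins a comment line. The patterns below are placeholder values:

structfile = (
    "# example structure file\n"
    "UNIT\n"
    "    DATA\n"
    "        audio: .+\\.wav\n"
    "    photos: .+\\.jpg\n"
)
# endpoints = ds._parseStructureFile(structfileString=structfile)  # `ds` is a hypothetical instance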
Code example #48
	sample_chi2_distrib = np.random.chisquare(size=100, df=1)
	binning = np.linspace(0, 20, 31)


	for llh_name in args.list:
		print('plotting', llh_name)

		assert llh_name in ['modchi2','SAY','dima','glu2','barlow'],'ERROR: Available likelihood functions are: glu2 modchi2 dima SAY barlow'

		pckl_name = args.input_stem+'_'+llh_name+'.pckl'
		assert os.path.isfile(pckl_name),'ERROR: file %s not found'%(pckl_name)

		#
		# Load the pickle file containing information about pseudo-experiments
		#
		indata = pickle.load(open(pckl_name, 'rb'))

		container_TS_truth_high = []
		container_TS_truth_low = []
		container_TS_lowstat = []
		container_TS_highstat = []
		bias = []

		val_truth = 20.
		container_val_lowstat = []
		container_val_highstat = []

		for pseudo_exp in indata:

			val_low = pseudo_exp['lowstats_opt']['x']
			val_high = pseudo_exp['highstats_opt']['x']
Code example #49
File: files.py Project: uasau/midgard
    def download_file(
        self,
        file_key: str,
        file_vars: Optional[Dict[str, str]] = None,
        file_path: Optional[pathlib.Path] = None,
        create_dirs: bool = True,
        **path_args: Any,
    ) -> Optional[pathlib.Path]:
        """Download a file from the web and save it to disk

        Use pycurl (libcurl) to do the actual downloading. Requests might be
        nicer for this, but it turned out to be much slower (and in practice
        unusable for bigger files) and does not really support
        ftp downloads.

        Args:
            file_key:     File key that should be downloaded.
            file_vars:    File variables used to find path from file_key.
            file_path:    Path where file will be saved, default is to read from configuration.
            create_dirs:  Create directories as necessary before downloading file.
            path_args:    Arguments passed on to .path() to find file_path.

        Returns:
            Path to downloaded file, None if no file was downloaded.
        """
        # Do not download anything if download_missing class variable is False
        if not self.download_missing:
            return None

        # Do not download anything if url is not given in configuration
        if "url" not in self[file_key] or not self[file_key].url.str:
            return None

        # Get file_path from configuration if it's not given explicitly
        file_url = self.url(file_key, file_vars=file_vars, **path_args)
        if file_path is None:
            file_path = self.path(file_key, file_vars=file_vars, download_missing=False, **path_args)
            file_path = file_path.with_name(file_url.name)
        if create_dirs:
            file_path.parent.mkdir(parents=True, exist_ok=True)

        log.info(f"Download {file_key} from '{file_url}' to '{file_path}'")
        with builtins.open(file_path, mode="wb") as fid:
            c = pycurl.Curl()
            c.setopt(c.URL, file_url)
            c.setopt(c.WRITEDATA, fid)
            try:
                c.perform()
                if not (200 <= c.getinfo(c.HTTP_CODE) <= 299):
                    raise pycurl.error()
            except pycurl.error:
                log.error(f"Problem downloading file: {c.getinfo(c.EFFECTIVE_URL)} ({c.getinfo(c.HTTP_CODE)})")
                if file_path.exists():  # Print first 10 lines to console
                    head_of_file = f"Contents of '{file_path}':\n" + "\n".join(file_path.read_text().split("\n")[:10])
                    log.info(console.indent(head_of_file, num_spaces=8))
                    file_path.unlink()
                log.warn(f"Try to download '{file_url}' manually and save it at '{file_path}'")
            else:
                log.info(f"Done downloading {file_key}")
            finally:
                c.close()
        return file_path
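
The pycurl pattern used above, reduced to a standalone hedged sketch; the URL and destination below are example values:

import pathlib
import pycurl

def fetch(url, dest):
    dest = pathlib.Path(dest)
    dest.parent.mkdir(parents=True, exist_ok=True)
    with open(dest, mode="wb") as fid:
        c = pycurl.Curl()
        c.setopt(c.URL, url)
        c.setopt(c.WRITEDATA, fid)
        try:
            c.perform()
        finally:
            c.close()

# fetch("https://example.com/file.txt", "downloads/file.txt")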
Code example #50
File: utils.py Project: agapow/datasan
def read_csv_as_dicts(in_pth):
    with open(in_pth, 'r', newline='') as in_hndl:
        rdr = csv.DictReader(in_hndl)
        return list(rdr)
Code example #51
File: aifc.py Project: kagada/Arianrhod
    def __init__(self, f):
        if isinstance(f, str):
            f = builtins.open(f, 'rb')
        # else, assume it is an open file object already
        self.initfp(f)
Code example #52
File: core.py Project: naveau/nipype
def run_command(runtime, output=None, timeout=0.01):
    """Run a command, read stdout and stderr, prefix with timestamp.

    The returned runtime contains a merged stdout+stderr log with timestamps
    """

    # Init variables
    cmdline = runtime.cmdline
    env = _canonicalize_env(runtime.environ)

    errfile = None
    outfile = None
    stdout = sp.PIPE
    stderr = sp.PIPE

    if output == 'file':
        outfile = os.path.join(runtime.cwd, 'output.nipype')
        stdout = open(outfile, 'wb')
        stderr = sp.STDOUT
    elif output == 'file_split':
        outfile = os.path.join(runtime.cwd, 'stdout.nipype')
        stdout = open(outfile, 'wb')
        errfile = os.path.join(runtime.cwd, 'stderr.nipype')
        stderr = open(errfile, 'wb')
    elif output == 'file_stdout':
        outfile = os.path.join(runtime.cwd, 'stdout.nipype')
        stdout = open(outfile, 'wb')
    elif output == 'file_stderr':
        errfile = os.path.join(runtime.cwd, 'stderr.nipype')
        stderr = open(errfile, 'wb')

    proc = sp.Popen(
        cmdline,
        stdout=stdout,
        stderr=stderr,
        shell=True,
        cwd=runtime.cwd,
        env=env,
        close_fds=True,
    )

    result = {
        'stdout': [],
        'stderr': [],
        'merged': [],
    }

    if output == 'stream':
        streams = [
            Stream('stdout', proc.stdout),
            Stream('stderr', proc.stderr)
        ]

        def _process(drain=0):
            try:
                res = select.select(streams, [], [], timeout)
            except select.error as e:
                iflogger.info(e)
                if e[0] == errno.EINTR:
                    return
                else:
                    raise
            else:
                for stream in res[0]:
                    stream.read(drain)

        while proc.returncode is None:
            proc.poll()
            _process()

        _process(drain=1)

        # collect results, merge and return
        result = {}
        temp = []
        for stream in streams:
            rows = stream._rows
            temp += rows
            result[stream._name] = [r[2] for r in rows]
        temp.sort()
        result['merged'] = [r[1] for r in temp]

    if output.startswith('file'):
        proc.wait()
        if outfile is not None:
            stdout.flush()
            stdout.close()
            with open(outfile, 'rb') as ofh:
                stdoutstr = ofh.read()
            result['stdout'] = read_stream(stdoutstr, logger=iflogger)
            del stdoutstr

        if errfile is not None:
            stderr.flush()
            stderr.close()
            with open(errfile, 'rb') as efh:
                stderrstr = efh.read()
            result['stderr'] = read_stream(stderrstr, logger=iflogger)
            del stderrstr

        if output == 'file':
            result['merged'] = result['stdout']
            result['stdout'] = []
    else:
        stdout, stderr = proc.communicate()
        if output == 'allatonce':  # Discard stdout and stderr otherwise
            result['stdout'] = read_stream(stdout, logger=iflogger)
            result['stderr'] = read_stream(stderr, logger=iflogger)

    runtime.returncode = proc.returncode
    try:
        proc.terminate()  # Ensure we are done
    except OSError as error:
        # Python 2 raises when the process is already gone
        if error.errno != errno.ESRCH:
            raise

    # Dereference & force GC for a cleanup
    del proc
    del stdout
    del stderr
    gc.collect()

    runtime.stderr = '\n'.join(result['stderr'])
    runtime.stdout = '\n'.join(result['stdout'])
    runtime.merged = '\n'.join(result['merged'])
    return runtime
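
A hedged usage sketch for `run_command`: nipype's real runtime object carries more state, so a plain namespace with just the fields the function reads (`cmdline`, `environ`, `cwd`) stands in here, and the call itself is left commented because the snippet's helpers (`_canonicalize_env`, `iflogger`) are defined elsewhere:

import os
from types import SimpleNamespace

runtime = SimpleNamespace(cmdline="echo hello",
                          environ=dict(os.environ),
                          cwd=os.getcwd())
# runtime = run_command(runtime, output="allatonce")
# print(runtime.stdout)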
Code example #53
    def create_file(self, path, default="#LWP-Cookies-2.0\n"):
        if not os.path.exists(path):
            with open(path, "w") as f:
                f.write(default)
Code example #54
File: parse-pileup.py Project: ReddyLab/POPSTARR2
# Read the variants from the VCF.  The snippet begins mid-script, so the
# loop header below is restored here, with `vcfFile` as a hypothetical
# name for the input path.
for line in open(vcfFile, "rt"):
    fields = line.rstrip().split()
    if (len(fields) < 9): continue
    (chr, pos, id, ref, alt, x, Pass, flags, GT) = fields[:9]
    altFields = alt.split(",")
    alt = altFields[0]  # Keeping only the first alternate allele
    #if(id=="."): continue
    if (id == "."): id = chr + "@" + pos
    if (variants.get(chr, None) is None): variants[chr] = []
    variants[chr].append(Variant(int(pos), id, ref, alt))
    #variants[chr].append(Variant(int(pos)-1,id,ref,alt)) # 8/21 added -1

# Process the pileup file
prevChr = None
variantsOnChr = None
nextVariant = None
for line in open(pileup, "rt"):
    fields = line.split()
    if (len(fields) < 6): continue
    (chr, pos, N, total, seq, qual) = fields
    pos = int(pos)
    if (prevChr is None or chr != prevChr):
        variantsOnChr = variants.get(chr, None)
        if (variantsOnChr is None): continue
        prevChr = chr
        nextVariant = 0
    while (nextVariant < len(variantsOnChr)
           and variantsOnChr[nextVariant].pos < pos - 1):
        nextVariant += 1
    if (nextVariant >= len(variantsOnChr)): continue
    variant = variantsOnChr[nextVariant]
    if (variant.pos != pos): continue
Code example #55
def get_pytest_rootdir():
    with open(rootdir_comm_path, 'r') as fp:
        return fp.read()
Code example #56
def open(file_uri, encoding, settings):
    file_path = file_uri.replace('file://', '')
    return builtins.open(file_path, encoding=encoding)
Code example #57
    def assert_ns_package(self, target, package_rel_dir):
        with open(self.init_py_path(target, package_rel_dir), 'r') as fp:
            self.assertEqual(
                "__import__('pkg_resources').declare_namespace(__name__)",
                fp.read().strip())
Code example #58
    def test_form_id_filter_for_require_auth_account(self):
        """
        Test formList formID filter for account that requires authentication
        """
        # Bob submits forms
        xls_path = os.path.join(settings.PROJECT_ROOT, "apps", "main", "tests",
                                "fixtures", "tutorial.xls")
        self._publish_xls_form_to_project(xlsform_path=xls_path)

        xls_file_path = os.path.join(settings.PROJECT_ROOT, "apps", "logger",
                                     "fixtures",
                                     "external_choice_form_v1.xlsx")
        self._publish_xls_form_to_project(xlsform_path=xls_file_path)

        # Set require auth to true
        self.user.profile.require_auth = True
        self.user.profile.save()
        request = self.factory.get('/', {'formID': self.xform.id_string})
        response = self.view(request, username=self.user.username)
        self.assertEqual(response.status_code, 401)

        # Test for authenticated user but unrecognized formID
        auth = DigestAuth('bob', 'bobbob')
        request = self.factory.get('/', {'formID': 'unrecognizedID'})
        request.META.update(auth(request.META, response))
        response = self.view(request, username=self.user.username)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, [])

        # Test for authenticated user and valid formID
        request = self.factory.get('/', {'formID': self.xform.id_string})
        self.assertTrue(self.user.profile.require_auth)
        response = self.view(request, username=self.user.username)
        self.assertEqual(response.status_code, 401)
        auth = DigestAuth('bob', 'bobbob')
        request.META.update(auth(request.META, response))
        response = self.view(request, username=self.user.username)
        self.assertEqual(response.status_code, 200)

        path = os.path.join(os.path.dirname(__file__), '..', 'fixtures',
                            'formList2.xml')

        with open(path, encoding='utf-8') as f:
            form_list = f.read().strip()
            data = {
                "hash": self.xform.hash,
                "pk": self.xform.pk,
                'version': self.xform.version
            }
            content = response.render().content.decode('utf-8')
            self.assertEqual(content, form_list % data)

        # Test for shared forms
        # Create user Alice
        alice_data = {
            'username': '******',
            'email': '*****@*****.**',
            'password1': 'alice',
            'password2': 'alice'
        }
        alice_profile = self._create_user_profile(alice_data)

        # check that she can authenticate successfully
        request = self.factory.get('/')
        response = self.view(request)
        self.assertEqual(response.status_code, 401)
        auth = DigestAuth('alice', 'alice')
        request.META.update(auth(request.META, response))
        response = self.view(request)
        self.assertEqual(response.status_code, 200)

        self.assertFalse(
            ReadOnlyRole.user_has_role(alice_profile.user, self.project))

        # share Bob's project with Alice
        data = {'username': '******', 'role': ReadOnlyRole.name}
        request = self.factory.post('/', data=data, **self.extra)
        share_view = ProjectViewSet.as_view({'post': 'share'})
        project_id = self.project.pk
        response = share_view(request, pk=project_id)
        self.assertEqual(response.status_code, 204)
        self.assertTrue(
            ReadOnlyRole.user_has_role(alice_profile.user, self.project))

        request = self.factory.get('/', {'formID': self.xform.id_string})
        response = self.view(request)
        self.assertEqual(response.status_code, 401)
        auth = DigestAuth('alice', 'alice')
        request.META.update(auth(request.META, response))
        response = self.view(request, username='******')
        self.assertEqual(response.status_code, 200)

        path = os.path.join(os.path.dirname(__file__), '..', 'fixtures',
                            'formList2.xml')

        with open(path, encoding='utf-8') as f:
            form_list = f.read().strip()
            data = {
                "hash": self.xform.hash,
                "pk": self.xform.pk,
                "version": self.xform.version
            }
            content = response.render().content.decode('utf-8')
            self.assertEqual(content, form_list % data)

        # Bob's profile
        bob_profile = self.user

        # Submit form as Alice
        self._login_user_and_profile(extra_post_data=alice_data)
        self.assertEqual(self.user.username, 'alice')

        path = os.path.join(settings.PROJECT_ROOT, "apps", "main", "tests",
                            "fixtures", "good_eats_multilang",
                            "good_eats_multilang.xls")
        self._publish_xls_form_to_project(xlsform_path=path)
        self.assertTrue(OwnerRole.user_has_role(alice_profile.user,
                                                self.xform))

        # Share Alice's form with Bob
        ReadOnlyRole.add(bob_profile, self.xform)
        self.assertTrue(ReadOnlyRole.user_has_role(bob_profile, self.xform))

        # Get unrecognized formID as bob
        request = self.factory.get('/', {'formID': 'unrecognizedID'})
        response = self.view(request, username=bob_profile.username)
        self.assertEqual(response.status_code, 401)
        auth = DigestAuth('bob', 'bobbob')
        request.META.update(auth(request.META, response))
        response = self.view(request, username=bob_profile.username)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, [])

        # Get Alice's form as Bob
        request = self.factory.get('/', {'formID': 'good_eats_multilang'})
        response = self.view(request, username=bob_profile.username)
        self.assertEqual(response.status_code, 401)
        auth = DigestAuth('bob', 'bobbob')
        request.META.update(auth(request.META, response))
        response = self.view(request, username=bob_profile.username)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['formID'], 'good_eats_multilang')
Code example #59
def close(h):
    http = h['http']
    connection = h['connection']

    # This is sometimes necessary because python doesn't set up
    # sys.stderr to be line buffered and so debug, errors or
    # exceptions printed previously might not be emitted before the
    # plugin exits.
    sys.stderr.flush()

    # If the connection failed earlier ensure we clean up the disk.
    if h['failed']:
        delete_disk_on_failure(h)
        connection.close()
        return

    try:
        # Issue a flush request on close so that the data is written to
        # persistent store before we create the VM.
        if h['can_flush']:
            flush(h)

        http.close()

        disk = h['disk']
        transfer_service = h['transfer_service']

        transfer_service.finalize()

        # Wait until the transfer disk job is completed, since only
        # then can we be sure the disk is unlocked.  We poll the disk
        # status until it leaves the LOCKED state; if the disk has
        # vanished in the meantime, the transfer must have failed.
        disk_id = disk.id
        start = time.time()
        try:
            while True:
                time.sleep(1)
                disk_service = h['disk_service']
                disk = disk_service.get()
                if disk.status == types.DiskStatus.LOCKED:
                    if time.time() > start + timeout:
                        raise RuntimeError("timed out waiting for transfer "
                                           "to finalize")
                    continue
                if disk.status == types.DiskStatus.OK:
                    debug("finalized after %s seconds" % (time.time() - start))
                    break
        except sdk.NotFoundError:
            raise RuntimeError("transfer failed: disk %s not found" % disk_id)

        # Write the disk ID file.  Only do this on successful completion.
        with builtins.open(params['diskid_file'], 'w') as fp:
            fp.write(disk.id)

    except:
        # Otherwise on any failure we must clean up the disk.
        delete_disk_on_failure(h)
        raise

    connection.close()
Code example #60
File: setup.py Project: andriineronov/ODA_API
from __future__ import absolute_import, division, print_function

from builtins import (bytes, str, open, super, range, zip, round, input, int,
                      pow, object, map)

__author__ = 'andrea tramacere'

from setuptools import setup, find_packages
import glob

with open("./requirements.txt", 'r') as f:
    install_req = f.readlines()

packs = find_packages()

print('packs', packs)

include_package_data = True

scripts_list = glob.glob('./bin/*')
setup(
    name='oda_api',
    version='1.0',
    description='API plugin  for CDCI online data analysis',
    author='Andrea Tramacere',
    author_email='*****@*****.**',
    scripts=scripts_list,
    packages=packs,