Example #1
def make_file(filetype):
    with NamedTemporaryFile("w", suffix="." + filetype, delete=False) as f:
        return f.name
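Because the helper passes delete=False and returns only the path, the caller owns the file and must remove it. A minimal usage sketch (assuming `os` is imported alongside NamedTemporaryFile; the filetype is a placeholder):

import os

path = make_file("csv")          # e.g. creates something like /tmp/tmpXXXXXXXX.csv
try:
    with open(path, "w") as f:
        f.write("a,b,c\n")
finally:
    os.remove(path)              # delete=False means nothing cleans this up automatically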
Example #2
 def set_stderr(self):
     self.stderr = NamedTemporaryFile('w', delete=False,
                                      prefix='astevaltest_stderr')
     self.interp.err_writer = self.stderr
Example #3
File: plot.py Project: kelloggm/crossbot
def plot(client, request):
    '''Plot everyone's times in a date range.
    `plot [plot_type] [num_days] [smoothing] [scale] [start date] [end date]`, all arguments optional.
    `plot_type` is either `normalized` (default) or `times` for a non-smoothed plot of actual times.
    `smoothing` is between 0 (no smoothing) and 1 exclusive; 0.6 by default.
    You can provide either `num_days` or `start_date` and `end_date`.
    `plot` plots the last 5 days by default.
    The scale can be `log` or `linear`.'''

    args = request.args

    start_date = crossbot.date(args.start_date)
    end_date = crossbot.date(args.end_date)

    start_dt = datetime.datetime.strptime(start_date, "%Y-%m-%d").date()
    end_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d").date()

    # we only use num_days if the other params weren't given
    # otherwise set num_days based the given range
    if start_date == end_date:
        start_dt -= datetime.timedelta(days=args.num_days)
        start_date = start_dt.strftime("%Y-%m-%d")
    else:
        args.num_days = (end_dt - start_dt).days

    if not 0 <= args.smooth <= 0.95:
        request.reply('smooth should be between 0 and 0.95', direct=True)
        return

    if args.scale is None:
        args.scale = 'linear' if args.plot_type == 'normalized' else 'log'

    with sqlite3.connect(crossbot.db_path) as con:
        cursor = con.execute(
            '''
        SELECT userid, date, seconds
        FROM crossword_time
        WHERE date
          BETWEEN date(?)
          AND     date(?)
        ORDER BY date, userid''', (start_date, end_date))

        userids_present = set()

        times = defaultdict(list)
        times_by_date = defaultdict(dict)
        for userid, date, seconds in cursor:
            userids_present.add(userid)
            if seconds >= 0:
                # don't add failures to the times plot
                times[userid].append((date, seconds))
            times_by_date[date][userid] = seconds

    if args.plot_type == 'normalized':
        sorted_dates = sorted(times_by_date.keys())

        # failures come with a heavier ranking penalty
        MAX_PENALTY = -1.5
        FAILURE_PENALTY = -2

        def mk_score(mean, t, stdev):
            if t < 0:
                return FAILURE_PENALTY
            if stdev == 0:
                return 0

            score = (mean - t) / stdev
            return max(MAX_PENALTY, score)

        # scores are the stdev away from mean of that day
        scores = {}
        for date, user_times in times_by_date.items():
            times = user_times.values()
            # make failures 1 minute worse than the worst time
            times = [t if t >= 0 else max(times) + 60 for t in times]
            q1, q3 = np.percentile(times, [25, 75])
            stdev = statistics.pstdev(times)
            o1, o3 = q1 - stdev, q3 + stdev
            times = [t for t in times if o1 <= t <= o3]
            mean = statistics.mean(times)
            stdev = statistics.pstdev(times, mean)
            scores[date] = {
                userid: mk_score(mean, t, stdev)
                for userid, t in user_times.items()
            }

        new_score_weight = 1 - args.smooth
        running = defaultdict(list)

        MAX_PLOT_SCORE = 1.0
        MIN_PLOT_SCORE = -1.0
        weighted_scores = defaultdict(list)
        for date in sorted_dates:
            for user, score in scores[date].items():

                old_score = running.get(user)

                new_score = score * new_score_weight + old_score * (1 - new_score_weight) \
                            if old_score is not None else score

                running[user] = new_score
                plot_score = max(MIN_PLOT_SCORE, min(new_score,
                                                     MAX_PLOT_SCORE))
                weighted_scores[user].append((date, plot_score))

    width, height, dpi = (120 * args.num_days), 600, 100
    width = max(400, min(width, 1000))

    fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)
    ax = fig.add_subplot(1, 1, 1)
    ax.set_yscale(args.scale)

    def fmt_min(sec, pos):
        minutes, seconds = divmod(int(sec), 60)
        return '{}:{:02}'.format(minutes, seconds)

    cmap = plt.get_cmap('nipy_spectral')
    markers = cycle(['-o', '-X', '-s', '-^'])

    if args.plot_type == 'normalized':
        weighted_scores = OrderedDict(sorted(weighted_scores.items()))
        n_users = len(weighted_scores)
        colors = [cmap(i / n_users) for i in range(n_users)]
        for (userid, pairs), color in zip(weighted_scores.items(), colors):
            dates, scores = zip(*pairs)
            dates = [
                datetime.datetime.strptime(d, "%Y-%m-%d").date() for d in dates
            ]
            name = client.user(userid)
            ax.plot_date(mdates.date2num(dates),
                         scores,
                         next(markers),
                         label=name,
                         color=color)

    elif args.plot_type == 'times':
        max_sec = 0
        n_users = len(times)
        colors = [cmap(i / n_users) for i in range(n_users)]
        times = OrderedDict(sorted(times.items()))
        for (userid, entries), color in zip(times.items(), colors):

            dates, seconds = zip(*entries)
            max_sec = max(max_sec, max(seconds))
            dates = [
                datetime.datetime.strptime(d, "%Y-%m-%d").date() for d in dates
            ]
            name = client.user(userid)
            ax.plot_date(mdates.date2num(dates),
                         seconds,
                         next(markers),
                         label=name,
                         color=color)

        if args.scale == 'log':
            ticks = takewhile(lambda x: x <= max_sec,
                              (30 * (2**i) for i in range(10)))
            ax.yaxis.set_ticks(list(ticks))
        else:
            ax.yaxis.set_ticks(range(0, max_sec + 1, 30))

        ax.set_ylim(bottom=0)

    else:
        raise RuntimeError('invalid plot_type {}'.format(args.plot_type))

    fig.autofmt_xdate()
    ax.xaxis.set_major_locator(mdates.DayLocator())
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %-d'))  # May 3
    if args.plot_type == 'times':
        ax.yaxis.set_major_formatter(
            matplotlib.ticker.FuncFormatter(fmt_min))  # 1:30
    ax.legend(fontsize=6, loc='upper left')

    temp = NamedTemporaryFile(suffix='.png', delete=False)
    fig.savefig(temp, format='png', bbox_inches='tight')
    temp.close()
    plt.close(fig)

    request.upload('plot', temp.name)

    # don't remove temp files if using the command line client,
    # so the user can see them
    if not isinstance(request, crossbot.client.CommandLineRequest):
        os.remove(temp.name)
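For the `normalized` plot, each user's daily score is blended with their previous running score using weight `1 - smooth` on the new value, then clamped to [-1, 1]. A standalone sketch of that recurrence (the function name is hypothetical):

def smooth_scores(daily_scores, smooth):
    """Exponentially smooth scores: weight (1 - smooth) goes to the newest value."""
    new_weight = 1 - smooth
    running = None
    smoothed = []
    for score in daily_scores:
        running = score if running is None else (
            score * new_weight + running * (1 - new_weight))
        smoothed.append(max(-1.0, min(running, 1.0)))  # clamp to the plot range
    return smoothed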
Example #4
    def export(self, out_f=None, format='mp3', codec=None, bitrate=None, parameters=None, tags=None, id3v2_version='4',
               cover=None):
        """
        Export an AudioSegment to a file with given options

        out_f (string):
            Path to destination audio file. Also accepts os.PathLike objects on
            python >= 3.6

        format (string)
            Format for destination audio file.
            ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)

        codec (string)
            Codec used to encode the destination file.

        bitrate (string)
            Bitrate used when encoding destination file. (64, 92, 128, 256, 312k...)
            Each codec accepts different bitrate arguments so take a look at the
            ffmpeg documentation for details (bitrate usually shown as -b, -ba or
            -a:b).

        parameters (string)
            Additional ffmpeg/avconv parameters

        tags (dict)
            Set metadata information to destination files
            usually used as tags. ({title='Song Title', artist='Song Artist'})

        id3v2_version (string)
            Set ID3v2 version for tags. (default: '4')

        cover (file)
            Set cover for audio file from image file. (png or jpg)
        """
        id3v2_allowed_versions = ['3', '4']

        out_f, _ = _fd_or_path_or_tempfile(out_f, 'wb+')
        out_f.seek(0)

        if format == "raw":
            out_f.write(self._data)
            out_f.seek(0)
            return out_f

        # for wav output we can just write the data directly to out_f
        if format == "wav":
            data = out_f
        else:
            data = NamedTemporaryFile(mode="wb", delete=False)

        wave_data = wave.open(data, 'wb')
        wave_data.setnchannels(self.channels)
        wave_data.setsampwidth(self.sample_width)
        wave_data.setframerate(self.frame_rate)
        # For some reason packing the wave header struct with
        # a float in python 2 doesn't throw an exception
        wave_data.setnframes(int(self.frame_count()))
        wave_data.writeframesraw(self._data)
        wave_data.close()

        # for wav files, we're done (wav data is written directly to out_f)
        if format == 'wav':
            return out_f

        output = NamedTemporaryFile(mode="w+b", delete=False)

        # build converter command to export
        conversion_command = [
            self.converter,
            '-y',  # always overwrite existing files
            "-f", "wav", "-i", data.name,  # input options (filename last)
        ]

        if codec is None:
            codec = self.DEFAULT_CODECS.get(format, None)

        if cover is not None:
            if cover.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.tif', '.tiff')) and format == "mp3":
                conversion_command.extend(["-i", cover, "-map", "0", "-map", "1", "-c:v", "mjpeg"])
            else:
                raise AttributeError(
                    "Currently cover images are only supported by MP3 files. The allowed image formats are: .tif, .jpg, .bmp, .jpeg and .png.")

        if codec is not None:
            # force audio encoder
            conversion_command.extend(["-acodec", codec])

        if bitrate is not None:
            conversion_command.extend(["-b:a", bitrate])

        if parameters is not None:
            # extend arguments with arbitrary set
            conversion_command.extend(parameters)

        if tags is not None:
            if not isinstance(tags, dict):
                raise InvalidTag("Tags must be a dictionary.")
            else:
                # Extend converter command with tags
                # print(tags)
                for key, value in tags.items():
                    conversion_command.extend(
                        ['-metadata', '{0}={1}'.format(key, value)])

                if format == 'mp3':
                    # set id3v2 tag version
                    if id3v2_version not in id3v2_allowed_versions:
                        raise InvalidID3TagVersion(
                            "id3v2_version not allowed, allowed versions: %s" % id3v2_allowed_versions)
                    conversion_command.extend([
                        "-id3v2_version", id3v2_version
                    ])

        if sys.platform == 'darwin' and codec == 'mp3':
            conversion_command.extend(["-write_xing", "0"])

        conversion_command.extend([
            "-f", format, output.name,  # output options (filename last)
        ])

        log_conversion(conversion_command)

        # read stdin / write stdout
        with open(os.devnull, 'rb') as devnull:
            p = subprocess.Popen(conversion_command, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p_out, p_err = p.communicate()

        log_subprocess_output(p_out)
        log_subprocess_output(p_err)

        if p.returncode != 0:
            raise CouldntEncodeError(
                "Encoding failed. ffmpeg/avlib returned error code: {0}\n\nCommand:{1}\n\nOutput from ffmpeg/avlib:\n\n{2}".format(
                    p.returncode, conversion_command, p_err))

        output.seek(0)
        out_f.write(output.read())

        data.close()
        output.close()

        os.unlink(data.name)
        os.unlink(output.name)

        out_f.seek(0)
        return out_f
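A hedged usage sketch of this export method, assuming it is the one bound to pydub's AudioSegment (file paths and tag values are placeholders):

from pydub import AudioSegment

song = AudioSegment.from_file("input.wav")
song.export("output.mp3",
            format="mp3",
            bitrate="192k",
            tags={"title": "Song Title", "artist": "Song Artist"},
            cover="cover.jpg")   # cover images are only applied for mp3 output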
Example #5
    def test_matepair_bin(self):
        'It tests the split mate pairs binary'
        mate_bin = os.path.join(SEQ_BIN_DIR, 'split_matepairs')
        stdout = check_output([mate_bin, '-h'])
        assert 'usage' in stdout

        out_fhand = NamedTemporaryFile(suffix='.fasta')
        seq_fpath = os.path.join(TEST_DATA_DIR, '454_reads2.fastq')
        linkers = '454'
        cmd = [mate_bin, '-o', out_fhand.name, '-l', linkers, seq_fpath]
        check_output(cmd)
        result = open(out_fhand.name).read()

        assert r'G109AZL01D8U3X\1' in result
        assert r'G109AZL01D8U3X\2' in result

        out_fhand = NamedTemporaryFile(suffix='.fasta')
        seq_fpath = os.path.join(TEST_DATA_DIR, '454_reads.fastq')
        linkers = '454'
        cmd = [mate_bin, '-o', out_fhand.name, '-l', linkers, seq_fpath]
        check_output(cmd)
        result = open(out_fhand.name).read()
        assert r'@G109AZL01BJHT8\1' in result
        assert r'@G109AZL01BJHT8\2' in result

        mate_fhand = create_a_matepair_file()
        out_fhand = NamedTemporaryFile(suffix='.fasta')

        cmd = [
            mate_bin, '-o', out_fhand.name, '-l', TITANIUM_LINKER,
            mate_fhand.name
        ]
        from subprocess import call
        call(cmd)
        result = open(out_fhand.name).read()
        assert result.startswith(r'>seq1\1')

        cmd = [mate_bin, '-o', out_fhand.name, '-l', '454', mate_fhand.name]
        check_output(cmd)
        result = open(out_fhand.name).read()
        assert result.startswith(r'>seq1\1')

        # Error if a file path is given
        cmd = [
            mate_bin, '-o', out_fhand.name, '-l', 'adaptors.fasta',
            mate_fhand.name
        ]
        stderr = NamedTemporaryFile(suffix='.stderr')
        try:
            check_output(cmd, stderr=stderr)
            self.fail('Error expected')
        except CalledProcessError:
            pass

        # Error if the linker is not a DNA sequence
        cmd = [
            mate_bin, '-o', out_fhand.name, '-l', 'iamnotasequence',
            mate_fhand.name
        ]
        stderr = NamedTemporaryFile(suffix='.stderr')
        try:
            check_output(cmd, stderr=stderr)
            self.fail('Error expected')
        except CalledProcessError:
            pass

        # ion_torrent option is recognized
        cmd = [mate_bin, '-o', out_fhand.name, '-l', 'ion_torrent', seq_fpath]
        check_output(cmd)
Example #6
def create_export_tarball(course_module, course_key, context, status=None):
    """
    Generates the export tarball, or returns None if there was an error.

    Updates the context with any error information if applicable.
    """
    name = course_module.url_name
    export_file = NamedTemporaryFile(prefix=name + '.', suffix=".tar.gz")
    root_dir = path(mkdtemp())

    try:
        if isinstance(course_key, LibraryLocator):
            export_library_to_xml(modulestore(), contentstore(), course_key,
                                  root_dir, name)
        else:
            export_course_to_xml(modulestore(), contentstore(),
                                 course_module.id, root_dir, name)

        if status:
            status.set_state(u'Compressing')
            status.increment_completed_steps()
        LOGGER.debug(u'tar file being generated at %s', export_file.name)
        with tarfile.open(name=export_file.name, mode='w:gz') as tar_file:
            tar_file.add(root_dir / name, arcname=name)

    except SerializationError as exc:
        LOGGER.exception(u'There was an error exporting %s',
                         course_key,
                         exc_info=True)
        parent = None
        try:
            failed_item = modulestore().get_item(exc.location)
            parent_loc = modulestore().get_parent_location(
                failed_item.location)

            if parent_loc is not None:
                parent = modulestore().get_item(parent_loc)
        except:  # pylint: disable=bare-except
            # if we have a nested exception, then we'll show the more generic error message
            pass

        context.update({
            'in_err':
            True,
            'raw_err_msg':
            str(exc),
            'edit_unit_url':
            reverse_usage_url("container_handler", parent.location)
            if parent else "",
        })
        if status:
            status.fail(
                json.dumps({
                    'raw_error_msg': context['raw_err_msg'],
                    'edit_unit_url': context['edit_unit_url']
                }))
        raise
    except Exception as exc:
        LOGGER.exception(u'There was an error exporting %s',
                         course_key,
                         exc_info=True)
        context.update({
            'in_err': True,
            'edit_unit_url': None,
            'raw_err_msg': str(exc)
        })
        if status:
            status.fail(json.dumps({'raw_error_msg': context['raw_err_msg']}))
        raise
    finally:
        if os.path.exists(root_dir / name):
            shutil.rmtree(root_dir / name)

    return export_file
Example #7
File: data.py Project: zonca/astropy
def download_file(remote_url, cache=False, show_progress=True, timeout=None):
    """
    Accepts a URL, downloads and optionally caches the result
    returning the filename, with a name determined by the file's MD5
    hash. If ``cache=True`` and the file is present in the cache, just
    returns the filename.

    Parameters
    ----------
    remote_url : str
        The URL of the file to download

    cache : bool, optional
        Whether to use the cache

    show_progress : bool, optional
        Whether to display a progress bar during the download (default
        is `True`). Regardless of this setting, the progress bar is only
        displayed when outputting to a terminal.

    timeout : float, optional
        The timeout, in seconds.  Otherwise, use
        `astropy.utils.data.Conf.remote_timeout`.

    Returns
    -------
    local_path : str
        Returns the local path that the file was downloaded to.

    Raises
    ------
    urllib2.URLError, urllib.error.URLError
        Whenever there's a problem getting the remote file.
    """

    from astropy.utils.console import ProgressBarOrSpinner

    if timeout is None:
        timeout = conf.remote_timeout

    missing_cache = False

    if cache:
        try:
            dldir, urlmapfn = _get_download_cache_locs()
        except OSError as e:
            msg = 'Remote data cache could not be accessed due to '
            estr = '' if len(e.args) < 1 else (': ' + str(e))
            warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
            cache = False
            missing_cache = True  # indicates that the cache is missing to raise a warning later

    url_key = remote_url

    # Check if URL is Astropy data server, which has alias, and cache it.
    if (url_key.startswith(conf.dataurl) and
            conf.dataurl not in _dataurls_to_alias):
        try:
            with urllib.request.urlopen(conf.dataurl, timeout=timeout) as remote:
                _dataurls_to_alias[conf.dataurl] = [conf.dataurl, remote.geturl()]
        except urllib.error.URLError:  # Host unreachable
            _dataurls_to_alias[conf.dataurl] = [conf.dataurl]
    try:
        if cache:
            # We don't need to acquire the lock here, since we are only reading
            with shelve.open(urlmapfn) as url2hash:
                if url_key in url2hash:
                    return url2hash[url_key]
                # If there is a cached copy from mirror, use it.
                else:
                    for cur_url in _dataurls_to_alias.get(conf.dataurl, []):
                        if url_key.startswith(cur_url):
                            url_mirror = url_key.replace(cur_url,
                                                         conf.dataurl_mirror)
                            if url_mirror in url2hash:
                                return url2hash[url_mirror]

        with urllib.request.urlopen(remote_url, timeout=timeout) as remote:
            # keep a hash to rename the local file to the hashed name
            hash = hashlib.md5()

            info = remote.info()
            if 'Content-Length' in info:
                try:
                    size = int(info['Content-Length'])
                except ValueError:
                    size = None
            else:
                size = None

            if size is not None:
                check_free_space_in_dir(gettempdir(), size)
                if cache:
                    check_free_space_in_dir(dldir, size)

            if show_progress and sys.stdout.isatty():
                progress_stream = sys.stdout
            else:
                progress_stream = io.StringIO()

            dlmsg = f"Downloading {remote_url}"
            with ProgressBarOrSpinner(size, dlmsg, file=progress_stream) as p:
                with NamedTemporaryFile(delete=False) as f:
                    try:
                        bytes_read = 0
                        block = remote.read(conf.download_block_size)
                        while block:
                            f.write(block)
                            hash.update(block)
                            bytes_read += len(block)
                            p.update(bytes_read)
                            block = remote.read(conf.download_block_size)
                    except BaseException:
                        if os.path.exists(f.name):
                            os.remove(f.name)
                        raise

        if cache:
            _acquire_download_cache_lock()
            try:
                with shelve.open(urlmapfn) as url2hash:
                    # We check now to see if another process has
                    # inadvertently written the file underneath us
                    # already
                    if url_key in url2hash:
                        return url2hash[url_key]
                    local_path = os.path.join(dldir, hash.hexdigest())
                    shutil.move(f.name, local_path)
                    url2hash[url_key] = local_path
            finally:
                _release_download_cache_lock()
        else:
            local_path = f.name
            if missing_cache:
                msg = ('File downloaded to temporary location due to problem '
                       'with cache directory and will not be cached.')
                warn(CacheMissingWarning(msg, local_path))
            if conf.delete_temporary_downloads_at_exit:
                global _tempfilestodel
                _tempfilestodel.append(local_path)
    except urllib.error.URLError as e:
        if hasattr(e, 'reason') and hasattr(e.reason, 'errno') and e.reason.errno == 8:
            e.reason.strerror = e.reason.strerror + '. requested URL: ' + remote_url
            e.reason.args = (e.reason.errno, e.reason.strerror)
        raise e
    except socket.timeout as e:
        # this isn't supposed to happen, but occasionally a socket.timeout gets
        # through.  It's supposed to be caught in `urllib2` and raised in this
        # way, but for some reason in mysterious circumstances it doesn't. So
        # we'll just re-raise it here instead
        raise urllib.error.URLError(e)

    return local_path
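A short usage sketch, assuming this is astropy.utils.data.download_file (the URL is a placeholder):

from astropy.utils.data import download_file

local_path = download_file("http://example.com/data.fits",
                           cache=True, show_progress=False, timeout=30)
with open(local_path, "rb") as f:
    print(f.read(6))   # b'SIMPLE' for a FITS file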
Example #8
def gate_body(args, tasks):
    with Task('Vm: Basic GraalVM Tests', tasks,
              tags=[VmGateTasks.compiler]) as t:
        if t and mx_vm.has_component('GraalVM compiler'):
            # 1. the build must be a GraalVM
            # 2. the build must be JVMCI-enabled since the 'GraalVM compiler' component is registered
            mx_vm.check_versions(
                mx_vm.graalvm_output(),
                graalvm_version_regex=mx_vm.graalvm_version_regex,
                expect_graalvm=True,
                check_jvmci=True)

    with Task('Vm: GraalVM dist names', tasks,
              tags=[VmGateTasks.integration]) as t:
        if t:
            for suite, env_file_name, graalvm_dist_name in env_tests:
                out = mx.LinesOutputCapture()
                mx.run_mx([
                    '--no-warning', '--env', env_file_name, 'graalvm-dist-name'
                ],
                          suite,
                          out=out,
                          err=out,
                          env={})
                mx.log(
                    "Checking that the env file '{}' in suite '{}' produces a GraalVM distribution named '{}'"
                    .format(env_file_name, suite.name, graalvm_dist_name))
                if len(out.lines) != 1 or out.lines[0] != graalvm_dist_name:
                    mx.abort(
                        "Unexpected GraalVM dist name for env file '{}' in suite '{}'.\nExpected: '{}', actual: '{}'.\nDid you forget to update the registration of the GraalVM config?"
                        .format(env_file_name, suite.name, graalvm_dist_name,
                                '\n'.join(out.lines)))

    if mx_vm.has_component('LibGraal'):
        libgraal_location = mx_vm.get_native_image_locations(
            'LibGraal', 'jvmcicompiler')
        if libgraal_location is None:
            mx.warn(
                "Skipping libgraal tests: no library enabled in the LibGraal component"
            )
        else:
            extra_vm_arguments = [
                '-XX:+UseJVMCICompiler', '-XX:+UseJVMCINativeLibrary',
                '-XX:JVMCILibPath=' + dirname(libgraal_location)
            ]
            if args.extra_vm_argument:
                extra_vm_arguments += args.extra_vm_argument
            import mx_compiler

            # run avrora on the GraalVM binary itself
            with Task('LibGraal Compiler:GraalVM DaCapo-avrora',
                      tasks,
                      tags=[VmGateTasks.libgraal]) as t:
                if t:
                    mx.run([
                        join(mx_vm.graalvm_home(), 'bin',
                             'java'), '-XX:+UseJVMCICompiler',
                        '-XX:+UseJVMCINativeLibrary', '-jar',
                        mx.library('DACAPO').get_path(True), 'avrora'
                    ])

            with Task('LibGraal Compiler:CTW',
                      tasks,
                      tags=[VmGateTasks.libgraal]) as t:
                if t:
                    mx_compiler.ctw([
                        '-DCompileTheWorld.Config=Inline=false CompilationFailureAction=ExitVM',
                        '-esa',
                        '-XX:+EnableJVMCI',
                        '-DCompileTheWorld.MultiThreaded=true',
                        '-Dgraal.InlineDuringParsing=false',
                        '-Dgraal.TrackNodeSourcePosition=true',
                        '-DCompileTheWorld.Verbose=false',
                        '-XX:ReservedCodeCacheSize=300m',
                    ], extra_vm_arguments)

            mx_compiler.compiler_gate_benchmark_runner(
                tasks, extra_vm_arguments, prefix='LibGraal Compiler:')

            with Task('LibGraal Truffle:unittest',
                      tasks,
                      tags=[VmGateTasks.libgraal]) as t:
                if t:

                    def _unittest_config_participant(config):
                        vmArgs, mainClass, mainClassArgs = config

                        def is_truffle_fallback(arg):
                            fallback_args = [
                                "-Dtruffle.TruffleRuntime=com.oracle.truffle.api.impl.DefaultTruffleRuntime",
                                "-Dgraalvm.ForcePolyglotInvalid=true"
                            ]
                            return arg in fallback_args

                        newVmArgs = [
                            arg for arg in vmArgs
                            if not is_truffle_fallback(arg)
                        ]
                        return (newVmArgs, mainClass, mainClassArgs)

                    mx_unittest.add_config_participant(
                        _unittest_config_participant)
                    excluded_tests = environ.get("TEST_LIBGRAAL_EXCLUDE")
                    if excluded_tests:
                        with NamedTemporaryFile(prefix='blacklist.',
                                                mode='w',
                                                delete=False) as fp:
                            fp.file.writelines(
                                [l + '\n' for l in excluded_tests.split()])
                            unittest_args = ["--blacklist", fp.name]
                    else:
                        unittest_args = []
                    unittest_args = unittest_args + [
                        "--enable-timing", "--verbose"
                    ]
                    compiler_log_file = "graal-compiler.log"
                    mx_unittest.unittest(unittest_args + extra_vm_arguments + [
                        "-Dgraal.TruffleCompileImmediately=true",
                        "-Dgraal.TruffleBackgroundCompilation=false",
                        "-Dgraal.TraceTruffleCompilation=true",
                        "-Dgraal.PrintCompilation=true", "-Dgraal.LogFile={0}".
                        format(compiler_log_file), "truffle"
                    ])
                    if exists(compiler_log_file):
                        remove(compiler_log_file)
    else:
        mx.warn("Skipping libgraal tests: component not enabled")

    gate_substratevm(tasks)
    gate_sulong(tasks)
    gate_ruby(tasks)
    gate_python(tasks)
    gate_svm_truffle_tck_js(tasks)
Example #9
    def upload_ndarray(self,
                       ndarray,
                       product_id,
                       image_id,
                       proj4=None,
                       wkt_srs=None,
                       geotrans=None,
                       raster_meta=None,
                       overviews=None,
                       overview_resampler=None,
                       add_namespace=False,
                       **kwargs):
        """Upload an ndarray with georeferencing information.

        This is an asynchronous operation and you can query for the status
        using `Catalog.upload_result()` with the upload_id returned by this
        method.  The upload id is the image_id.

        :param ndarray ndarray: (Required) A numpy ndarray with image data. If you are providing a multi-band image
            it should have 3 dimensions and the 3rd dimension of the array should index the bands. The dtype of the
            ndarray must also be one of the following:
            ['uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'float32', 'float64']
        :param str product_id: (Required) The id of the product this image belongs to.
        :param str image_id: (Required) Resulting image's id = <product_id>:<image_id>.
        :param str proj4: (One of proj4 or wkt_srs is required) A proj4 formatted string representing the
            spatial reference system used by the image.
        :param str wkt_srs: (One of proj4 or wkt_srs is required) A well known text string representing the
            spatial reference system used by the image.
        :param list(float) geotrans: (Required) The 6 number geographic transform of the image. Maps pixel coordinates
            to coordinates in the specified spatial reference system.
        :param dict raster_meta: Metadata returned from the :meth:`descarteslabs.client.services.raster.Raster.ndarray`
            request which generated the initial data for the :param ndarray: being uploaded. Passing :param geotrans:
            and :param wkt_srs: is unnecessary in this case.
        :param list(int) overviews: a list of overview resolution magnification factors i.e [2, 4] would make two
            overviews at 2x and 4x the native resolution. Maximum number of overviews allowed is 16.
        :param str overview_resampler: Resampler algorithm to use when building overviews. Controls how pixels are
            combined to make lower res pixels in overviews. Allowed resampler algorithms are:
            ['nearest', 'average', 'gauss', 'cubic', 'cubicspline', 'lanczos', 'average_mp',
            'average_magphase', 'mode'].
        :param bool add_namespace: Add your user namespace to the product_id. *Deprecated*

        :return: The upload id.
        :rtype: str

        .. note::
            - See :meth:`Catalog.add_image` for additional kwargs.
            - Only one of `proj4` or `wkt_srs` can be provided.
        """
        if ndarray.dtype.name not in self.UPLOAD_NDARRAY_SUPPORTED_DTYPES:
            raise TypeError("{} is not in supported dtypes {}".format(
                ndarray.dtype.name, self.UPLOAD_NDARRAY_SUPPORTED_DTYPES))

        metadata = kwargs
        metadata.setdefault("process_controls",
                            {}).update({"upload_type": "ndarray"})
        if raster_meta is not None:
            geotrans = raster_meta.get("geoTransform")
            wkt_srs = raster_meta.get("coordinateSystem", {}).get("wkt")
        for arg in ["image_id", "proj4", "wkt_srs", "geotrans"]:
            if locals()[arg] is not None:
                kwargs[arg] = locals()[arg]
        for arg in ["overviews", "overview_resampler"]:
            if locals()[arg] is not None:
                metadata["process_controls"][arg] = locals()[arg]
        with NamedTemporaryFile(delete=False) as tmp:
            try:
                np.save(tmp, ndarray, allow_pickle=False)
                # From tempfile docs:
                # Whether the name can be used to open the file a second time,
                # while the named temporary file is still open, varies across
                # platforms (it can be so used on Unix; it cannot on Windows
                # NT or later)
                #
                # We close the underlying file object so _do_upload can open
                # the path again in a cross platform compatible way.
                # Cleanup is manual in the finally block.
                tmp.close()
                failed, upload_id, error = self._do_upload(
                    tmp.name,
                    product_id,
                    metadata=metadata,
                    add_namespace=add_namespace)

                if failed:
                    raise error

                return upload_id
            finally:
                os.unlink(tmp.name)
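A hedged usage sketch; `catalog` is assumed to be an instance of the Catalog client this method belongs to, and the ids and georeferencing values are placeholders:

import numpy as np

arr = np.zeros((256, 256, 3), dtype="uint16")        # 3rd dimension indexes the bands
upload_id = catalog.upload_ndarray(
    arr,
    product_id="my_org:my_product",
    image_id="scene_0001",
    proj4="+proj=longlat +datum=WGS84 +no_defs",
    geotrans=[10.0, 0.001, 0.0, 50.0, 0.0, -0.001],  # 6-number geotransform
)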
Example #10
def test_make_dir():
    temp = NamedTemporaryFile().name
    assert not exists(temp)
    _make_dir(test_logger(), temp)
    assert exists(temp)
Example #11
def get_screen_image():
    with NamedTemporaryFile() as f:
        pil_image = pyautogui.screenshot(imageFilename=f.name)

    opencvImage = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
    return opencvImage
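A small usage sketch of the helper above (the output path is a placeholder). Note that writing to f.name while the NamedTemporaryFile is still open works on Unix but not on Windows, the same caveat spelled out in Example #9:

frame = get_screen_image()
cv2.imwrite("screenshot.png", frame)   # frame is a BGR ndarray, so OpenCV writes it directly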
Example #12
    def load_df(
        self,
        df: pandas.DataFrame,
        table: str,
        field_dict: Optional[Dict[Any, Any]] = None,
        delimiter: str = ',',
        encoding: str = 'utf8',
        pandas_kwargs: Any = None,
        **kwargs: Any,
    ) -> None:
        """
        Loads a pandas DataFrame into hive.

        Hive data types will be inferred if not passed but column names will
        not be sanitized.

        :param df: DataFrame to load into a Hive table
        :type df: pandas.DataFrame
        :param table: target Hive table, use dot notation to target a
            specific database
        :type table: str
        :param field_dict: mapping from column name to hive data type.
            Note that it must be OrderedDict so as to keep columns' order.
        :type field_dict: collections.OrderedDict
        :param delimiter: field delimiter in the file
        :type delimiter: str
        :param encoding: str encoding to use when writing DataFrame to file
        :type encoding: str
        :param pandas_kwargs: passed to DataFrame.to_csv
        :type pandas_kwargs: dict
        :param kwargs: passed to self.load_file
        """
        def _infer_field_types_from_df(df: pandas.DataFrame) -> Dict[Any, Any]:
            dtype_kind_hive_type = {
                'b': 'BOOLEAN',  # boolean
                'i': 'BIGINT',  # signed integer
                'u': 'BIGINT',  # unsigned integer
                'f': 'DOUBLE',  # floating-point
                'c': 'STRING',  # complex floating-point
                'M': 'TIMESTAMP',  # datetime
                'O': 'STRING',  # object
                'S': 'STRING',  # (byte-)string
                'U': 'STRING',  # Unicode
                'V': 'STRING',  # void
            }

            order_type = OrderedDict()
            for col, dtype in df.dtypes.iteritems():
                order_type[col] = dtype_kind_hive_type[dtype.kind]
            return order_type

        if pandas_kwargs is None:
            pandas_kwargs = {}

        with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
            with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
                if field_dict is None:
                    field_dict = _infer_field_types_from_df(df)

                df.to_csv(
                    path_or_buf=f,
                    sep=delimiter,
                    header=False,
                    index=False,
                    encoding=encoding,
                    date_format="%Y-%m-%d %H:%M:%S",
                    **pandas_kwargs,
                )
                f.flush()

                return self.load_file(filepath=f.name,
                                      table=table,
                                      delimiter=delimiter,
                                      field_dict=field_dict,
                                      **kwargs)
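A hedged usage sketch, assuming this method lives on Airflow's HiveCliHook and that the hook class is imported; the connection id and table name are placeholders:

from collections import OrderedDict
import pandas as pd

hook = HiveCliHook(hive_cli_conn_id="hive_cli_default")
df = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})
hook.load_df(df,
             table="staging.users",
             field_dict=OrderedDict([("id", "BIGINT"), ("name", "STRING")]),
             delimiter=",")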
Example #13
    def run_cli(
        self,
        hql: str,
        schema: Optional[str] = None,
        verbose: bool = True,
        hive_conf: Optional[Dict[Any, Any]] = None,
    ) -> Any:
        """
        Run an hql statement using the hive cli. If hive_conf is specified
        it should be a dict and the entries will be set as key/value pairs
        in HiveConf


        :param hive_conf: if specified these key value pairs will be passed
            to hive as ``-hiveconf "key"="value"``. Note that they will be
            passed after the ``hive_cli_params`` and thus will override
            whatever values are specified in the database.
        :type hive_conf: dict

        >>> hh = HiveCliHook()
        >>> result = hh.run_cli("USE airflow;")
        >>> ("OK" in result)
        True
        """
        conn = self.conn
        schema = schema or conn.schema
        if schema:
            hql = f"USE {schema};\n{hql}"

        with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
            with NamedTemporaryFile(dir=tmp_dir) as f:
                hql += '\n'
                f.write(hql.encode('UTF-8'))
                f.flush()
                hive_cmd = self._prepare_cli_cmd()
                env_context = get_context_from_env_var()
                # Only extend the hive_conf if it is defined.
                if hive_conf:
                    env_context.update(hive_conf)
                hive_conf_params = self._prepare_hiveconf(env_context)
                if self.mapred_queue:
                    hive_conf_params.extend([
                        '-hiveconf',
                        f'mapreduce.job.queuename={self.mapred_queue}',
                        '-hiveconf',
                        f'mapred.job.queue.name={self.mapred_queue}',
                        '-hiveconf',
                        f'tez.queue.name={self.mapred_queue}',
                    ])

                if self.mapred_queue_priority:
                    hive_conf_params.extend([
                        '-hiveconf',
                        f'mapreduce.job.priority={self.mapred_queue_priority}'
                    ])

                if self.mapred_job_name:
                    hive_conf_params.extend([
                        '-hiveconf', f'mapred.job.name={self.mapred_job_name}'
                    ])

                hive_cmd.extend(hive_conf_params)
                hive_cmd.extend(['-f', f.name])

                if verbose:
                    self.log.info("%s", " ".join(hive_cmd))
                sub_process: Any = subprocess.Popen(hive_cmd,
                                                    stdout=subprocess.PIPE,
                                                    stderr=subprocess.STDOUT,
                                                    cwd=tmp_dir,
                                                    close_fds=True)
                self.sub_process = sub_process
                stdout = ''
                while True:
                    line = sub_process.stdout.readline()
                    if not line:
                        break
                    stdout += line.decode('UTF-8')
                    if verbose:
                        self.log.info(line.decode('UTF-8').strip())
                sub_process.wait()

                if sub_process.returncode:
                    raise AirflowException(stdout)

                return stdout
Example #14
 def __init__(self):
     QtCore.QThread.__init__(self)
     self.buffer = NamedTemporaryFile()
Example #15
def run_koopa(source, output_path, java_binary='java', tabsize=4):
    """Run Koopa to parse 'source', either a text file-like object with a
    read() method or a string, into an XML document saved to
    output_path.

    Returns the Cobol source code as a string, with expanded tabs.
    """

    if hasattr(source, 'read'):
        code = source.read()
        code_path = source.name
    elif isinstance(source, str):
        code = source
        code_path = '<string>'
    else:
        raise TypeError('source must be a file-like object or a string')

    source_file = None
    try:
        # Regardless of source encoding, save as a single-byte encoding since Cobol parsing
        # should only need ascii chars.  Saving as UTF-8 or similar means that the character
        # ranges reported by koopa will be offset from the data in self._code, breaking extracting
        # symbols etc.

        # But save as iso-8859-1, to keep more national chars in comments.
        # TODO: use input file encoding if it is single-byte.

        # Since NamedTemporaryFile doesn't support specifying encoding error handling,
        # encode the bytes ourselves to replace non-ascii with ? to preserve char counts.

        source_file = NamedTemporaryFile(mode='wb',
                                         suffix='.cbl',
                                         delete=False)
        source_file.write(code.encode('iso-8859-1', errors='replace'))
        source_file.close()

        jar = resource_filename('CobolSharp', KOOPA_JAR)

        # TODO: add command arg to control heap size
        cmd = (java_binary, '-cp', jar, '-Xms500m',
               '-Dkoopa.xml.include_positioning=true', 'koopa.app.cli.ToXml',
               source_file.name, output_path)

        process = subprocess.Popen(cmd,
                                   stdin=subprocess.DEVNULL,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   universal_newlines=True)

        stdout, stderr = process.communicate()

        # Doesn't seem to return an error exit code on parse errors...
        if process.returncode != 0 or 'Error:' in stdout:
            msg = stdout.replace(source_file.name, code_path)
            msg = msg.replace(os.path.basename(source_file.name), code_path)
            raise ParserError(msg)

        return code

    finally:
        if source_file:
            os.remove(source_file.name)
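A brief usage sketch of the parser wrapper above ('prog.cbl' and 'prog.xml' are placeholder paths):

with open("prog.cbl") as src:
    code = run_koopa(src, "prog.xml")   # raises ParserError on parse failures
print(len(code.splitlines()), "lines parsed")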
Example #16
    def export(self,
               out_f=None,
               format='mp3',
               codec=None,
               bitrate=None,
               parameters=None,
               tags=None,
               id3v2_version='4'):
        """
        Export an AudioSegment to a file with given options

        out_f (string):
            Path to destination audio file

        format (string)
            Format for destination audio file. ('mp3', 'wav', 'ogg' or other ffmpeg/avconv supported files)

        codec (string)
            Codec used to encode the destination file.

        bitrate (string)
            Bitrate used when encoding destination file. (128, 256, 312k...)

        parameters (string)
            Additional ffmpeg/avconv parameters

        tags (dict)
            Set metadata information to destination files usually used as tags. ({title='Song Title', artist='Song Artist'})

        id3v2_version (string)
            Set ID3v2 version for tags. (default: '4')
        """
        id3v2_allowed_versions = ['3', '4']

        out_f = _fd_or_path_or_tempfile(out_f, 'wb+')
        out_f.seek(0)

        # for wav output we can just write the data directly to out_f
        if format == "wav":
            data = out_f
        else:
            data = NamedTemporaryFile(mode="wb", delete=False)

        wave_data = wave.open(data, 'wb')
        wave_data.setnchannels(self.channels)
        wave_data.setsampwidth(self.sample_width)
        wave_data.setframerate(self.frame_rate)
        # For some reason packing the wave header struct with a float in python 2
        # doesn't throw an exception
        wave_data.setnframes(int(self.frame_count()))
        wave_data.writeframesraw(self._data)
        wave_data.close()

        # for wav files, we're done (wav data is written directly to out_f)
        if format == 'wav':
            return out_f

        output = NamedTemporaryFile(mode="w+b", delete=False)

        # build converter command to export
        convertion_command = [
            self.converter,
            '-y',  # always overwrite existing files
            "-f",
            "wav",
            "-i",
            data.name,  # input options (filename last)
        ]

        if codec is None:
            codec = self.DEFAULT_CODECS.get(format, None)

        if codec is not None:
            # force audio encoder
            convertion_command.extend(["-acodec", codec])

        if bitrate is not None:
            convertion_command.extend(["-b:a", bitrate])

        if parameters is not None:
            # extend arguments with arbitrary set
            convertion_command.extend(parameters)

        if tags is not None:
            if not isinstance(tags, dict):
                raise InvalidTag("Tags must be a dictionary.")
            else:
                # Extend converter command with tags
                # print(tags)
                for key, value in tags.items():
                    convertion_command.extend(
                        ['-metadata', '{0}={1}'.format(key, value)])

                if format == 'mp3':
                    # set id3v2 tag version
                    if id3v2_version not in id3v2_allowed_versions:
                        raise InvalidID3TagVersion(
                            "id3v2_version not allowed, allowed versions: %s" %
                            id3v2_allowed_versions)
                    convertion_command.extend(
                        ["-id3v2_version", id3v2_version])

        convertion_command.extend([
            "-f",
            format,
            output.name,  # output options (filename last)
        ])

        # read stdin / write stdout
        subprocess.call(
            convertion_command,
            # make converter shut up
            stderr=open(os.devnull, 'w'))

        output.seek(0)
        out_f.write(output.read())

        data.close()
        output.close()

        os.unlink(data.name)
        os.unlink(output.name)

        out_f.seek(0)
        return out_f
Example #17
def fill_template(template_name,
                  context,
                  img_category='Figure',
                  output_format='odt'):
    """
    Fill a document with data and convert it to the requested format.

    Returns an absolute path to the generated file.
    """

    if not isinstance(context, Context):
        context = Context(context)

    context['output_format'] = output_format
    context['img_category'] = img_category

    source_file = find_template_file(template_name)
    source_extension = os.path.splitext(source_file)[1]
    source = zipfile.ZipFile(source_file, 'r')

    dest_file = NamedTemporaryFile(delete=False, suffix=source_extension)
    dest = zipfile.ZipFile(dest_file, 'w')

    manifest_data = ''
    for name in source.namelist():
        data = source.read(name)
        if name.endswith('.xml'):
            data = smart_str(data)

        if any(name.endswith(file) for file in ('content.xml', 'styles.xml')):
            template = Template(fix_inline_tags(data))
            data = template.render(context)
        elif name == 'META-INF/manifest.xml':
            manifest_data = data[:-20]  # Cut off the closing </manifest> tag
            continue  # We will append it at the very end
        dest.writestr(name, smart_bytes(data))

    for _, image in context.dicts[0].get(IMAGES_CONTEXT_KEY, {}).items():
        filename = os.path.basename(image.name)
        ext = os.path.splitext(filename)[1][1:]
        manifest_data += (
            '<manifest:file-entry '
            'manifest:media-type="image/%(ext)s" '
            'manifest:full-path="Pictures/%(filename)s"/>\n') % locals()
        image.open()
        dest.writestr('Pictures/%s' % filename, image.read())
        image.close()

    manifest_data += '</manifest:manifest>'
    dest.writestr('META-INF/manifest.xml', manifest_data)

    source.close()
    dest.close()

    if source_extension[1:] != output_format:
        results = Queue()
        convertor = Process(target=_convert_subprocess,
                            args=(str(dest_file.name), output_format, results))
        convertor.start()
        return results.get()
    else:
        return dest_file.name
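A brief usage sketch of the template filler (the template name and context keys are placeholders):

path = fill_template("report.odt",
                     {"title": "Quarterly report", "author": "Jane Doe"},
                     output_format="pdf")
print(path)   # absolute path to the generated, converted file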
Example #18
# We fake an inventory file and let Ansible load it as if it were a real file.
# Just don't tell Ansible that, so we don't hurt its feelings.
inventory = """
[current]
{{ public_ip_address }}
"""

inventory_template = jinja2.Template(inventory)
rendered_inventory = inventory_template.render({
    'public_ip_address':
    '111.222.333.444'
    # and the rest of our variables
})

# Create a temporary file and write the template string to it
hosts = NamedTemporaryFile(delete=False)
hosts.write(rendered_inventory)
hosts.close()

pb = PlayBook(
    playbook='/home/ec2-user/hack/speeds/mysite/py.yaml',
    host_list=hosts.name,  # Our hosts, the rendered inventory file    
    callbacks=playbook_cb,
    runner_callbacks=runner_cb,
    stats=stats,
    # private_key_file='/path/to/key.pem'
)

results = pb.run()

# Ensure on_stats callback is called
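# (Hedged completion: the excerpt stops here. The usual Ansible 1.x pattern is to pass
#  the playbook's stats to the callbacks object created earlier, assumed to be
#  `playbook_cb` from outside this excerpt, with `os` imported.)
playbook_cb.on_stats(pb.stats)

# The inventory was created with delete=False, so remove it ourselves.
os.remove(hosts.name)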
Example #19
File: data.py Project: zonca/astropy
def get_readable_fileobj(name_or_obj, encoding=None, cache=False,
                         show_progress=True, remote_timeout=None):
    """
    Given a filename, pathlib.Path object or a readable file-like object, return a context
    manager that yields a readable file-like object.

    This supports passing filenames, URLs, and readable file-like objects,
    any of which can be compressed in gzip, bzip2 or lzma (xz) if the
    appropriate compression libraries are provided by the Python installation.

    Notes
    -----

    This function is a context manager, and should be used for example
    as::

        with get_readable_fileobj('file.dat') as f:
            contents = f.read()

    Parameters
    ----------
    name_or_obj : str or file-like object
        The filename of the file to access (if given as a string), or
        the file-like object to access.

        If a file-like object, it must be opened in binary mode.

    encoding : str, optional
        When `None` (default), returns a file-like object with a
        ``read`` method that returns `str` (``unicode``) objects, using
        `locale.getpreferredencoding` as an encoding.  This matches
        the default behavior of the built-in `open` when no ``mode``
        argument is provided.

        When ``'binary'``, returns a file-like object where its ``read``
        method returns `bytes` objects.

        When another string, it is the name of an encoding, and the
        file-like object's ``read`` method will return `str` (``unicode``)
        objects, decoded from binary using the given encoding.

    cache : bool, optional
        Whether to cache the contents of remote URLs.

    show_progress : bool, optional
        Whether to display a progress bar if the file is downloaded
        from a remote server.  Default is `True`.

    remote_timeout : float
        Timeout for remote requests in seconds (default is the configurable
        `astropy.utils.data.Conf.remote_timeout`, which is 3s by default)

    Returns
    -------
    file : readable file-like object
    """

    # close_fds is a list of file handles created by this function
    # that need to be closed.  We don't want to always just close the
    # returned file handle, because it may simply be the file handle
    # passed in.  In that case it is not the responsibility of this
    # function to close it: doing so could result in a "double close"
    # and an "invalid file descriptor" exception.
    PATH_TYPES = (str, pathlib.Path)

    close_fds = []
    delete_fds = []

    if remote_timeout is None:
        # use configfile default
        remote_timeout = conf.remote_timeout

    # Get a file object to the content
    if isinstance(name_or_obj, PATH_TYPES):
        # name_or_obj could be a Path object if pathlib is available
        name_or_obj = str(name_or_obj)

        is_url = _is_url(name_or_obj)
        if is_url:
            name_or_obj = download_file(
                name_or_obj, cache=cache, show_progress=show_progress,
                timeout=remote_timeout)
        fileobj = io.FileIO(name_or_obj, 'r')
        if is_url and not cache:
            delete_fds.append(fileobj)
        close_fds.append(fileobj)
    else:
        fileobj = name_or_obj

    # Check if the file object supports random access, and if not,
    # then wrap it in a BytesIO buffer.  It would be nicer to use a
    # BufferedReader to avoid loading the whole file first,
    # but that is not compatible with streams or urllib2.urlopen
    # objects on Python 2.x.
    if not hasattr(fileobj, 'seek'):
        fileobj = io.BytesIO(fileobj.read())

    # Now read enough bytes to look at signature
    signature = fileobj.read(4)
    fileobj.seek(0)

    if signature[:3] == b'\x1f\x8b\x08':  # gzip
        import struct
        try:
            import gzip
            fileobj_new = gzip.GzipFile(fileobj=fileobj, mode='rb')
            fileobj_new.read(1)  # need to check that the file is really gzip
        except (OSError, EOFError, struct.error):  # invalid gzip file
            fileobj.seek(0)
            fileobj_new.close()
        else:
            fileobj_new.seek(0)
            fileobj = fileobj_new
    elif signature[:3] == b'BZh':  # bzip2
        try:
            import bz2
        except ImportError:
            for fd in close_fds:
                fd.close()
            raise ValueError(
                ".bz2 format files are not supported since the Python "
                "interpreter does not include the bz2 module")
        try:
            # bz2.BZ2File does not support file objects, only filenames, so we
            # need to write the data to a temporary file
            with NamedTemporaryFile("wb", delete=False) as tmp:
                tmp.write(fileobj.read())
                tmp.close()
                fileobj_new = bz2.BZ2File(tmp.name, mode='rb')
            fileobj_new.read(1)  # need to check that the file is really bzip2
        except OSError:  # invalid bzip2 file
            fileobj.seek(0)
            fileobj_new.close()
            # raise
        else:
            fileobj_new.seek(0)
            close_fds.append(fileobj_new)
            fileobj = fileobj_new
    elif signature[:3] == b'\xfd7z':  # xz
        try:
            import lzma
            fileobj_new = lzma.LZMAFile(fileobj, mode='rb')
            fileobj_new.read(1)  # need to check that the file is really xz
        except ImportError:
            for fd in close_fds:
                fd.close()
            raise ValueError(
                ".xz format files are not supported since the Python "
                "interpreter does not include the lzma module.")
        except (OSError, EOFError) as e:  # invalid xz file
            fileobj.seek(0)
            fileobj_new.close()
            # should we propagate this to the caller to signal bad content?
            # raise ValueError(e)
        else:
            fileobj_new.seek(0)
            fileobj = fileobj_new

    # By this point, we have a file, io.FileIO, gzip.GzipFile, bz2.BZ2File
    # or lzma.LZMAFile instance opened in binary mode (that is, read
    # returns bytes).  Now we need to, if requested, wrap it in a
    # io.TextIOWrapper so read will return unicode based on the
    # encoding parameter.

    needs_textio_wrapper = encoding != 'binary'

    if needs_textio_wrapper:
        # A bz2.BZ2File can not be wrapped by a TextIOWrapper,
        # so we decompress it to a temporary file and then
        # return a handle to that.
        try:
            import bz2
        except ImportError:
            pass
        else:
            if isinstance(fileobj, bz2.BZ2File):
                tmp = NamedTemporaryFile("wb", delete=False)
                data = fileobj.read()
                tmp.write(data)
                tmp.close()
                delete_fds.append(tmp)

                fileobj = io.FileIO(tmp.name, 'r')
                close_fds.append(fileobj)

        fileobj = io.BufferedReader(fileobj)
        fileobj = io.TextIOWrapper(fileobj, encoding=encoding)

        # Ensure that file is at the start - io.FileIO will for
        # example not always be at the start:
        # >>> import io
        # >>> f = open('test.fits', 'rb')
        # >>> f.read(4)
        # 'SIMP'
        # >>> f.seek(0)
        # >>> fileobj = io.FileIO(f.fileno())
        # >>> fileobj.tell()
        # 4096L

        fileobj.seek(0)

    try:
        yield fileobj
    finally:
        for fd in close_fds:
            fd.close()
        for fd in delete_fds:
            os.remove(fd.name)
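
The context manager above is astropy's get_readable_fileobj; a minimal usage sketch, assuming it is imported from astropy.utils.data (file names and the URL are placeholders):

from astropy.utils.data import get_readable_fileobj

# A local gzip/bzip2/xz file is detected from its signature bytes,
# decompressed transparently and read back as text.
with get_readable_fileobj("catalog.txt.gz", encoding="utf-8") as f:
    first_line = f.readline()

# A URL is downloaded first (honouring cache/show_progress/remote_timeout)
# and then handled the same way.
with get_readable_fileobj("https://example.org/catalog.txt.gz", cache=True) as f:
    contents = f.read()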
Example #20
0
def decode_history_file(wal_info, comp_manager):
    """
    Read an history file and parse its contents.

    Each line in the file represents a timeline switch, each field is
    separated by tab, empty lines are ignored and lines starting with '#'
    are comments.

    Each line is composed by three fields: parentTLI, switchpoint and reason.
    "parentTLI" is the ID of the parent timeline.
    "switchpoint" is the WAL position where the switch happened
    "reason" is an human-readable explanation of why the timeline was changed

    The method requires a CompressionManager object to handle the eventual
     compression of the history file.

    :param barman.infofile.WalFileInfo wal_info: history file obj
    :param comp_manager: compression manager used in case
        of history file compression
    :return List[HistoryFileData]: information from the history file
    """

    path = wal_info.orig_filename
    # Decompress the file if needed
    if wal_info.compression:
        # Use a NamedTemporaryFile to avoid explicit cleanup
        uncompressed_file = NamedTemporaryFile(dir=os.path.dirname(path),
                                               prefix='.%s.' % wal_info.name,
                                               suffix='.uncompressed')
        path = uncompressed_file.name
        comp_manager.get_compressor(wal_info.compression).decompress(
            wal_info.orig_filename, path)

    # Extract the timeline from history file name
    tli, _, _ = decode_segment_name(wal_info.name)

    lines = []
    with open(path) as fp:
        for line in fp:
            line = line.strip()
            # Skip comment lines
            if line.startswith("#"):
                continue
            # Skip empty lines
            if len(line) == 0:
                continue
            # Use tab as separator
            contents = line.split('\t')
            if len(contents) != 3:
                # Invalid content of the line
                raise BadHistoryFileContents(path)

            history = HistoryFileData(tli=tli,
                                      parent_tli=int(contents[0]),
                                      switchpoint=parse_lsn(contents[1]),
                                      reason=contents[2])
            lines.append(history)

    # Empty history file or containing invalid content
    if len(lines) == 0:
        raise BadHistoryFileContents(path)
    else:
        return lines
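
A minimal sketch of the tab-separated format the parser above expects (the values are made up; parentTLI, switchpoint and reason are the three fields described in the docstring):

# One timeline-switch line: parentTLI <TAB> switchpoint <TAB> reason
sample_line = "1\t0/3000000\tno recovery target specified"
parent_tli, switchpoint, reason = sample_line.split('\t')
assert parent_tli == "1"
assert reason == "no recovery target specified"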
Example #21
0
    def from_file_using_temporary_files(cls, file, format=None, codec=None, parameters=None, **kwargs):
        orig_file = file
        file, close_file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False)

        if format:
            format = format.lower()
            format = AUDIO_FILE_EXT_ALIASES.get(format, format)

        def is_format(f):
            f = f.lower()
            if format == f:
                return True
            if isinstance(orig_file, basestring):
                return orig_file.lower().endswith(".{0}".format(f))
            if isinstance(orig_file, bytes):
                return orig_file.lower().endswith((".{0}".format(f)).encode('utf8'))
            return False

        if is_format("wav"):
            try:
                obj = cls._from_safe_wav(file)
                if close_file:
                    file.close()
                return obj
            except Exception:
                file.seek(0)
        elif is_format("raw") or is_format("pcm"):
            sample_width = kwargs['sample_width']
            frame_rate = kwargs['frame_rate']
            channels = kwargs['channels']
            metadata = {
                'sample_width': sample_width,
                'frame_rate': frame_rate,
                'channels': channels,
                'frame_width': channels * sample_width
            }
            obj = cls(data=file.read(), metadata=metadata)
            if close_file:
                file.close()
            return obj

        input_file = NamedTemporaryFile(mode='wb', delete=False)
        try:
            input_file.write(file.read())
        except OSError:
            input_file.flush()
            input_file.close()
            input_file = NamedTemporaryFile(mode='wb', delete=False, buffering=2 ** 31 - 1)
            if close_file:
                file.close()
            close_file = True
            file = open(orig_file, buffering=2 ** 13 - 1, mode='rb')
            reader = file.read(2 ** 31 - 1)
            while reader:
                input_file.write(reader)
                reader = file.read(2 ** 31 - 1)
        input_file.flush()
        if close_file:
            file.close()

        output = NamedTemporaryFile(mode="rb", delete=False)

        conversion_command = [cls.converter,
                              '-y',  # always overwrite existing files
                              ]

        # If format is not defined
        # ffmpeg/avconv will detect it automatically
        if format:
            conversion_command += ["-f", format]

        if codec:
            # force audio decoder
            conversion_command += ["-acodec", codec]

        conversion_command += [
            "-i", input_file.name,  # input_file options (filename last)
            "-vn",  # Drop any video streams if there are any
            "-f", "wav",  # output options (filename last)
            output.name
        ]

        if parameters is not None:
            # extend arguments with arbitrary set
            conversion_command.extend(parameters)

        log_conversion(conversion_command)

        with open(os.devnull, 'rb') as devnull:
            p = subprocess.Popen(conversion_command, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p_out, p_err = p.communicate()

        log_subprocess_output(p_out)
        log_subprocess_output(p_err)

        try:
            if p.returncode != 0:
                raise CouldntDecodeError(
                    "Decoding failed. ffmpeg returned error code: {0}\n\nOutput from ffmpeg/avlib:\n\n{1}".format(
                        p.returncode, p_err))
            obj = cls._from_safe_wav(output)
        finally:
            input_file.close()
            output.close()
            os.unlink(input_file.name)
            os.unlink(output.name)

        return obj
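
This is the ffmpeg/avconv fallback used by pydub's AudioSegment.from_file; a typical call looks like the sketch below (file names are placeholders, and ffmpeg or avconv must be installed):

from pydub import AudioSegment

# Decodes through ffmpeg/avconv; the temporary input/output wav files seen
# above are created and removed internally.
song = AudioSegment.from_file("input.mp3", format="mp3")
song[:5000].export("first_five_seconds.wav", format="wav")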
Example #22
0
def import_file(uuid, refresh=False, file_size=0):
    """Download or copy file specified by UUID.
    :param refresh: Flag for forcing update of the file.
    :type refresh: bool.
    :param file_size: size of the remote file.
    :type file_size: int.
    :returns: FileStoreItem UUID or None if importing failed.

    """
    logger.debug("Importing FileStoreItem with UUID '%s'", uuid)

    item = FileStoreItem.objects.get_item(uuid=uuid)
    if not item:
        logger.error("FileStoreItem with UUID '%s' not found", uuid)
        return None

    # save task ID for looking up file import status
    item.import_task_id = import_file.request.id
    item.save()

    # if file is ready to be used then return it,
    # otherwise delete it if update is requested
    if item.is_local():
        if refresh:
            item.delete_datafile()
        else:
            logger.info("File already exists: '%s'", item.get_absolute_path())
            return item.uuid

    # start the transfer
    if os.path.isabs(item.source):
        try:
            with open(item.source, 'r') as f:
                # TODO: copy file in chunks to display progress report
                # model is saved by default if FileField.save() is called
                item.datafile.save(os.path.basename(item.source), File(f))
        except IOError:
            logger.error("Could not open file: %s", item.source)
            return None
        if item.source.startswith(settings.REFINERY_DATA_IMPORT_DIR):
            try:
                os.unlink(item.source)
            except IOError:
                logger.error("Could not delete uploaded source file '%s'",
                             item.source)
        logger.info("File copied from '%s'", item.source)
    elif item.source.startswith('s3://'):
        bucket_name, key = parse_s3_url(item.source)
        s3 = boto3.resource('s3')
        uploaded_object = s3.Object(bucket_name, key)
        with NamedTemporaryFile(dir=get_temp_dir()) as download:
            logger.debug("Downloading file from '%s'", item.source)
            try:
                uploaded_object.download_fileobj(download)
            except botocore.exceptions.ClientError:
                logger.error("Failed to download '%s'", item.source)
                import_file.update_state(state=celery.states.FAILURE,
                                         meta='Failed to import uploaded file')
                return None
            logger.debug("Saving downloaded file '%s'", download.name)
            item.datafile.save(os.path.basename(key), File(download))
            logger.debug("Saved downloaded file to '%s'", item.datafile.name)
        try:
            s3.Object(bucket_name, key).delete()
        except botocore.exceptions.ClientError:
            logger.error("Failed to delete '%s'", item.source)
    else:  # assume that source is a regular URL
        # check if source file can be downloaded
        try:
            response = requests.get(item.source, stream=True)
            response.raise_for_status()
        except HTTPError as exc:
            logger.error("Could not open URL '%s': '%s'", item.source, exc)
            import_file.update_state(state=celery.states.FAILURE,
                                     meta='Analysis failed during file import')
            # ignore the task so no other state is recorded
            # http://stackoverflow.com/a/33143545
            raise celery.exceptions.Ignore()
        # FIXME: When importing a tabular file into Refinery, there is a
        # dependence on this ConnectionError below returning `None`!!!!
        except (ConnectionError, ValueError) as exc:
            logger.error("Could not open URL '%s': '%s'", item.source, exc)
            return None

        with NamedTemporaryFile(dir=get_temp_dir(), delete=False) as tmpfile:
            # provide a default value in case Content-Length is missing
            remote_file_size = int(
                response.headers.get('Content-Length', file_size))
            logger.debug("Downloading from '%s'", item.source)
            # download and save the file
            import_failure = False
            local_file_size = 0
            block_size = 10 * 1024 * 1024  # bytes
            try:
                for buf in response.iter_content(block_size):
                    local_file_size += len(buf)

                    try:
                        tmpfile.write(buf)
                    except IOError as exc:
                        # e.g., [Errno 28] No space left on device
                        logger.error("Error downloading from '%s': %s",
                                     item.source, exc)
                        import_failure = True
                        break

                    # check if we have a sane value for file size
                    if remote_file_size > 0:
                        percent_done = \
                            local_file_size * 100. / remote_file_size
                    else:
                        percent_done = 0

                    import_file.update_state(state="PROGRESS",
                                             meta={
                                                 "percent_done":
                                                 "{:.0f}".format(percent_done),
                                                 "current":
                                                 local_file_size,
                                                 "total":
                                                 remote_file_size
                                             })
            except ContentDecodingError as e:
                logger.error("Error while decoding response content:%s" % e)
                import_failure = True

            if import_failure:
                # delete temp. file if download failed
                logger.error(
                    "File import task has failed. Deleting temporary file...")
                tmpfile.delete = True
                import_file.update_state(
                    state=celery.states.FAILURE,
                    meta='Analysis Failed during import_file subtask')
                # ignore the task so no other state is recorded
                # See: http://stackoverflow.com/a/33143545
                raise celery.exceptions.Ignore()

        logger.debug("Finished downloading from '%s'", item.source)

        # get the file name from URL (remove query string)
        u = urlparse.urlparse(item.source)
        src_file_name = os.path.basename(u.path)
        # construct destination path based on source file name
        rel_dst_path = item.datafile.storage.get_available_name(
            file_path(item, src_file_name))
        abs_dst_path = os.path.join(settings.FILE_STORE_BASE_DIR, rel_dst_path)
        # move the temp file into the file store
        try:
            if not os.path.exists(os.path.dirname(abs_dst_path)):
                os.makedirs(os.path.dirname(abs_dst_path))
            os.rename(tmpfile.name, abs_dst_path)
        except OSError as e:
            logger.error(
                "Error moving temp file into the file store. "
                "OSError: %s, file name: %s, error: %s", e.errno, e.filename,
                e.strerror)
            return False
        # the temp file is only accessible by its owner by default, which
        # prevents access by the web server if it runs as its own user
        try:
            mode = os.stat(abs_dst_path).st_mode
            os.chmod(abs_dst_path,
                     mode | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH)
        except OSError as e:
            logger.error("Failed changing permissions on %s", abs_dst_path)
            logger.error("OSError: %s, file name %s, error: %s", e.errno,
                         e.filename, e.strerror)

        # assign new path to datafile
        item.datafile.name = rel_dst_path
        # save the model instance
        item.save()

    return item.uuid
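
The URL branch above boils down to streaming the response into a NamedTemporaryFile and then moving it into place; a standalone sketch of that pattern (the URL and destination path are placeholders):

import os
import requests
from tempfile import NamedTemporaryFile

response = requests.get("https://example.org/big_file.bin", stream=True)
response.raise_for_status()

# delete=False keeps the file around after the context manager exits,
# so it can be renamed into its final location.
with NamedTemporaryFile(delete=False) as tmpfile:
    for chunk in response.iter_content(10 * 1024 * 1024):
        tmpfile.write(chunk)

os.rename(tmpfile.name, "/tmp/big_file.bin")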
Example #23
0
    def test_split_mates(self):
        'It tests the detection of oligos in sequence files'

        mate_fhand = NamedTemporaryFile(suffix='.fasta')
        linker = TITANIUM_LINKER

        # a complete linker
        seq5 = 'CTAGTCTAGTCGTAGTCATGGCTGTAGTCTAGTCTACGATTCGTATCAGTTGTGTGAC'
        seq3 = 'ATCGATCATGTTGTATTGTGTACTATACACACACGTAGGTCGACTATCGTAGCTAGT'

        mate_fhand.write('>seq1\n' + seq5 + linker + seq3 + '\n')
        # no linker
        mate_fhand.write('>seq2\n' + seq5 + '\n')
        # a partial linker
        mate_fhand.write('>seq3\n' + seq5 + linker[2:25] + seq3 + '\n')
        # the linker is at the 5' end
        mate_fhand.write('>seq4\n' + linker[10:] + seq3 + '\n')
        # two linkers
        mate_fhand.write('>seq5\n' + linker + seq3 + FLX_LINKER + seq5 + '\n')
        # reverse linker
        rev_linker = get_setting('TITANIUM_LINKER_REV')
        mate_fhand.write('>seq6\n' + seq5 + rev_linker + seq3 + '\n')
        mate_fhand.flush()

        splitter = MatePairSplitter()
        new_seqs = []
        for packet in read_seq_packets([mate_fhand], 2):
            new_seqs.append(splitter(packet))

        out_fhand = StringIO()
        write_seq_packets(out_fhand, new_seqs, file_format='fasta')

        result = out_fhand.getvalue()
        xpect = r'>seq1\1'
        xpect += '\n'
        xpect += 'CTAGTCTAGTCGTAGTCATGGCTGTAGTCTAGTCTACGATTCGTATCAGTTGTGTGAC\n'
        xpect += r'>seq1\2'
        xpect += '\n'
        xpect += 'ATCGATCATGTTGTATTGTGTACTATACACACACGTAGGTCGACTATCGTAGCTAGT\n'
        xpect += '>seq2\n'
        xpect += 'CTAGTCTAGTCGTAGTCATGGCTGTAGTCTAGTCTACGATTCGTATCAGTTGTGTGAC\n'
        xpect += r'>seq3_pl\1'
        xpect += '\n'
        xpect += 'CTAGTCTAGTCGTAGTCATGGCTGTAGTCTAGTCTACGATTCGTATCAGTTGTGTG\n'
        xpect += r'>seq3_pl\2'
        xpect += '\n'
        xpect += 'GTGTACTATACACACACGTAGGTCGACTATCGTAGCTAGT\n'
        xpect += '>seq4\n'
        xpect += 'ATCGATCATGTTGTATTGTGTACTATACACACACGTAGGTCGACTATCGTAGCTAGT\n'
        xpect += '>seq5_mlc.part1\n'
        xpect += 'TCGTATAACTTCGTATAATGTATGCTATACGAAGTTATTACGATCGATCATGTTGTAT'
        xpect += 'TG'
        xpect += 'TGTACTATACACACACGTAGGTCGACTATCGTAGCTAGT\n'
        xpect += '>seq5_mlc.part2\n'
        xpect += 'ACCTAGTCTAGTCGTAGTCATGGCTGTAGTCTAGTCTACGATTCGTATCAGTTGTGTGAC'
        xpect += '\n'
        xpect += r'>seq6\1'
        xpect += '\n'
        xpect += 'CTAGTCTAGTCGTAGTCATGGCTGTAGTCTAGTCTACGATTCGTATCAGTTGTGTGAC\n'
        xpect += r'>seq6\2'
        xpect += '\n'
        xpect += 'ATCGATCATGTTGTATTGTGTACTATACACACACGTAGGTCGACTATCGTAGCTAGT\n'
        assert xpect == result

        # with a short linker at the 3' end
        mate_fhand = NamedTemporaryFile(suffix='.fasta')
        seq = ">seq1\nCATCAATGACATCACAAATGACATCAACAAACTCAAA"
        seq += "CTCACATACACTGCTGTACCGTAC"
        mate_fhand.write(seq)
        mate_fhand.flush()
        splitter = MatePairSplitter()
        new_seqs = []
        for packet in read_seq_packets([mate_fhand], 1):
            new_seqs.append(splitter(packet))
        out_fhand = StringIO()
        write_seq_packets(out_fhand, new_seqs, file_format='fasta')
        result = ">seq1\nCATCAATGACATCACAAATGACATCAACAAACTCAAACTCACATACA\n"
        assert result == out_fhand.getvalue()
Example #24
0
    def get_output(self):
        '''
        Execute a command through the system shell. First checks to see if
        the requested command is executable. Returns the command's stdout as
        a decoded, stripped string.
        '''
        if self.is_hostname:
            # short circuit for hostname with internal method
            return determine_hostname()

        # all commands should timeout after a long interval so the client does not hang
        # prepend native nix 'timeout' implementation
        timeout_command = 'timeout -s KILL %s %s' % (self.config.cmd_timeout,
                                                     self.command)

        # ensure consistent locale for collected command output
        cmd_env = {'LC_ALL': 'C'}
        args = shlex.split(timeout_command)

        # never execute this stuff
        if set.intersection(set(args), constants.command_blacklist):
            raise RuntimeError("Command Blacklist: " + self.command)

        try:
            logger.debug('Executing: %s', args)
            proc0 = Popen(args,
                          shell=False,
                          stdout=PIPE,
                          stderr=STDOUT,
                          bufsize=-1,
                          env=cmd_env,
                          close_fds=True)
        except OSError as err:
            if err.errno == errno.ENOENT:
                logger.debug('Command %s not found', self.command)
                return
            else:
                raise err

        dirty = False

        cmd = "sed -rf " + constants.default_sed_file
        sedcmd = Popen(shlex.split(cmd), stdin=proc0.stdout, stdout=PIPE)
        proc0.stdout.close()
        proc0 = sedcmd

        if self.exclude is not None:
            exclude_file = NamedTemporaryFile()
            exclude_file.write("\n".join(self.exclude).encode('utf-8'))
            exclude_file.flush()
            cmd = "grep -F -v -f %s" % exclude_file.name
            proc1 = Popen(shlex.split(cmd), stdin=proc0.stdout, stdout=PIPE)
            proc0.stdout.close()
            stderr = None
            if self.pattern is None or len(self.pattern) == 0:
                stdout, stderr = proc1.communicate()

            # always log return codes for debug
            logger.debug('Proc1 Status: %s', proc1.returncode)
            logger.debug('Proc1 stderr: %s', stderr)
            proc0 = proc1

            dirty = True

        if self.pattern is not None and len(self.pattern):
            pattern_file = NamedTemporaryFile()
            pattern_file.write("\n".join(self.pattern).encode('utf-8'))
            pattern_file.flush()
            cmd = "grep -F -f %s" % pattern_file.name
            proc2 = Popen(shlex.split(cmd), stdin=proc0.stdout, stdout=PIPE)
            proc0.stdout.close()
            stdout, stderr = proc2.communicate()

            # always log return codes for debug
            logger.debug('Proc2 Status: %s', proc2.returncode)
            logger.debug('Proc2 stderr: %s', stderr)
            proc0 = proc2

            dirty = True

        if not dirty:
            stdout, stderr = proc0.communicate()

        # Return codes 126/127 indicate the command could not be invoked or
        # could not be found.
        if proc0.returncode == 126 or proc0.returncode == 127:
            stdout = ("Could not find cmd: %s" % self.command).encode('utf-8')

        logger.debug("Proc0 Status: %s", proc0.returncode)
        logger.debug("Proc0 stderr: %s", stderr)
        return stdout.decode('utf-8', 'ignore').strip()
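
The exclude/pattern filtering above amounts to handing grep a pattern file written to a NamedTemporaryFile; a standalone sketch, assuming grep is available (patterns and input are placeholders):

import shlex
from subprocess import PIPE, Popen
from tempfile import NamedTemporaryFile

patterns = ["error", "warning"]
with NamedTemporaryFile() as pattern_file:
    pattern_file.write("\n".join(patterns).encode("utf-8"))
    pattern_file.flush()
    # keep only lines containing one of the fixed-string patterns
    grep = Popen(shlex.split("grep -F -f %s" % pattern_file.name),
                 stdin=PIPE, stdout=PIPE)
    out, _ = grep.communicate(b"an error line\na clean line\n")

print(out)  # b'an error line\n'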
Example #25
0
 def set_stdout(self):
     self.stdout = NamedTemporaryFile('w', delete=False, prefix='astevaltest')
     self.interp.writer = self.stdout
Example #26
0
    tn.close()
    return errors


if __name__ == '__main__':
    for zn in SRC_FILES:
        p = parse_zipname(zn)
        destpath = DEST_DIR.joinpath(p['year'], p['name'] + '.csv')

        if destpath.exists():
            print("Skipping: {}; this already exists: {}".format(zn, destpath))
            continue

        print("Opening:", zn)
        dbytes = extract_dbf_bytes(zn)
        if not dbytes:
            # some zip files, such as pa_nhs national files, do not have a DBF
            continue

        tn = NamedTemporaryFile('wb')
        tn.write(dbytes)
        tn.flush()  # make sure the DBF bytes are on disk before reading by name
        records, fields, rowcount = extract_records(tn.name)

        print("Writing", rowcount, "records to:", destpath)

        errors = write_records_to_csv(destpath, records, fields, rowcount)
        if errors:
            print("\tErrors found:", len(errors))
            for e in errors:
                stderr.write(e + "\n")
Example #27
0
 def test_invalid_root_dir(self):
     with NamedTemporaryFile() as tf:
         self.assertRaises(TraitError,
                           FileContentsManager,
                           root_dir=tf.name)
Example #28
0
    def decode_msg(self,
                   primitive: P_DATA,
                   assoc: Optional["Association"] = None) -> bool:
        """Converts P-DATA primitives into a ``DIMSEMessage`` sub-class.

        Decodes the data from the P-DATA service primitive (which
        may contain the results of one or more P-DATA-TF PDUs) into the
        :attr:`~DIMSEMessage.command_set` and :attr:`~DIMSEMessage.data_set`
        attributes. Also sets the :attr:`~DIMSEMessage.context_id` and
        :attr:`~DIMSEMessage.encoded_command_set` attributes of the
        ``DIMSEMessage`` sub-class object.

        Parameters
        ----------
        primitive : pdu_primitives.P_DATA
            The P-DATA service primitive to be decoded into a DIMSE message.
        assoc : association.Association, optional
            The association processing the message. This is required when:

            * :attr:`~pynetdicom._config.STORE_RECV_CHUNKED_DATASET` is
              ``True``
            * The P-DATA primitive contains part of a C-STORE-RQ message

            In this case the association is consulted for its accepted
            transfer syntax, which is included in the File Meta Information
            of the stored dataset.

        Returns
        -------
        bool
            ``True`` when the DIMSE message is completely decoded, ``False``
            otherwise.

        References
        ----------

        * DICOM Standard, Part 8, :dcm:`Annex E<part08/chapter_E.html>`
        """
        # Make sure this is a P-DATA primitive
        if primitive.__class__ != P_DATA or primitive is None:
            return False

        for (context_id, data) in primitive.presentation_data_value_list:

            # The first byte of the P-DATA is the Message Control Header
            #   See Part 8, Annex E.2
            # The standard says that only the significant bits (ie the last
            #   two) should be checked
            # xxxxxx00 - Message Dataset information, not the last fragment
            # xxxxxx01 - Command information, not the last fragment
            # xxxxxx10 - Message Dataset information, the last fragment
            # xxxxxx11 - Command information, the last fragment
            control_header_byte = data[0]

            # LOGGER.debug('Control header byte %s', control_header_byte)
            # print(f'Control header byte {control_header_byte}')

            # COMMAND SET
            # P-DATA fragment contains Command Set information
            #   (control_header_byte is xxxxxx01 or xxxxxx11)
            if control_header_byte & 1:
                # The command set may be spread out over a number
                #   of fragments and P-DATA primitives and we need to remember
                #   the elements from previous fragments, hence the
                #   encoded_command_set class attribute
                # This adds all the command set data to the class object
                self.encoded_command_set.write(data[1:])

                # The final command set fragment (xxxxxx11) has been added
                #   so decode the command set
                if control_header_byte & 2:
                    # Presentation Context ID
                    #   Set this now as must only be one final command set
                    #   fragment and command set must always be present
                    self.context_id = context_id

                    # Command Set is always encoded Implicit VR Little Endian
                    #   decode(dataset, is_implicit_VR, is_little_endian)
                    # pylint: disable=attribute-defined-outside-init
                    self.command_set = decode(self.encoded_command_set, True,
                                              True)

                    # Determine which DIMSE Message class to use
                    self.__class__ = (_MESSAGE_TYPES[cast(
                        int, self.command_set.CommandField)][1])

                    # Determine if a Data Set is present by checking for
                    #   (0000, 0800) CommandDataSetType US 1. If the value is
                    #   0x0101 no dataset present, otherwise one is.
                    if self.command_set.CommandDataSetType == 0x0101:
                        # By returning True we're indicating that the message
                        #   has been completely decoded
                        return True

                    # Data Set is present
                    if (_config.STORE_RECV_CHUNKED_DATASET
                            and isinstance(self, C_STORE_RQ)):
                        # delete=False is a workaround for Windows
                        # Setting delete=True prevents us from re-opening
                        # the file after it is opened by NamedTemporaryFile
                        # below.
                        self._data_set_file = cast(
                            "NTF",
                            NamedTemporaryFile(delete=False,
                                               mode="wb",
                                               suffix=".dcm"))
                        self._data_set_path = Path(self._data_set_file.name)
                        # Write the File Meta
                        self._data_set_file.write(b'\x00' * 128)
                        self._data_set_file.write(b'DICM')

                        cs = self.command_set
                        cx = cast("Association",
                                  assoc)._accepted_cx[context_id]
                        sop_class = cast(UID, cs.AffectedSOPClassUID)
                        sop_instance = cast(UID, cs.AffectedSOPInstanceUID)
                        write_file_meta_info(
                            self._data_set_file,  # type: ignore
                            create_file_meta(
                                sop_class_uid=sop_class,
                                sop_instance_uid=sop_instance,
                                transfer_syntax=cx.transfer_syntax[0]))

            # DATA SET
            # P-DATA fragment contains Data Set information
            #   (control_header_byte is xxxxxx00 or xxxxxx10)
            else:
                # As with the command set, the data set may be spread over
                #   a number of fragments in each P-DATA primitive and a
                #   number of P-DATA primitives.
                if self._data_set_file:
                    self._data_set_file.write(data[1:])
                else:
                    cast(BytesIO, self.data_set).write(data[1:])

                # The final data set fragment (xxxxxx10) has been added
                if control_header_byte & 2 != 0:
                    # By returning True we're indicating that the message
                    #   has been completely decoded
                    return True

        # We return False to indicate that the message isn't yet fully decoded
        return False
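
The message control header handling above reduces to two bit tests (Part 8, Annex E.2); a minimal sketch of that interpretation:

def describe_control_header(byte):
    # bit 0: 1 = command set fragment, 0 = data set fragment
    # bit 1: 1 = last fragment, 0 = more fragments follow
    kind = "command set" if byte & 1 else "data set"
    last = "last" if byte & 2 else "not the last"
    return "%s, %s fragment" % (kind, last)

assert describe_control_header(0b01) == "command set, not the last fragment"
assert describe_control_header(0b11) == "command set, last fragment"
assert describe_control_header(0b10) == "data set, last fragment"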
Example #29
0
# This is a config file for testing purposes, it creates a temporary
# file for the database.

from tempfile import NamedTemporaryFile

TITLE = "elogy"

SECRET_KEY = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'

DEBUG = False

# The name of the database file
with NamedTemporaryFile(delete=False) as f:
    DATABASE = f.name

# The folder where all uploaded files will be stored.
UPLOAD_FOLDER = '/tmp/test_elogy'

# Don't change anything below this line unless you know what you're doing!
# ------------------------------------------------------------------------

DATABASE = {
    # Note: Currently *only* works with sqlite!
    "name": DATABASE,
    "engine": "playhouse.sqlite_ext.SqliteExtDatabase",
    "threadlocals": True,
    "journal_mode": "WAL"
}
Example #30
0
def render_mm(self, code, options, format, prefix='mermaid'):
    """Render mermaid code into a PNG or PDF output file."""

    if format == 'raw':
        format = 'png'

    mermaid_cmd = self.builder.config.mermaid_cmd
    verbose = self.builder.config.mermaid_verbose
    hashkey = (
        code + str(options) +
        str(self.builder.config.mermaid_sequence_config)).encode('utf-8')

    basename = '%s-%s' % (prefix, sha1(hashkey).hexdigest())
    fname = '%s.%s' % (basename, format)
    relfn = posixpath.join(self.builder.imgpath, fname)
    outdir = path.join(self.builder.outdir, self.builder.imagedir)
    outfn = path.join(outdir, fname)
    tmpfn = path.join(_get_default_tempdir(), basename)

    if path.isfile(outfn):
        return relfn, outfn

    ensuredir(path.dirname(outfn))

    # mermaid expects UTF-8 by default
    if isinstance(code, text_type):
        code = code.encode('utf-8')

    with open(tmpfn, 'wb') as t:
        t.write(code)

    mm_args = [mermaid_cmd, tmpfn, '-o', outdir]
    if verbose:
        mm_args.extend(['-v'])
    if self.builder.config.mermaid_phantom_path:
        mm_args.extend(
            ['--phantomPath', self.builder.config.mermaid_phantom_path])
    if self.builder.config.mermaid_sequence_config:
        # json.dump writes text, so open the temporary file in text mode
        with NamedTemporaryFile('w', delete=False) as seq:
            json.dump(self.builder.config.mermaid_sequence_config, seq)
        mm_args.extend(['--sequenceConfig', seq.name])
    if format == 'png':
        mm_args.extend(['-p'])
    else:
        mm_args.extend(['-s'])
        self.builder.warn('Mermaid SVG support is experimental')
    try:
        p = Popen(mm_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
    except OSError as err:
        if err.errno != ENOENT:  # No such file or directory
            raise
        self.builder.warn('command %r cannot be run (needed for mermaid '
                          'output), check the mermaid_cmd setting' %
                          mermaid_cmd)
        return None, None

    stdout, stderr = p.communicate(code)
    if verbose:
        self.builder.info(stdout)

    if p.returncode != 0:
        raise MermaidError('Mermaid exited with error:\n[stderr]\n%s\n'
                           '[stdout]\n%s' % (stderr, stdout))
    if not path.isfile(outfn):
        raise MermaidError(
            'Mermaid did not produce an output file:\n[stderr]\n%s\n'
            '[stdout]\n%s' % (stderr, stdout))
    return relfn, outfn