Example #1
 def test_makefile_timeout_fires(self):
     # This is convoluted because (openssl s_server -www) starts
     # writing the response as soon as it receives the first line of
     # the request, so it's possible for it to send the response
     # before the request is sent and there would be no timeout.  So,
     # let the server spend time reading from an empty pipe
     FIFO_NAME = 'test_makefile_timeout_fires_fifo'  # noqa
     os.mkfifo('tests/' + FIFO_NAME)
     pipe_pid = os.fork()
     try:
         if pipe_pid == 0:
             try:
                 with open('tests/' + FIFO_NAME, 'w') as f:
                     time.sleep(sleepTime + 1)
                     f.write('Content\n')
             finally:
                 os._exit(0)
         self.args[self.args.index('-www')] = '-WWW'
         pid = self.start_server(self.args)
         try:
             c = httpslib.HTTPSConnection(srv_host, self.srv_port)
             c.putrequest('GET', '/' + FIFO_NAME)
             c.putheader('Accept', 'text/html')
             c.putheader('Accept', 'text/plain')
             c.endheaders()
             c.sock.settimeout(0.0000000001)
             with self.assertRaises(socket.timeout):
                 c.getresponse()
             c.close()
         finally:
             self.stop_server(pid)
     finally:
         os.kill(pipe_pid, signal.SIGTERM)
         os.waitpid(pipe_pid, 0)
         os.unlink('tests/' + FIFO_NAME)
Example #2
 def run(self):
     if not os.path.exists(ReplayPipeName):
         os.mkfifo(ReplayPipeName)
     writePipe = open(ReplayPipeName, 'w')
     while True:
         msg = self.replayQueue.get()
         writeToPipe(writePipe, msg)
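writeToPipe is not defined in this snippet; a minimal sketch of what such a helper could look like, assuming messages are newline-delimited strings:

 def writeToPipe(pipe, msg):
     # Hypothetical helper assumed by run() above: write one message per line
     # and flush so the reader at the other end of the FIFO sees it right away.
     pipe.write(msg + '\n')
     pipe.flush()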
Example #3
    def extract_item(self, item, restore_attrs=True, dry_run=False):
        if dry_run:
            if b'chunks' in item:
                for _ in self.pipeline.fetch_many([c[0] for c in item[b'chunks']], is_preloaded=True):
                    pass
            return

        dest = self.cwd
        if item[b'path'].startswith('/') or item[b'path'].startswith('..'):
            raise Exception('Path should be relative and local')
        path = os.path.join(dest, item[b'path'])
        # Attempt to remove existing files, ignore errors on failure
        try:
            st = os.lstat(path)
            if stat.S_ISDIR(st.st_mode):
                os.rmdir(path)
            else:
                os.unlink(path)
        except OSError:
            pass
        mode = item[b'mode']
        if stat.S_ISDIR(mode):
            if not os.path.exists(path):
                os.makedirs(path)
            if restore_attrs:
                self.restore_attrs(path, item)
        elif stat.S_ISREG(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            # Hard link?
            if b'source' in item:
                source = os.path.join(dest, item[b'source'])
                if os.path.exists(path):
                    os.unlink(path)
                os.link(source, path)
            else:
                with open(path, 'wb') as fd:
                    ids = [c[0] for c in item[b'chunks']]
                    for data in self.pipeline.fetch_many(ids, is_preloaded=True):
                        fd.write(data)
                    fd.flush()
                    self.restore_attrs(path, item, fd=fd.fileno())
        elif stat.S_ISFIFO(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            os.mkfifo(path)
            self.restore_attrs(path, item)
        elif stat.S_ISLNK(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            source = item[b'source']
            if os.path.exists(path):
                os.unlink(path)
            os.symlink(source, path)
            self.restore_attrs(path, item, symlink=True)
        elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
            os.mknod(path, item[b'mode'], item[b'rdev'])
            self.restore_attrs(path, item)
        else:
            raise Exception('Unknown archive item type %r' % item[b'mode'])
Example #4
    def __init__(self, model, serverpath=None):
        if serverpath is None:
            serverpath = os.path.join(os.environ['GOPATH'],
                                      'bin', 'semanticizest')
        d = mkdtemp(prefix='semanticizest-py')
        try:
            portfifo = os.path.join(d, 'portfifo')
            os.mkfifo(portfifo)

            args = [serverpath, '--http=:0', '--portfile=' + portfifo, model]
            # TODO start a thread that consumes stderr and acts on it.
            proc = subprocess.Popen(args)

            try:
                with open(portfifo) as f:
                    port = int(f.read().strip())
            except:
                proc.terminate()
                raise

            self._proc = proc
            self.url = 'http://localhost:%d' % port

        finally:
            rmtree(d, ignore_errors=True)
Example #5
    def _send_and_wait(self, title, *args):
        """Send a message to the autoserv and wait for it to signal
        completion.

        @param title: An alphanumeric string to title the message.
        @param *args: Additional arbitrary alphanumeric arguments to pass
                to the server.
        """
        # create a named pipe for us to receive a signal on
        fifo_dir = autotemp.tempdir(suffix='-fifo', unique_id='harness',
                                    dir=self.job.tmpdir)
        try:
            fifo_path = os.path.join(fifo_dir.name, 'autoserv.fifo')
            os.mkfifo(fifo_path)

            # send signal to the server as title[:args]:path
            msg = ':'.join([title] + list(args) + [fifo_path]) + '\n'
            self.status.write(msg)

            # wait for the server to signal back to us
            fifo = open(fifo_path)
            fifo.read(1)
            fifo.close()
        finally:
            fifo_dir.clean()
Example #6
def test_normalize_by_median_streaming_0():
    CUTOFF = '20'

    infile = utils.get_test_data('100-reads.fq.gz')
    in_dir = os.path.dirname(infile)
    fifo = utils.get_temp_filename('fifo')
    outfile = utils.get_temp_filename('outfile')

    # Use a fifo to copy stdout to a file for checking
    os.mkfifo(fifo)
    thread = threading.Thread(target=write_by_chunks, args=(fifo, outfile))
    thread.start()

    # Execute diginorm
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '17', '-o', fifo, infile]
    utils.runscript(script, args, in_dir)

    # Merge the thread
    thread.join()

    assert os.path.exists(outfile), outfile
    with open(outfile) as fp:
        linecount = sum(1 for _ in fp)
    assert linecount == 400
Example #7
    def __init__(self, fifo, wakes, qpolls):
        import stat
        if os.path.exists(fifo):
            if not stat.S_ISFIFO(os.stat(fifo).st_mode):
                CursesBase.close()
                error('%s is not a named pipe!' % fifo)
        else:
            try:
                os.mkfifo(fifo)

            except OSError:
                CursesBase.close()
                error("cannot create %s, check permissions." % fifo)

        self.events = {}

        self.fifo_opened = False
        self.fifo   = fifo
        self.wakes  = wakes
        self.qpolls = qpolls

        threading.Thread.__init__(self)
        Stoppable.__init__(self)

        self.daemon = True
Example #8
 def create_test_files(self):
     """Create a minimal test case including all supported file types
     """
     # File
     self.create_regular_file('empty', size=0)
     # 2600-01-01 > 2**64 ns
     os.utime('input/empty', (19880895600, 19880895600))
     self.create_regular_file('file1', size=1024 * 80)
     self.create_regular_file('flagfile', size=1024)
     # Directory
     self.create_regular_file('dir2/file2', size=1024 * 80)
     # File owner
     os.chown('input/file1', 100, 200)
     # File mode
     os.chmod('input/file1', 0o7755)
     os.chmod('input/dir2', 0o555)
     # Block device
     os.mknod('input/bdev', 0o600 | stat.S_IFBLK,  os.makedev(10, 20))
     # Char device
     os.mknod('input/cdev', 0o600 | stat.S_IFCHR,  os.makedev(30, 40))
     # Hard link
     os.link(os.path.join(self.input_path, 'file1'),
             os.path.join(self.input_path, 'hardlink'))
     # Symlink
     os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
     if xattr.is_enabled():
         xattr.setxattr(os.path.join(self.input_path, 'file1'), 'user.foo', b'bar')
         xattr.setxattr(os.path.join(self.input_path, 'link1'), 'user.foo_symlink', b'bar_symlink', follow_symlinks=False)
     # FIFO node
     os.mkfifo(os.path.join(self.input_path, 'fifo1'))
     if has_lchflags:
         os.lchflags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)
Example #9
 def init_fifo(self, fifo_file_name):
     self.fifo_file_name = fifo_file_name
     if os.path.exists(self.fifo_file_name):
         LOGGER.warning(self.fifo_file_name+" already exists, it should not")
         os.unlink(self.fifo_file_name)
     os.mkfifo(self.fifo_file_name)
     self.fifo = os.fdopen(os.open(self.fifo_file_name, os.O_RDONLY|os.O_NONBLOCK))
Example #10
def hook_init(fm):
    try:
        # Create a FIFO.
        import os
        IPC_FIFO = "/tmp/ranger-ipc." + str(os.getpid())
        os.mkfifo(IPC_FIFO)

        # Start the reader thread.
        try:
            import thread
        except ImportError:
            import _thread as thread

        def ipc_reader(filepath):
            while True:
                with open(filepath, 'r') as fifo:
                    line = fifo.read()
                    fm.execute_console(line.strip())
        thread.start_new_thread(ipc_reader, (IPC_FIFO,))

        # Remove the FIFO on ranger exit.
        def ipc_cleanup(filepath):
            try:
                os.unlink(filepath)
            except IOError:
                pass
        import atexit
        atexit.register(ipc_cleanup, IPC_FIFO)
    except IOError:
        # IPC support disabled
        pass
    finally:
        old_hook_init(fm)
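For illustration, a minimal client-side sketch for the FIFO created above; the PID in the path is hypothetical and would normally come from the running ranger instance:

import os

# Hypothetical PID of the running ranger process; the plugin above names
# its pipe /tmp/ranger-ipc.<pid>.
ranger_pid = 12345
ipc_path = "/tmp/ranger-ipc.%d" % ranger_pid

# Each line written here is handed to fm.execute_console() by the reader
# thread, so any ranger console command can be sent.
with open(ipc_path, "w") as fifo:
    fifo.write("cd /tmp\n")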
Example #11
    def _test_open(self, do_open_close_reader, do_open_close_writer):
        filename = support.TESTFN

        # Use a fifo: until the child opens it for reading, the parent will
        # block when trying to open it for writing.
        support.unlink(filename)
        os.mkfifo(filename)
        self.addCleanup(support.unlink, filename)

        code = '\n'.join((
            'import os, time',
            '',
            'path = %a' % filename,
            'sleep_time = %r' % self.sleep_time,
            '',
            '# let the parent block',
            'time.sleep(sleep_time)',
            '',
            do_open_close_reader,
        ))

        proc = self.subprocess(code)
        with kill_on_error(proc):
            do_open_close_writer(filename)
            self.assertEqual(proc.wait(), 0)
Example #12
    def __init__(self, config_dir):
        self.app = Flask(__name__)
        self.service_map = {}
        self.event_map = {}
        signal.signal(signal.SIGTERM, self.term_handler)
        signal.signal(signal.SIGQUIT, self.term_handler)
        signal.signal(signal.SIGINT, self.term_handler)

        with open(config_dir) as config_fd:
            self.api_config = yaml.load(config_fd)
        self.api_fifo_name = str(uuid.uuid4()) + '.fifo'
        self.api_fifo_path = os.path.join(ApiConstants.API_PIPE_DIR,
                                          self.api_fifo_name)
        os.mkfifo(self.api_fifo_path)
        try:
            self.api_fifo_fd = os.open(self.api_fifo_path, os.O_NONBLOCK)
            self.api_fifo_file = os.fdopen(self.api_fifo_fd)
        except (IOError, OSError) as exc:
            print ("Unable to read the fifo file due to error {0} "
                   .format(exc))
            raise

        if not os.path.exists(self.api_config['moirai_input_fifo']):
            os.mkfifo(self.api_config['moirai_input_fifo'])

        try:
            self.moirai_fifo_fd = os.open(self.api_config['moirai_input_fifo'],
                                          os.O_WRONLY | os.O_NONBLOCK)
            self.moirai_fifo_file = os.fdopen(self.moirai_fifo_fd, 'w')
        except (IOError, OSError) as exc:
            print "Unable to connect to Moirai Server"
            self.moirai_fifo_fd = None
        self.setup_routes()
        self.command_id = 0
Example #13
    def start_server(self):
        self.kill_proc_if_running()

        if self.comm_mode=='PIPE':
            if not os.path.exists(self.outpipe):
                os.mkfifo(self.outpipe)
        
        cmd = command(**self.__dict__)
        LOG.info("Starting java subprocess, and waiting for signal it's ready, with command: %s" % cmd)
        self.proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, universal_newlines=True)
        time.sleep(STARTUP_BUSY_WAIT_INTERVAL_SEC)
        if self.comm_mode=='SOCKET':
            sock = self.get_socket(num_retries=100, retry_interval=STARTUP_BUSY_WAIT_INTERVAL_SEC)
            sock.close()
        elif self.comm_mode=='PIPE':
            self.outpipe_fp = open(self.outpipe, 'rU', encoding=g_encoding)

        while True:
            # This loop is for if you have timeouts for the socket connection
            # The pipe system doesn't have timeouts, so this should run only
            # once in that case.
            try:
                ret = self.send_command_and_parse_result('PING\t""', 2)
                if ret is None:
                    continue
                assert ret == "PONG", "Bad return data on startup ping: " + ret
                LOG.info("Successful ping. The server has started.")
                break
            except socket.error:
                LOG.info("Waiting for startup: ping got exception")
                LOG.info("pausing before retry")
                time.sleep(STARTUP_BUSY_WAIT_INTERVAL_SEC)

        LOG.info("Subprocess is ready.")
Example #14
def make_stream_api():
    # Make sure we create a unique pipe for each request.
    global n_pipes_made
    n_pipes_made += 1
    # Set up a tweet pipe to listen to.
    pipe_path = temp_dir + "/tweet_pipe_" + str(n_pipes_made) + ".fifo"
    os.mkfifo(pipe_path)
    # Inform the hashkat instance of the pipe:
    send_command({"type": "add_tweet_stream", "stream_path": pipe_path})
    # Open the pipe for reading our tweet stream from:
    pipe = open(pipe_path, "r")
    # Cleanup method to be called upon disconnect:
    def cleanup():
        send_command({"type": "remove_tweet_stream", "stream_path": pipe_path})
        pipe.close()
        os.remove(pipe_path)

    def generate():
        try:
            for line in iter(pipe.readline, ""):
                tweet = json.loads(line)
                # TODO Transform tweet to meet Twitter API
                yield json.dumps(tweet) + "\n"
        finally:
            cleanup()

    return Response(generate(), status=200, content_type="application/json")
Example #15
def start_servers():
    print("==> Starting servers", flush=True)
    print("  -> Gitolite Log Tail", flush=True)
    gitolite_log_fifo = "/tmp/gitolite_logs.fifo"
    if not os.path.exists(gitolite_log_fifo):
        os.mkfifo(gitolite_log_fifo)

    os.makedirs(".ssh/", mode=0o700, exist_ok=True)
    with open(".ssh/environment", "w") as f:
        f.write("GL_LOGFILE={gitolite_log_fifo}\n".format(**locals()))
        # SSHD doesn't pass its environment on to gitolite, so the critical
        # nss-wrapper stuff doesn't get through. Fix this.
        for k,v in os.environ.items():
            f.write("{k}={v}\n".format(**locals()))

    tail_cmd = ["tail", "-f", gitolite_log_fifo]
    tail_pid = os.spawnvp(os.P_NOWAIT, tail_cmd[0], tail_cmd)

    print("  -> SSHD", flush=True)
    sshd_cmd = ["/sbin/sshd", "-f", "./sshd_config", "-D", "-e"]
    sshd_pid = os.spawnvp(os.P_NOWAIT, sshd_cmd[0], sshd_cmd)

    print("  -> Apache HTTPD", flush=True)
    httpd_cmd = ["httpd", "-d", ".", "-f", "httpd.conf", "-DFOREGROUND"]
    httpd_pid = os.spawnvp(os.P_NOWAIT, httpd_cmd[0], httpd_cmd)

    return set((sshd_pid, httpd_pid))
Example #16
def prepare_input_fifo(size, content_func=random_bytes):
    """
    Create temporary FIFO with content generated by `content_func`
    """

    # Write data first to a file and then from there to FIFO
    # (.. so that we can compare contents with the output file later)

    f = tempfile.NamedTemporaryFile(delete=False)
    f.write(content_func(size))
    f.close()
    f_fn = f.name

    fifo_fn = os.path.join(tempfile.gettempdir(), random_id())
    os.mkfifo(fifo_fn)

    def writer():
        with open(f_fn, "r") as f_fd:
            with open(fifo_fn, "w") as fifo_fd:
                for line in f_fd:
                    fifo_fd.write(line)

    threading.Thread(target=writer).start()

    print "Input file: fifo=", fifo_fn, "file=", f_fn
    return f_fn, fifo_fn
Example #17
 def create_test_files(self):
     """Create a minimal test case including all supported file types
     """
     # File
     self.create_regual_file('empty', size=0)
     self.create_regual_file('file1', size=1024 * 80)
     # Directory
     self.create_regual_file('dir2/file2', size=1024 * 80)
     # File owner
     os.chown('input/file1', 100, 200)
     # File mode
     os.chmod('input/file1', 0o7755)
     os.chmod('input/dir2', 0o555)
     # Block device
     os.mknod('input/bdev', 0o600 | stat.S_IFBLK,  os.makedev(10, 20))
     # Char device
     os.mknod('input/cdev', 0o600 | stat.S_IFCHR,  os.makedev(30, 40))
     if xattr.is_enabled():
         xattr.setxattr(os.path.join(self.input_path, 'file1'), 'user.foo', b'bar')
     # Hard link
     os.link(os.path.join(self.input_path, 'file1'),
             os.path.join(self.input_path, 'hardlink'))
     # Symlink
     os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
     # FIFO node
     os.mkfifo(os.path.join(self.input_path, 'fifo1'))
Example #18
    def __init__(self, configdir, tmpdir, hpyplm=False, metric='ibm_bleu'):
    
        cdec_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

        self.tmp = tmpdir
        os.mkdir(self.tmp)

        # HPYPLM reference stream
        self.hpyplm = hpyplm
        if self.hpyplm:
            ref_fifo_file = os.path.join(self.tmp, 'ref.fifo')
            os.mkfifo(ref_fifo_file)
            self.ref_fifo = open(ref_fifo_file, 'w+')
            # Start with empty line (do not learn prior to first input)
            self.ref_fifo.write('\n')
            self.ref_fifo.flush()

        # Decoder
        decoder_config = [[f.strip() for f in line.split('=')] for line in open(os.path.join(configdir, 'cdec.ini'))]
        util.cdec_ini_for_realtime(decoder_config, os.path.abspath(configdir), ref_fifo_file if self.hpyplm else None)
        decoder_config_file = os.path.join(self.tmp, 'cdec.ini')
        with open(decoder_config_file, 'w') as output:
            for (k, v) in decoder_config:
                output.write('{}={}\n'.format(k, v))
        decoder_weights = os.path.join(configdir, 'weights.final')
        self.decoder = decoder.MIRADecoder(decoder_config_file, decoder_weights, metric=metric)
Example #19
def main():
    " Main command to dynconf cli"

    # Read args.
    parser = argparse.ArgumentParser(description="Dynamic configuration.")

    # Filename to replace.
    parser.add_argument('--filename', help="Filename to replace")
    parser.add_argument('--keep-file', help="Don't delete the original file.",
                        default=False, action='store_true')

    # Parse the arguments.
    args = parser.parse_args()

    if not args.keep_file:

        # Remove file.
        os.unlink(args.filename)

        # Create fifo.
        os.mkfifo(args.filename)

    # Read content.
    content = sys.stdin.read()

    # Open the fifo for writing; this blocks until a reader opens it.
    fd = os.open(args.filename, os.O_WRONLY)

    # Pass stdin to filename.
    os.write(fd, content)

    # Close the fifo.
    os.close(fd)
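On the consumer side nothing special is needed; a minimal sketch, assuming a hypothetical config path, of the program that normally reads the replaced file:

# The reader's open() blocks until dynconf opens the FIFO for writing,
# then it receives exactly the bytes that were passed on dynconf's stdin.
with open('/etc/example.conf') as f:  # hypothetical --filename value
    data = f.read()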
Example #20
    def __init__(self, ref, urls, save_id, multi):
        TransferContext.__init__(self, multi)
        self.mem_buffer = ""
        self.ref = ref
        self.urls = urls
        self.failures = 0
        self.has_completed = False
        self.has_succeeded = False
        self.fifo_dir = tempfile.mkdtemp()
        self.fifo_name = os.path.join(self.fifo_dir, 'fetch_fifo')
        with tempfile.NamedTemporaryFile(delete=False) as sinkfile:
            self.sinkfile_name = sinkfile.name
            
        os.mkfifo(self.fifo_name)
        self.fifo_fd = os.open(self.fifo_name, os.O_RDWR | os.O_NONBLOCK)
        self.sink_fp = open(self.sinkfile_name, "wb")
        self.current_start_byte = 0
        self.chunk_size = 1048576
        self.have_written_to_process = False
        self.response_had_stream = False
        self.dormant_until = None
        self.requests_paused = False
        self.request_length = None

        self.save_id = save_id
        self.multi = multi
        self.description = self.urls[0]
        multi.handles.append(self)
        self.start_next_fetch()
Example #21
 def __init__(self, douban):
     self.platform = platform.system()
     self.get_config()
     self.douban = douban
     if self.douban.pro == 0:
         PRO = ''
     else:
         PRO = colored(' PRO ', attrs=['reverse'])
     self.TITLE += self.douban.user_name + ' ' + PRO + ' ' + ' >>\r'
     self.start = 0 # playback lock, must be acquired before play
     self.q = 0 # quit flag
     self.lrc_dict = {} # lyrics
     self.song_time = -1 # remaining playback time of the current song
     self.rate = ['★ '*i for i in range(1, 6)] # song rating stars
     self.lrc_display = 0 # whether to display lyrics
     self.pause = True
     self.mplayer_controller = os.path.join(tempfile.mkdtemp(), 'mplayer_controller')
     self.loop = False
     self.is_muted = False # whether muted
     os.mkfifo(self.mplayer_controller)
     # daemon threads
     self.t1 = threading.Thread(target=self.protect)
     self.t2 = threading.Thread(target=self.display_time)
     self.t3 = threading.Thread(target=self.display_lrc)
     self.t1.start()
     self.t2.start()
     self.t3.start()
     super(Win, self).__init__(self.douban.lines)
     # start auto-play
     self.SUFFIX_SELECTED = '正在加载请稍后...'  # "Loading, please wait..."
     self.display()
     self.douban.set_channel(self.douban.channels[self.markline]['channel_id']) # set the default channel
     self.douban.get_playlist()
     self.play()
     self.run()
Example #22
def streamer(ifilename):

    # Get temp filenames, etc.
    in_dir = tempfile.mkdtemp(prefix="screedtest_")
    fifo = os.path.join(in_dir, 'fifo')
    ifile = io.open(ifilename, 'rb')

    # make a fifo to simulate streaming
    os.mkfifo(fifo)

    exception = []
    # FIFOs MUST BE OPENED FOR READING BEFORE THEY ARE WRITTEN TO
    # If this isn't done, they will BLOCK and things will hang.
    # rvalues will hold the return from the threaded function
    thread = threading.Thread(target=streamer_reader, args=[fifo, exception])
    thread.start()

    fifofile = io.open(fifo, 'wb')
    # read binary to handle compressed files
    chunk = ifile.read(8192)
    while len(chunk) > 0:
        fifofile.write(chunk)
        chunk = ifile.read(8192)

    fifofile.close()

    thread.join()

    if len(exception) > 0:
        raise exception[0]
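streamer_reader is not shown in this snippet; a minimal sketch of what such a helper might look like, assuming it simply drains the FIFO with screed and reports any exception back through the shared list:

import screed

def streamer_reader(fifo, exception):
    # Opening the FIFO for reading is what unblocks the writer in streamer().
    # Any parsing error is handed back to the main thread via `exception`.
    try:
        for _ in screed.open(fifo):
            pass
    except Exception as err:
        exception.append(err)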
Example #23
    def prepareEnvironment(self):
        """Prepare the environment for the execution of the domain. This
        method is called before any devices are set up."""
        
        domid = self.vm.getDomid()
        
        # Delete left-over pipes
        try:
            os.unlink('/var/run/tap/qemu-read-%d' % domid)
            os.unlink('/var/run/tap/qemu-write-%d' % domid)
        except:
            pass

        # No device model, don't create pipes
        if self.device_model is None:
            return

        if platform.system() != 'SunOS':
            # If we use a device model, the pipes for communication between
            # blktapctrl and ioemu must be present before the devices are 
            # created (blktapctrl must access them for new block devices)

            try:
                os.makedirs('/var/run/tap', 0755)
            except:
                pass

            try:
                os.mkfifo('/var/run/tap/qemu-read-%d' % domid, 0600)
                os.mkfifo('/var/run/tap/qemu-write-%d' % domid, 0600)
            except OSError, e:
                log.warn('Could not create blktap pipes for domain %d' % domid)
                log.exception(e)
                pass
Example #24
        def test_mkfifo(self):
            os = self.posix
            os.mkfifo(self.path2 + "test_mkfifo", 0o666)
            st = os.lstat(self.path2 + "test_mkfifo")
            import stat

            assert stat.S_ISFIFO(st.st_mode)
Example #25
    def setUp(self):
        # Create a fifo for testing. We'll remove it in tearDown.
        self.fifo_path = os.getcwd() + '/fifo'
        os.mkfifo(self.fifo_path)

        self.pipe = os.open(self.fifo_path, os.O_RDONLY | os.O_NONBLOCK)
        self.pad = Pad(self.fifo_path)
Example #26
def are_fifos_supported():
    with unopened_tempfile() as filepath:
        try:
            os.mkfifo(filepath)
            return True
        except OSError:
            return False
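A possible way to use the helper above is as a test guard; this is a minimal sketch assuming the standard unittest module, with hypothetical test names:

import unittest

# Skip FIFO-dependent tests when the platform cannot create named pipes
# via os.mkfifo().
fifo_support = are_fifos_supported()

class FifoDependentTests(unittest.TestCase):
    @unittest.skipUnless(fifo_support, "os.mkfifo() is not supported here")
    def test_reads_from_fifo(self):
        ...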
Example #27
 def restore(self, fileContents, root, target, journal=None, nameLookup=True,
             **kwargs):
     util.removeIfExists(target)
     util.mkdirChain(os.path.dirname(target))
     os.mkfifo(target)
     return File.restore(self, root, target, journal=journal,
         nameLookup=nameLookup, **kwargs)
Example #28
 def setupNamedOutPipes(self, pipeshutdown):
     # lets add any named output pipes requested
     for (outpipename,outboxname) in self.outpipes.items():
         
         # create the pipe
         try:
             os.mkfifo(outpipename)
         except:
             pass
         
         # open the file handle for reading
         f = open(outpipename, "rb+",self.buffersize)
         
         # create the handler component to receive from that pipe
         PIPE = _FromFileHandle(f, self.buffersize)
         self.link((PIPE,"outbox"), (self,outboxname), passthrough=2)
         
         # wire up an inbox for it, and daisy-chain its control box from the
         # previous pipe's signal box
         self.link(pipeshutdown,(PIPE,"control"))
         pipeshutdown=(PIPE,"signal")
         
         self.addChildren(PIPE)
         
         # give it a useful name (for debugging), and make it our child
         PIPE.name = "[UnixProcess2 outpipe '"+outpipename+"'] "+PIPE.name
         
     return pipeshutdown
Example #29
def main():
    if os.path.exists(FIFO_PATH):
        os.unlink(FIFO_PATH)
    os.mkfifo(FIFO_PATH)

    proc = subprocess.Popen("raspistill -vf -t 3600000 {} -n -o {}".format(DEFAULT_ATTRS, FIFO_PATH), shell=True)
    proc.wait()
Example #30
def run(test, params, env):
    """
    Test command: virsh domjobabort.

    The command can abort the currently running domain job.
    1.Prepare test environment,destroy or suspend a VM.
    2.Do action to get a subprocess(dump, save, managedsave).
    3.Perform virsh domjobabort operation to abort VM's job.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    readonly = ("yes" == params.get("readonly", "no"))
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    original_speed = virsh.migrate_getspeed(vm_name).stdout.strip()

    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute a background virsh command and return the subprocess without
        waiting for it to exit.

        :param action: virsh sub-command to run.
        :param vm_name: VM's name.
        :param file: virsh command's file option.
        :param remote_uri: URI of the migration destination, if any.
        """
        args = ""
        if action == "managedsave":
            file = ""
        elif action == "migrate":
            # Slow down migration for domjobabort
            virsh.migrate_setspeed(vm_name, "1")
            file = remote_uri
            args = "--unsafe"
        command = "virsh %s %s %s %s" % (action, vm_name, file, args)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p

    action = params.get("jobabort_action", "dump")
    dump_opt = params.get("dump_opt", None)
    status_error = params.get("status_error", "no")
    job = params.get("jobabort_job", "yes")
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "domjobabort.tmp")
    tmp_pipe = os.path.join(data_dir.get_tmp_dir(), "domjobabort.fifo")
    vm_ref = params.get("jobabort_vm_ref")
    remote_uri = params.get("jobabort_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    saved_data = None

    # Build job action
    if dump_opt:
        action = "dump --crash"

    if action == "managedsave":
        tmp_pipe = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        virsh.save(vm_name, tmp_file, ignore_status=True)

    if action == "migrate":
        if remote_host.count("EXAMPLE"):
            test.cancel("Remote host should be configured "
                        "for migrate.")
        else:
            # Config ssh autologin for remote host
            ssh_key.setup_ssh_key(remote_host, remote_user,
                                  remote_pwd, port=22)

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    # Get the subprocess of VM.
    # The command's effect is to abort the currently running domain job.
    # So before do "domjobabort" action, we must create a job on the domain.
    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        os.mkfifo(tmp_pipe)

        process = get_subprocess(action, vm_name, tmp_pipe, remote_uri)

        saved_data = None
        if action == "restore":
            with open(tmp_file, 'r') as tmp_f:
                saved_data = tmp_f.read(10 * 1024 * 1024)
            f = open(tmp_pipe, 'w')
            f.write(saved_data[:1024 * 1024])
        elif action == "migrate":
            f = None
        else:
            f = open(tmp_pipe, 'rb')
            dummy = f.read(1024 * 1024).decode(locale.getpreferredencoding(), 'ignore')

    # Give enough time for starting job
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break
    virsh_dargs = {'ignore_status': True, 'debug': True}
    if readonly:
        virsh_dargs.update({'readonly': True})
    ret = virsh.domjobabort(vm_ref, **virsh_dargs)
    status = ret.exit_status

    if process and f:
        if saved_data:
            f.write(saved_data[1024 * 1024:])
        else:
            dummy = f.read()
        f.close()

        try:
            os.unlink(tmp_pipe)
        except OSError as detail:
            logging.info("Can't remove %s: %s", tmp_pipe, detail)
        try:
            os.unlink(tmp_file)
        except OSError as detail:
            logging.info("Cant' remove %s: %s", tmp_file, detail)

    # Recover the environment.
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        if process.poll():
            try:
                process.kill()
            except OSError:
                pass

    if action == "migrate":
        # Recover migration speed
        virsh.migrate_setspeed(vm_name, original_speed)
        migration.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
Example #31
 def make_source(filepath):
     try:
         os.unlink(filepath)
     except OSError:
         pass
     os.mkfifo(filepath)
Example #32
 def test_fifo(self):
     os.mkfifo(TESTFN, 0o700)
     st_mode = self.get_mode()
     self.assertS_IS("FIFO", st_mode)
Example #33
 def create(self):
     self.remove()
     os.mkfifo(self.path)
Example #34
    def __init__(self, console_type, address, is_server=False, pki_path='.'):
        """
        Initialize the instance and create socket/fd for connect with.

        :param console_type: Type of console to connect with. Supports
                             unix, tcp, udp, file or pipe
        :param address: Address for socket or fd. For tcp and udp, This should
                        be a tuple of (host, port). For unix, file, pipe
                        this should be a string representing the path to be
                        connect with
        :param is_server: Whether this connection act as a server or a client
        :param pki_path: Where the tls pki file is located
        """
        self.exit = False
        self.address = address
        self.peer_addr = None
        self.console_type = console_type
        self.socket = None
        self.read_fd = None
        self.write_fd = None
        self._poll_thread = None
        self.pki_path = pki_path
        self.linesep = "\n"
        self.prompt = r"^\[.*\][\#\$]\s*$"
        self.status_test_command = "echo $?"
        self.process = None

        if console_type == 'unix':
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.socket.connect(address)
        elif console_type == 'tls':
            outfilename = '/tmp/gnutls.out'
            self.read_fd = open(outfilename, 'a+')
            if is_server:
                cmd = ('gnutls-serv --echo --x509cafile %s/ca-cert.pem ' %
                       self.pki_path +
                       '--x509keyfile %s/server-key.pem' % self.pki_path +
                       ' --x509certfile %s/server-cert.pem' % self.pki_path)
                obj = subprocess.Popen(cmd,
                                       shell=True,
                                       stdin=subprocess.PIPE,
                                       stdout=self.read_fd,
                                       universal_newlines=True)
            else:
                cmd = ('gnutls-cli --priority=NORMAL -p5556 --x509cafile=%s' %
                       self.pki_path + '/ca-cert.pem ' + '127.0.0.1 ' +
                       '--x509certfile=%s' % self.pki_path +
                       '/client-cert.pem --x509keyfile=%s' % self.pki_path +
                       '/client-key.pem')
                obj = subprocess.Popen(cmd,
                                       shell=True,
                                       stdin=subprocess.PIPE,
                                       stdout=self.read_fd,
                                       universal_newlines=True)
                obj.stdin.write('\n')
                time.sleep(50)
                obj.stdin.write('\n')
                time.sleep(50)
                # In Python 3, stdin.close() will raise a BrokenPipeError
                try:
                    obj.stdin.close()
                except socket.error as e:
                    if e.errno != errno.EPIPE:
                        # Not a broken pipe
                        raise
            self.process = obj
        elif console_type == 'tcp':
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if is_server:
                self.socket.bind(address)
                self.socket.listen(1)
                self._poll_thread = threading.Thread(target=self._tcp_thread)
                self._poll_thread.start()
            else:
                self.socket.connect(address)
        elif console_type == 'udp':
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            if is_server:
                self.socket.bind(address)
        elif console_type == 'pipe':
            os.mkfifo(address + '.in')
            os.mkfifo(address + '.out')
            self.write_fd = os.open(address + '.in',
                                    os.O_RDWR | os.O_CREAT | os.O_NONBLOCK)
            self.read_fd = os.open(address + '.out',
                                   os.O_RDONLY | os.O_CREAT | os.O_NONBLOCK)
        elif console_type == 'file':
            self.read_fd = open(address, 'r')
Example #35
def child():
    pipeout = os.open(pipe_name, os.O_WRONLY)
    counter = 0
    while True:
        time.sleep(1)
        os.write(pipeout, b'Number %03d\n' % counter)
        counter = (counter + 1) % 5
    print("Child completed")


def parent():
    pipein = open(pipe_name, 'r')
    counter = 0
    while True:
        line = pipein.readline()[:-1]
        print('Parent %d got "%s" at %s' % (os.getpid(), line, time.time()))
        counter += 1
        if counter > 10:
            break
    pipein.close()
    print("Parent completed")


if not os.path.exists(pipe_name):
    os.mkfifo(pipe_name)
pid = os.fork()
if pid != 0:
    parent()
else:
    child()
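The snippet above relies on a module-level pipe_name and imports that were trimmed from it; a minimal sketch of the assumed preamble, with a hypothetical pipe path:

import os
import time

# Hypothetical location of the named pipe shared by child() and parent().
pipe_name = '/tmp/example_pipe'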
Example #36
def _run_salmon(job_context: Dict) -> Dict:
    """Runs Salmon Quant."""
    logger.debug("Running Salmon..")

    # Salmon needs to be run differently for different sample types.
    # SRA files also get processed differently as we don't want to use fasterq-dump to extract
    # them to disk.
    if job_context.get("sra_input_file_path", None):

        # Single reads
        if job_context["sra_num_reads"] == 1:

            fifo = "/tmp/barney"
            os.mkfifo(fifo)

            dump_str = "fastq-dump --stdout {input_sra_file} > {fifo} &"
            formatted_dump_command = dump_str.format(
                input_sra_file=job_context["sra_input_file_path"], fifo=fifo)
            dump_po = subprocess.Popen(formatted_dump_command,
                                       shell=True,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT)

            command_str = (
                "salmon --no-version-check quant -l A -i {index} "
                "-r {fifo} -p 16 -o {output_directory} --seqBias --dumpEq --writeUnmappedNames"
            )
            formatted_command = command_str.format(
                index=job_context["index_directory"],
                input_sra_file=job_context["sra_input_file_path"],
                fifo=fifo,
                output_directory=job_context["output_directory"],
            )
        # Paired are trickier
        else:

            # For reasons we can't explain, this only works when the FIFOs live in
            # the temp directory; otherwise `tee` writes to only one of the two
            # streams (non-deterministically) instead of both.
            alpha = "/tmp/alpha"
            os.mkfifo(alpha)
            beta = "/tmp/beta"
            os.mkfifo(beta)

            dump_str = "fastq-dump --stdout --split-files -I {input_sra_file} | tee >(grep '@.*\.1\s' -A3 --no-group-separator > {fifo_alpha}) >(grep '@.*\.2\s' -A3 --no-group-separator > {fifo_beta}) > /dev/null &"
            formatted_dump_command = dump_str.format(
                input_sra_file=job_context["sra_input_file_path"],
                fifo_alpha=alpha,
                fifo_beta=beta)
            dump_po = subprocess.Popen(
                formatted_dump_command,
                shell=True,
                executable="/bin/bash",
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )

            command_str = (
                "salmon --no-version-check quant -l A -i {index} "
                "-1 {fifo_alpha} -2 {fifo_beta} -p 16 -o {output_directory} --seqBias --dumpEq --writeUnmappedNames"
            )
            formatted_command = command_str.format(
                index=job_context["index_directory"],
                input_sra_file=job_context["sra_input_file_path"],
                fifo_alpha=alpha,
                fifo_beta=beta,
                output_directory=job_context["output_directory"],
            )

    else:
        if "input_file_path_2" in job_context:
            second_read_str = " -2 {}".format(job_context["input_file_path_2"])

            # Rob recommends 16 threads/process, which fits snugly on an x1 at 8GB RAM per Salmon container:
            # (2 threads/core * 16 cores/socket * 64 vCPU) / (1TB/8GB) = ~17
            command_str = (
                "salmon --no-version-check quant -l A --biasSpeedSamp 5 -i {index}"
                " -1 {input_one}{second_read_str} -p 16 -o {output_directory}"
                " --seqBias --gcBias --dumpEq --writeUnmappedNames")

            formatted_command = command_str.format(
                index=job_context["index_directory"],
                input_one=job_context["input_file_path"],
                second_read_str=second_read_str,
                output_directory=job_context["output_directory"],
            )
        else:
            # Related: https://github.com/COMBINE-lab/salmon/issues/83
            command_str = ("salmon --no-version-check quant -l A -i {index}"
                           " -r {input_one} -p 16 -o {output_directory}"
                           " --seqBias --dumpEq --writeUnmappedNames")

            formatted_command = command_str.format(
                index=job_context["index_directory"],
                input_one=job_context["input_file_path"],
                output_directory=job_context["output_directory"],
            )

    logger.debug(
        "Running Salmon Quant using the following shell command: %s",
        formatted_command,
        processor_job=job_context["job_id"],
    )

    # Salmon probably shouldn't take longer than three hours.
    timeout = 60 * 60 * 3
    job_context["time_start"] = timezone.now()
    try:
        completed_command = subprocess.run(
            formatted_command.split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout,
        )
    except subprocess.TimeoutExpired:
        failure_reason = "Salmon timed out because it failed to complete within 3 hours."
        logger.error(
            failure_reason,
            sample_accesion_code=job_context["sample"].accession_code,
            processor_job=job_context["job_id"],
        )
        job_context["job"].failure_reason = failure_reason
        job_context["job"].no_retry = True
        job_context["success"] = False
        return job_context

    job_context["time_end"] = timezone.now()

    if completed_command.returncode == 1:
        stderr = completed_command.stderr.decode().strip()
        error_start = stderr.upper().find("ERROR:")
        error_start = error_start if error_start != -1 else 0
        logger.error(
            "Shell call to salmon failed with error message: %s",
            stderr[error_start:],
            processor_job=job_context["job_id"],
        )

        # If salmon has an error exit code then we don't want to retry it.
        job_context["job"].no_retry = True
        job_context["job"].failure_reason = (
            "Shell call to salmon failed because: " + stderr[error_start:])
        job_context["success"] = False
    else:
        result = ComputationalResult()
        result.commands.append(formatted_command)
        result.time_start = job_context["time_start"]
        result.time_end = job_context["time_end"]
        result.organism_index = job_context["organism_index"]
        result.is_ccdl = True

        try:
            processor_key = "SALMON_QUANT"
            result.processor = utils.find_processor(processor_key)
        except Exception as e:
            return utils.handle_processor_exception(job_context, processor_key,
                                                    e)

        # Zip up the output of Salmon Quant
        try:
            with tarfile.open(job_context["output_archive"], "w:gz") as tar:
                tar.add(job_context["output_directory"], arcname=os.sep)
        except Exception:
            logger.exception(
                "Exception caught while zipping processed directory %s",
                job_context["output_directory"],
                processor_job=job_context["job_id"],
            )
            failure_template = "Exception caught while zipping processed directory {}"
            job_context["job"].failure_reason = failure_template.format(
                job_context["output_archive"])
            job_context["success"] = False
            return job_context

        salmon_quant_archive = ComputedFile()
        salmon_quant_archive.absolute_file_path = job_context["output_archive"]
        salmon_quant_archive.filename = os.path.split(
            job_context["output_archive"])[-1]
        salmon_quant_archive.calculate_sha1()
        salmon_quant_archive.calculate_size()
        salmon_quant_archive.is_public = True
        salmon_quant_archive.is_smashable = False
        salmon_quant_archive.is_qc = False

        quant_file = ComputedFile()
        quant_file.s3_bucket = S3_BUCKET_NAME
        timestamp = str(timezone.now().timestamp()).split(".")[0]
        quant_file.s3_key = "quant_files/sample_{0}_{1}_quant.sf".format(
            job_context["sample"].id, timestamp)
        quant_file.filename = "quant.sf"
        quant_file.absolute_file_path = job_context[
            "output_directory"] + "quant.sf"
        quant_file.is_public = False
        quant_file.is_smashable = False
        quant_file.is_qc = False
        quant_file.calculate_sha1()
        quant_file.calculate_size()

        # If we're running in the cloud we need to upload the quant.sf
        # file so that it can be used by a job running on any machine
        # to run tximport. We can't use sync_to_s3 though because we
        # have to sync it before we can save the file so it cannot be
        # discovered by other jobs before it is uploaded.
        if settings.RUNNING_IN_CLOUD:
            try:
                S3.upload_file(
                    quant_file.absolute_file_path,
                    quant_file.s3_bucket,
                    quant_file.s3_key,
                    ExtraArgs={
                        "ACL": "public-read",
                        "StorageClass": "STANDARD_IA"
                    },
                )
            except Exception as e:
                logger.exception(e,
                                 processor_job=job_context["job_id"],
                                 sample=job_context["sample"].id)
                failure_template = "Exception caught while uploading quantfile to S3: {}"
                job_context["job"].failure_reason = failure_template.format(
                    quant_file.absolute_file_path)
                job_context["success"] = False
                return job_context

        # Here select_for_update() is used as a mutex that forces multiple
        # jobs to execute this block of code in serial manner. See:
        # https://docs.djangoproject.com/en/1.11/ref/models/querysets/#select-for-update
        # Theorectically any rows in any table can be locked here, we're
        # locking all existing rows in ComputationalResult table.
        with transaction.atomic():
            ComputationalResult.objects.select_for_update()
            result.save()
            job_context["quant_result"] = result
            quant_file.result = result
            quant_file.save()

            job_context["result"] = result

            job_context["pipeline"].steps.append(result.id)
            SampleResultAssociation.objects.get_or_create(
                sample=job_context["sample"], result=result)

            salmon_quant_archive.result = result
            salmon_quant_archive.save()
            job_context["computed_files"].append(salmon_quant_archive)

        kv = ComputationalResultAnnotation()
        kv.data = {
            "index_length": job_context["index_length"],
            "index_length_get": job_context.get("index_length_raw", None),
        }
        kv.result = result
        kv.is_public = True
        kv.save()

        try:
            with open(
                    os.path.join(job_context["output_directory"],
                                 "lib_format_counts.json")) as lfc_file:
                format_count_data = json.load(lfc_file)
                kv = ComputationalResultAnnotation()
                kv.data = format_count_data
                kv.result = result
                kv.is_public = True
                kv.save()
        except Exception:
            # See: https://github.com/AlexsLemonade/refinebio/issues/1167
            logger.exception(
                "Error parsing Salmon lib_format_counts JSON output!",
                processor_job=job_context["job_id"],
            )

        try:
            with open(
                    os.path.join(job_context["output_directory"], "aux_info",
                                 "meta_info.json")) as mi_file:
                meta_info = json.load(mi_file)
                kv = ComputationalResultAnnotation()
                kv.data = meta_info
                kv.result = result
                kv.is_public = True
                kv.save()
        except Exception:
            # See: https://github.com/AlexsLemonade/refinebio/issues/1167
            logger.exception("Error parsing Salmon meta_info JSON output!",
                             processor_job=job_context["job_id"])

        job_context["success"] = True

    return job_context
Example #37
 def test_fifo(self):
     os.mkfifo(TESTFN, 0o700)
     st_mode, modestr = self.get_mode()
     self.assertEqual(modestr, 'prwx------')
     self.assertS_IS("FIFO", st_mode)

Example #38
#initiateprocess()

def createinputDeamon():
    newinputpid = os.fork()
    if newinputpid == 0:
        inputmainlink()
        return False
    else:
        return True  # True means this is the parent process
print "----*----"
#inputthread=None
control_pipe_name="controlnamepipe"
if not os.path.exists(control_pipe_name):
    os.mkfifo(control_pipe_name)


while(1):
    print "loop terminated"

    newinputpid = os.fork()
    currentDb.updateinputpid(newinputpid)
    if newinputpid == 0:
        inputmainlink()

    else:
        newoutputpid = os.fork()
        currentDb.updateoutputpid(newoutputpid)
        if newoutputpid == 0:
            #outputmainlink()
Example #39
 def create_pipe(self):
     filename = self.filename
     try:
         os.mkfifo(filename)
     except OSError as e:
         pass
Example #40
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # f stand for first, s for second.
        first_sandbox = create_sandbox(file_cacher,
                                       multithreaded=job.multithreaded_sandbox,
                                       name="first_evaluate")
        second_sandbox = create_sandbox(
            file_cacher,
            multithreaded=job.multithreaded_sandbox,
            name="second_evaluate")
        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo = os.path.join(fifo_dir, "fifo")
        os.mkfifo(fifo)
        os.chmod(fifo_dir, 0o755)
        os.chmod(fifo, 0o666)

        # First step: we start the first manager.
        first_filename = "manager"
        first_command = ["./%s" % first_filename, "0", fifo]
        first_executables_to_get = {
            first_filename: job.executables[first_filename].digest
        }
        first_files_to_get = {"input.txt": job.input}
        first_allow_path = [fifo_dir]

        # Put the required files into the sandbox
        for filename, digest in iteritems(first_executables_to_get):
            first_sandbox.create_file_from_storage(filename,
                                                   digest,
                                                   executable=True)
        for filename, digest in iteritems(first_files_to_get):
            first_sandbox.create_file_from_storage(filename, digest)

        first = evaluation_step_before_run(first_sandbox,
                                           first_command,
                                           job.time_limit,
                                           job.memory_limit,
                                           first_allow_path,
                                           stdin_redirect="input.txt",
                                           wait=False)

        # Second step: we start the second manager.
        second_filename = "manager"
        second_command = ["./%s" % second_filename, "1", fifo]
        second_executables_to_get = {
            second_filename: job.executables[second_filename].digest
        }
        second_files_to_get = {}
        second_allow_path = [fifo_dir]

        # Put the required files into the second sandbox
        for filename, digest in iteritems(second_executables_to_get):
            second_sandbox.create_file_from_storage(filename,
                                                    digest,
                                                    executable=True)
        for filename, digest in iteritems(second_files_to_get):
            second_sandbox.create_file_from_storage(filename, digest)

        second = evaluation_step_before_run(second_sandbox,
                                            second_command,
                                            job.time_limit,
                                            job.memory_limit,
                                            second_allow_path,
                                            stdout_redirect="output.txt",
                                            wait=False)

        # Consume output.
        wait_without_std([second, first])
        # TODO: check exit codes with translate_box_exitcode.

        success_first, first_plus = \
            evaluation_step_after_run(first_sandbox)
        success_second, second_plus = \
            evaluation_step_after_run(second_sandbox)

        job.sandboxes = [first_sandbox.path, second_sandbox.path]
        job.plus = second_plus

        success = True
        outcome = None
        text = []

        # Error in the sandbox: report failure!
        if not success_first or not success_second:
            success = False

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(first_plus) or \
                not is_evaluation_passed(second_plus):
            outcome = 0.0
            if not is_evaluation_passed(first_plus):
                text = human_evaluation_message(first_plus)
            else:
                text = human_evaluation_message(second_plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not second_sandbox.file_exists('output.txt'):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"), "output.txt"]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = second_sandbox.get_file_to_storage(
                        "output.txt", "Output file in job %s" % job.info)

                # If not asked otherwise, evaluate the output file
                if not job.only_execution:
                    # Put the reference solution into the sandbox
                    second_sandbox.create_file_from_storage(
                        "res.txt", job.output)

                    # If a checker is not provided, use white-diff
                    if self.parameters[0] == "diff":
                        outcome, text = white_diff_step(
                            second_sandbox, "output.txt", "res.txt")

                    elif self.parameters[0] == "comparator":
                        if TwoSteps.CHECKER_FILENAME not in job.managers:
                            logger.error(
                                "Configuration error: missing or "
                                "invalid comparator (it must be "
                                "named `checker')",
                                extra={"operation": job.info})
                            success = False
                        else:
                            second_sandbox.create_file_from_storage(
                                TwoSteps.CHECKER_FILENAME,
                                job.managers[TwoSteps.CHECKER_FILENAME].digest,
                                executable=True)
                            # Rewrite input file, as in Batch.py
                            try:
                                second_sandbox.remove_file("input.txt")
                            except OSError as e:
                                assert not second_sandbox.file_exists(
                                    "input.txt")
                            second_sandbox.create_file_from_storage(
                                "input.txt", job.input)
                            success, _ = evaluation_step(
                                second_sandbox, [[
                                    "./%s" % TwoSteps.CHECKER_FILENAME,
                                    "input.txt", "res.txt", "output.txt"
                                ]])
                            if success:
                                try:
                                    outcome, text = extract_outcome_and_text(
                                        second_sandbox)
                                except ValueError as e:
                                    logger.error(
                                        "Invalid output from "
                                        "comparator: %s",
                                        e,
                                        extra={"operation": job.info})
                                    success = False
                    else:
                        raise ValueError("Uncrecognized first parameter"
                                         " `%s' for TwoSteps tasktype." %
                                         self.parameters[0])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text

        delete_sandbox(first_sandbox, job.success)
        delete_sandbox(second_sandbox, job.success)
Example #41
0
    def download(self, req):
        rsp = DownloadRsp()

        def _get_origin_format(path):
            qcow2_length = 0x9007
            if path.startswith('http://') or path.startswith(
                    'https://') or path.startswith('ftp://'):
                resp = urllib2.urlopen(path)
                qhdr = resp.read(qcow2_length)
                resp.close()
            elif path.startswith('sftp://'):
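                # pipe_path and scp_to_pipe_cmd are closure variables defined
                # later in download() before this helper runs for sftp URLs:
                # scp streams the remote file into a named pipe and `head`
                # pulls just the header bytes out of it into a temp file.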
                fd, tmp_file = tempfile.mkstemp()
                get_header_from_pipe_cmd = "timeout 60 head --bytes=%d %s > %s" % (
                    qcow2_length, pipe_path, tmp_file)
                clean_cmd = "pkill -f %s" % pipe_path
                shell.run(
                    '%s & %s && %s' %
                    (scp_to_pipe_cmd, get_header_from_pipe_cmd, clean_cmd))
                qhdr = os.read(fd, qcow2_length)
                if os.path.exists(tmp_file):
                    os.remove(tmp_file)
            else:
                resp = open(path)
                qhdr = resp.read(qcow2_length)
                resp.close()
            if len(qhdr) < qcow2_length:
                return "raw"
            if qhdr[:4] == 'QFI\xfb':
                if qhdr[16:20] == '\x00\x00\x00\00':
                    return "qcow2"
                else:
                    return "derivedQcow2"

            if qhdr[0x8001:0x8006] == 'CD001':
                return 'iso'

            if qhdr[0x8801:0x8806] == 'CD001':
                return 'iso'

            if qhdr[0x9001:0x9006] == 'CD001':
                return 'iso'
            return "raw"

        def get_origin_format(fpath, fail_if_has_backing_file=True):
            image_format = _get_origin_format(fpath)
            if image_format == "derivedQcow2" and fail_if_has_backing_file:
                raise Exception('image has backing file or %s does not exist!' %
                                fpath)
            return image_format

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        @rollbackable
        def _1():
            shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))

        def _getRealSize(length):
            '''length looks like: 10245K'''
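            # e.g. _getRealSize("10245K") -> 10245 * 1024 = 10490880 bytes;
            # values without a unit suffix are returned unchanged.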
            logger.debug(length)
            if not length[-1].isalpha():
                return length
            units = {
                "g": lambda x: x * 1024 * 1024 * 1024,
                "m": lambda x: x * 1024 * 1024,
                "k": lambda x: x * 1024,
            }
            try:
                if not length[-1].isalpha():
                    return length
                return units[length[-1].lower()](int(length[:-1]))
            except:
                logger.warn(linux.get_exception_stacktrace())
                return length

        # whether we have an upload request
        if cmd.url.startswith(self.UPLOAD_PROTO):
            self._prepare_upload(cmd)
            rsp.size = 0
            rsp.uploadPath = self._get_upload_path(req)
            self._set_capacity_to_response(rsp)
            return jsonobject.dumps(rsp)

        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "AddImage"
        report.resourceUuid = cmd.imageUuid
        report.progress_report("0", "start")

        url = urlparse.urlparse(cmd.url)
        if url.scheme in ('http', 'https', 'ftp'):
            image_format = get_origin_format(cmd.url, True)
            cmd.url = linux.shellquote(cmd.url)
            # roll back tmp ceph file after import it
            _1()

            _, PFILE = tempfile.mkstemp()
            content_length = shell.call('curl -sI %s|grep Content-Length' %
                                        cmd.url).strip().split()[1]
            total = _getRealSize(content_length)

            def _getProgress(synced):
                logger.debug(
                    "getProgress in ceph-bs-agent, synced: %s, total: %s" %
                    (synced, total))
                last = shell.call('tail -1 %s' % PFILE).strip()
                if not last or len(last.split()) < 1:
                    return synced
                logger.debug("last synced: %s" % last)
                written = _getRealSize(last.split()[0])
                if total > 0 and synced < written:
                    synced = written
                    if synced < total:
                        percent = int(round(float(synced) / float(total) * 90))
                        report.progress_report(percent, "report")
                return synced

            logger.debug("content-length is: %s" % total)

            _, _, err = bash_progress_1(
                'set -o pipefail;wget --no-check-certificate -O - %s 2>%s| rbd import --image-format 2 - %s/%s'
                % (cmd.url, PFILE, pool, tmp_image_name), _getProgress)
            if err:
                raise err
            actual_size = linux.get_file_size_by_http_head(cmd.url)

            if os.path.exists(PFILE):
                os.remove(PFILE)

        elif url.scheme == 'sftp':
            port = (url.port, 22)[url.port is None]
            _, PFILE = tempfile.mkstemp()
            pipe_path = PFILE + "fifo"
            scp_to_pipe_cmd = "scp -P %d -o StrictHostKeyChecking=no %s@%s:%s %s" % (
                port, url.username, url.hostname, url.path, pipe_path)
            sftp_command = "sftp -o StrictHostKeyChecking=no -o BatchMode=no -P %s -b /dev/stdin %s@%s" % (
                port, url.username, url.hostname) + " <<EOF\n%s\nEOF\n"
            if url.password is not None:
                scp_to_pipe_cmd = 'sshpass -p %s %s' % (url.password,
                                                        scp_to_pipe_cmd)
                sftp_command = 'sshpass -p %s %s' % (url.password,
                                                     sftp_command)

            actual_size = shell.call(
                sftp_command %
                ("ls -l " + url.path)).splitlines()[1].strip().split()[4]
            os.mkfifo(pipe_path)
            image_format = get_origin_format(cmd.url, True)
            cmd.url = linux.shellquote(cmd.url)
            # roll back tmp ceph file after import it
            _1()

            def _get_progress(synced):
                logger.debug("getProgress in add image")
                if not os.path.exists(PFILE):
                    return synced
                last = shell.call('tail -1 %s' % PFILE).strip()
                if not last or not last.isdigit():
                    return synced
                report.progress_report(int(last) * 90 / 100, "report")
                return synced

            get_content_from_pipe_cmd = "pv -s %s -n %s 2>%s" % (
                actual_size, pipe_path, PFILE)
            import_from_pipe_cmd = "rbd import --image-format 2 - %s/%s" % (
                pool, tmp_image_name)
            _, _, err = bash_progress_1(
                'set -o pipefail; %s & %s | %s' %
                (scp_to_pipe_cmd, get_content_from_pipe_cmd,
                 import_from_pipe_cmd), _get_progress)

            if os.path.exists(PFILE):
                os.remove(PFILE)

            if os.path.exists(pipe_path):
                os.remove(pipe_path)

            if err:
                raise err

        elif url.scheme == 'file':
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            image_format = get_origin_format(src_path, True)
            # roll back tmp ceph file after import it
            _1()

            shell.check_run("rbd import --image-format 2 %s %s/%s" %
                            (src_path, pool, tmp_image_name))
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.check_run(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s'
                    % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.check_run('rbd mv %s/%s %s/%s' %
                            (pool, tmp_image_name, pool, image_name))
        report.progress_report("100", "finish")

        @rollbackable
        def _2():
            shell.check_run('rbd rm %s/%s' % (pool, image_name))

        _2()

        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)

        rsp.size = long(image_stats.size_)
        rsp.actualSize = actual_size
        if image_format == "qcow2":
            rsp.format = "raw"
        else:
            rsp.format = image_format

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Example #42
0
    def check_dumpcap_pcapng_sections_real(self, multi_input=False, multi_output=False):
        # Make sure we always test multiple SHBs in an input.
        in_files_l = [ [
            capture_file('many_interfaces.pcapng.1'),
            capture_file('many_interfaces.pcapng.2')
            ] ]
        if multi_input:
            in_files_l.append([ capture_file('many_interfaces.pcapng.3') ])
        fifo_files = []
        fifo_procs = []
        # Default values for our validity tests
        check_val_d = {
            'filename': None,
            'packet_count': 0,
            'idb_count': 0,
            'ua_pt1_count': 0,
            'ua_pt2_count': 0,
            'ua_pt3_count': 0,
            'ua_dc_count': 0,
        }
        check_vals = [ check_val_d ]

        for in_files in in_files_l:
            fifo_file = self.filename_from_id('dumpcap_pcapng_sections_{}.fifo'.format(len(fifo_files) + 1))
            fifo_files.append(fifo_file)
            # If a previous test left its fifo laying around, e.g. from a failure, remove it.
            try:
                os.unlink(fifo_file)
            except: pass
            os.mkfifo(fifo_file)
            cat_cmd = subprocesstest.cat_cap_file_command(in_files)
            fifo_procs.append(self.startProcess(('{0} > {1}'.format(cat_cmd, fifo_file)), shell=True))

        if multi_output:
            rb_unique = 'sections_rb_' + uuid.uuid4().hex[:6] # Random ID
            testout_glob = '{}.{}_*.pcapng'.format(self.id(), rb_unique)
            testout_file = '{}.{}.pcapng'.format(self.id(), rb_unique)
            check_vals.append(check_val_d.copy())
            # check_vals[]['filename'] will be filled in below
        else:
            testout_file = self.filename_from_id(testout_pcapng)
            check_vals[0]['filename'] = testout_file

        # Capture commands
        if not multi_input and not multi_output:
            # Passthrough SHBs, single output file
            capture_cmd_args = (
                '-i', fifo_files[0],
                '-w', testout_file
            )
            check_vals[0]['packet_count'] = 79
            check_vals[0]['idb_count'] = 22
            check_vals[0]['ua_pt1_count'] = 1
            check_vals[0]['ua_pt2_count'] = 1
        elif not multi_input and multi_output:
            # Passthrough SHBs, multiple output files
            capture_cmd_args = (
                '-i', fifo_files[0],
                '-w', testout_file,
                '-a', 'files:2',
                '-b', 'packets:53'
            )
            check_vals[0]['packet_count'] = 53
            check_vals[0]['idb_count'] = 11
            check_vals[0]['ua_pt1_count'] = 1
            check_vals[1]['packet_count'] = 26
            check_vals[1]['idb_count'] = 22
            check_vals[1]['ua_pt1_count'] = 1
            check_vals[1]['ua_pt2_count'] = 1
        elif multi_input and not multi_output:
            # Dumpcap SHBs, single output file
            capture_cmd_args = (
                '-i', fifo_files[0],
                '-i', fifo_files[1],
                '-w', testout_file
            )
            check_vals[0]['packet_count'] = 88
            check_vals[0]['idb_count'] = 35
            check_vals[0]['ua_dc_count'] = 1
        else:
            # Dumpcap SHBs, multiple output files
            capture_cmd_args = (
                '-i', fifo_files[0],
                '-i', fifo_files[1],
                '-w', testout_file,
                '-a', 'files:2',
                '-b', 'packets:53'
            )
            check_vals[0]['packet_count'] = 53
            check_vals[0]['idb_count'] = 13
            check_vals[0]['ua_dc_count'] = 1
            check_vals[1]['packet_count'] = 35
            check_vals[1]['idb_count'] = 35
            check_vals[1]['ua_dc_count'] = 1

        capture_cmd = capture_command(cmd_dumpcap, *capture_cmd_args)

        capture_proc = self.assertRun(capture_cmd)
        for fifo_proc in fifo_procs: fifo_proc.kill()

        rb_files = []
        if multi_output:
            rb_files = sorted(glob.glob(testout_glob))
            self.assertEqual(len(rb_files), 2)
            check_vals[0]['filename'] = rb_files[0]
            check_vals[1]['filename'] = rb_files[1]

        for rbf in rb_files:
            self.cleanup_files.append(rbf)
            self.assertTrue(os.path.isfile(rbf))

        # Output tests

        if not multi_input and not multi_output:
            # Check strict bit-for-bit passthrough.
            in_hash = hashlib.sha256()
            out_hash = hashlib.sha256()
            for in_file in in_files_l[0]:
                in_cap_file = capture_file(in_file)
                with open(in_cap_file, 'rb') as f:
                    in_hash.update(f.read())
            with open(testout_file, 'rb') as f:
                out_hash.update(f.read())
            self.assertEqual(in_hash.hexdigest(), out_hash.hexdigest())

        # many_interfaces.pcapng.1 : 64 packets written by "Passthrough test #1"
        # many_interfaces.pcapng.2 : 15 packets written by "Passthrough test #2"
        # many_interfaces.pcapng.3 : 9 packets written by "Passthrough test #3"
        # Each has 11 interfaces.
        idb_compare_eq = True
        if multi_input and multi_output:
            # Having multiple inputs forces the use of threads. In our
            # case this means that non-packet block counts in the first
            # file in is nondeterministic.
            idb_compare_eq = False
        for check_val in check_vals:
            self.checkPacketCount(check_val['packet_count'], cap_file=check_val['filename'])

            tshark_proc = self.assertRun(capture_command(cmd_tshark,
                '-r', check_val['filename'],
                '-V',
                '-X', 'read_format:MIME Files Format'
            ))
            # XXX Are there any other sanity checks we should run?
            if idb_compare_eq:
                self.assertEqual(self.countOutput(r'Block: Interface Description Block',
                    proc=tshark_proc), check_val['idb_count'])
            else:
                self.assertGreaterEqual(self.countOutput(r'Block: Interface Description Block',
                    proc=tshark_proc), check_val['idb_count'])
                idb_compare_eq = True
            self.assertEqual(self.countOutput(r'Option: User Application = Passthrough test #1',
                proc=tshark_proc), check_val['ua_pt1_count'])
            self.assertEqual(self.countOutput(r'Option: User Application = Passthrough test #2',
                proc=tshark_proc), check_val['ua_pt2_count'])
            self.assertEqual(self.countOutput(r'Option: User Application = Passthrough test #3',
                proc=tshark_proc), check_val['ua_pt3_count'])
            self.assertEqual(self.countOutput(r'Option: User Application = Dumpcap \(Wireshark\)',
                proc=tshark_proc), check_val['ua_dc_count'])
Example #43
0
import time
import sqlite3 as lite
import os
import logging

gpsd = gps(mode=WATCH_ENABLE | WATCH_NEWSTYLE)

con = lite.connect(config['DB_NAME'])

workDir = os.getcwd()
fifoPath = workDir + "/" + "latlongFifo"
fPath = None

if os.path.isfile(fifoPath) is False:
    try:
        os.mkfifo(fifoPath)
    except OSError, e:
        logginf.info("Failed to create FIFO: %s" % e)

try:
    fPath = open(fifoPath, "w")
except Exception as err:
    logginf.info("Error Opening FiFo ")

try:
    while True:
        logging.info('Working')
        report = gpsd.next()
        if report['class'] == 'TPV':
            logging.info("GPS signal received")
            with con:
Example #44
0
def registry(realArgs):
    """
  Interact with the subuser registry.
  """
    options, args = parseCliArgs(realArgs)
    user = User()
    if len(args) < 1:
        sys.exit(
            "No arguments given. Please use subuser registry -h for help.")
    elif ["log"] == args:
        subuserlib.registry.showLog(user)
    elif "rollback" == args[0]:
        try:
            commit = args[1]
        except IndexError:
            sys.exit(
                "Wrong number of arguments.  Expected a commit.  Try running \nsubuser regsitry --help\nfor more info."
            )
        with user.getRegistry().getLock():
            subuserlib.registry.rollback(user, commit=commit)
    elif ["live-log"] == args:
        liveLogDir = os.path.join(user.homeDir, ".subuser/registry-live-log")
        liveLogPath = os.path.join(liveLogDir, str(os.getpid()))
        if not os.path.exists(liveLogDir):
            os.makedirs(liveLogDir)
        os.mkfifo(liveLogPath)
        # Why use os.open? http://stackoverflow.com/questions/5782279/why-does-a-read-only-open-of-a-named-pipe-block
        liveLog = os.open(liveLogPath, os.O_RDONLY | os.O_NONBLOCK)
        q = False
        line = ""
        while not q:
            ready, _, _ = select.select([sys.stdin, liveLog], [], [])
            for selection in ready:
                if selection == liveLog:
                    line += os.read(liveLog, 1)
                    try:
                        announcement = json.loads(line)
                        if options.json:
                            print(line)
                        else:
                            print("New commit to registry:" +
                                  announcement["commit"])
                        line = ""
                    except ValueError:
                        pass
                elif selection == sys.stdin:
                    stdinLine = sys.stdin.readline()
                    if "q" in stdinLine or not stdinLine:
                        q = True
                        print("Quitting...")
                        break
                else:
                    raise Exception("IMPOSSIBLE!" + str(selection))
        os.close(liveLog)
        os.remove(liveLogPath)
        sys.exit()
    elif len(args) == 1:
        sys.exit(
            " ".join(args) +
            " is not a valid registry subcommand. Please use subuser registry -h for help."
        )
    else:
        sys.exit(
            " ".join(args) +
            " is not a valid registry subcommand. Please use subuser registry -h for help."
        )
Example #45
0
                    default="ERROR")

args = parser.parse_args()

logging.info(args.in_, args.out, args.ip, args.v)

logging.basicConfig(
    format=
    u'%(filename)s[LINE:%(lineno)d]* %(levelname)-8s [%(asctime)s]  %(message)s',
    level=args.v)

IN_FIFO = args.in_
OUT_FIFO = args.out

try:
    os.mkfifo(IN_FIFO)
except OSError as oe:
    if oe.errno != errno.EEXIST:
        raise

try:
    os.mkfifo(OUT_FIFO)
except OSError as oe:
    if oe.errno != errno.EEXIST:
        raise

if not stat.S_ISFIFO(os.stat(IN_FIFO).st_mode):
    logging.error("is not named pipe: %s", IN_FIFO)

if not stat.S_ISFIFO(os.stat(OUT_FIFO).st_mode):
    logging.error("is not named pipe: %s", OUT_FIFO)
Example #46
0
def main_handler(event, context):
    logger.info(json.dumps(event))

    # To accommodate Windows users, copy the ffmpeg binary to /tmp
    # and make it executable
    subprocess.run('cp ./ffmpeg /tmp/ffmpeg && chmod 755 /tmp/ffmpeg',
                   shell=True)

    region = os.environ.get('REGION')  # region of the output bucket
    dst_bucket = os.environ.get('DST_BUCKET')  # output bucket name
    dst_path = os.environ.get('DST_PATH')  # output bucket directory
    dst_format = os.environ.get('DST_FORMAT')  # transcoding format
    cmd_origin = '/tmp/' + os.environ.get('FFMPEG_CMD')  # ffmpeg command
    secret_id = os.environ.get('TENCENTCLOUD_SECRETID')
    secret_key = os.environ.get('TENCENTCLOUD_SECRETKEY')
    token = os.environ.get('TENCENTCLOUD_SESSIONTOKEN')
    ffmpeg_debug = os.environ.get('FFMPEG_DEBUG',
                                  1)  # whether to print ffmpeg logs: 1 = yes, 0 = no
    read_limit = 1024 * 64  # how much output data to read at a time (bytes); the 64 KB default is recommended
    # maximum data uploaded per multipart-upload part (bytes); the 10 MB default is recommended
    upload_size = 1024 * 1024 * 10

    if "Records" in event.keys():
        base_info = {}

        date = datetime.datetime.now()
        date_tag = date.strftime('%Y%m%d%H%M%S')

        cos_client = CosS3Client(
            CosConfig(Region=region,
                      SecretId=secret_id,
                      SecretKey=secret_key,
                      Token=token))

        key = "/".join(
            event['Records'][0]['cos']['cosObject']['key'].split("/")[3:])

        # Generate a pre-signed download URL for the input file.
        # The COS SDK cannot sign the session token, so append it manually.
        download_input = cos_client.get_presigned_download_url(
            Bucket=dst_bucket,
            Key=key,
            Expired=60 * 60 * 12,
        )
        download_input += '&x-cos-security-token={token}'.format(token=token)

        output_name = key.split('/')[-1].split('.')[0] + '.' + dst_format
        file_name, file_format = output_name.split('.')
        upload_name = '{file_name}-{date_tag}.{file_format}'.format(
            file_name=file_name, date_tag=date_tag, file_format=file_format)

        # Create the FIFO
        fifo_path = '/tmp/fifo' + str(int(time.time()))
        os.mkfifo(fifo_path)
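        # ffmpeg writes the transcoded stream into this FIFO so the upload
        # thread (file_upload_ffmpeg_task, not shown here) can stream it to
        # COS without staging the whole output file on disk.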

        cmd = cmd_origin.format(input=download_input,
                                dst_format=dst_format,
                                output=fifo_path)
        cmd_list = cmd.split(' ')

        # Common parameters shared by the log-reporting and file-upload threads
        base_info['cos_client'] = cos_client
        base_info['region'] = region
        base_info['dst_format'] = dst_format
        base_info['dst_bucket'] = dst_bucket
        base_info['dst_path'] = dst_path
        base_info['ffmpeg_debug'] = ffmpeg_debug

        base_info['size'] = event['Records'][0]['cos']['cosObject']['size']
        base_info['key'] = key
        base_info['upload_name'] = upload_name
        base_info['date_tag'] = date_tag
        base_info['fifo_path'] = fifo_path

        base_info['upload_size'] = upload_size
        base_info['read_limit'] = read_limit

    else:
        raise Exception('event does not come from COS')

    try:
        # Start the transcoding process
        proc = subprocess.Popen(cmd_list,
                                stderr=subprocess.PIPE,
                                universal_newlines=True)

        # Create the file-upload thread
        task_list = []
        cos_upload_task = threading.Thread(target=file_upload_ffmpeg_task,
                                           args=(proc, base_info))

        # Create the log-output thread
        task_list.append(cos_upload_task)
        log_upload_task = threading.Thread(target=log_ffmpeg_task,
                                           args=(proc, base_info))
        task_list.append(log_upload_task)

        # Start the threads
        for task in task_list:
            task.start()

        # Wait for the threads to finish
        for task in task_list:
            task.join()

    except Exception as e:
        print(traceback.format_exc())

    finally:
        # Explicitly remove the FIFO
        if os.path.exists(fifo_path):
            os.remove(fifo_path)
        # Explicitly remove the ffmpeg executable under /tmp
        if os.path.exists('/tmp/ffmpeg'):
            os.remove('/tmp/ffmpeg')

    return {'code': 200, 'Msg': 'success'}
Example #47
0
def mount(mountpoint,
          force_remount=False,
          timeout_ms=60000,
          use_metadata_server=False):
    """Mount your Google Drive at the specified mountpoint path."""

    if ' ' in mountpoint:
        raise ValueError('Mountpoint must not contain a space.')

    mountpoint = _os.path.expanduser(mountpoint)
    # If we've already mounted drive at the specified mountpoint, exit now.
    already_mounted = _os.path.isdir(_os.path.join(mountpoint, 'My Drive'))
    if not force_remount and already_mounted:
        print('Drive already mounted at {}; to attempt to forcibly remount, '
              'call drive.mount("{}", force_remount=True).'.format(
                  mountpoint, mountpoint))
        return

    env = _env()
    home = env.home
    root_dir = env.root_dir
    inet_family = env.inet_family
    dev = env.dev
    path = env.path
    config_dir = env.config_dir

    try:
        _os.makedirs(config_dir)
    except OSError:
        if not _os.path.isdir(config_dir):
            raise ValueError(
                '{} must be a directory if present'.format(config_dir))

    # Launch an intermediate bash to manage DriveFS' I/O (b/141747058#comment6).
    prompt = u'root@{}-{}: '.format(_socket.gethostname(), _uuid.uuid4().hex)
    logfile = None
    if mount._DEBUG:  # pylint:disable=protected-access
        logfile = _sys.stdout
    d = _popen_spawn.PopenSpawn(
        '/usr/bin/setsid /bin/bash --noediting -i',  # Need -i to get prompt echo.
        timeout=120,
        maxread=int(1e6),
        encoding='utf-8',
        logfile=logfile,
        env={
            'HOME': home,
            'FUSE_DEV_NAME': dev,
            'PATH': path
        })
    d.sendline('export PS1="{}"'.format(prompt))
    d.expect(prompt)  # The new prompt.
    # Robustify to previously-running copies of drive. Don't only [pkill -9]
    # because that leaves enough cruft behind in the mount table that future
    # operations fail with "Transport endpoint is not connected".
    d.sendline('umount -f {mnt} || umount {mnt}; pkill -9 -x drive'.format(
        mnt=mountpoint))
    # Wait for above to be received, using the next prompt.
    d.expect(prompt)
    # Only check the mountpoint after potentially unmounting/pkill'ing above.
    try:
        if _os.path.islink(mountpoint):
            raise ValueError('Mountpoint must not be a symlink')
        if _os.path.isdir(mountpoint) and _os.listdir(mountpoint):
            raise ValueError('Mountpoint must not already contain files')
        if not _os.path.isdir(mountpoint) and _os.path.exists(mountpoint):
            raise ValueError(
                'Mountpoint must either be a directory or not exist')
        normed = _os.path.normpath(mountpoint)
        if '/' in normed and not _os.path.exists(_os.path.dirname(normed)):
            raise ValueError('Mountpoint must be in a directory that exists')
    except:
        d.kill(_signal.SIGKILL)
        raise

    # Watch for success.
    success = u'google.colab.drive MOUNTED'
    success_watcher = (
        '( while `sleep 0.5`; do if [[ -d "{m}" && "$(ls -A {m})" != "" ]]; '
        'then echo "{s}"; break; fi; done ) &').format(m=mountpoint, s=success)
    d.sendline(success_watcher)
    d.expect(prompt)
    drive_dir = _os.path.join(root_dir, 'opt/google/drive')

    oauth_prompt = u'(Go to this URL in a browser: https://.*)$'
    problem_and_stopped = (
        u'Drive File Stream encountered a problem and has stopped')
    drive_exited = u'drive EXITED'
    metadata_auth_arg = (
        '--metadata_server_auth_uri={metadata_server}/computeMetadata/v1 '.
        format(metadata_server=_os.environ['TBE_CREDS_ADDR'])
        if use_metadata_server else '')
    drive_binary_dir = _os.path.join(
        root_dir,
        'opt/google/drive_latest') if use_metadata_server else drive_dir

    # Create a pipe for sending the oauth code to a backgrounded drive binary.
    # (popen -> no pty -> no bash job control -> can't background post-launch).
    fifo_dir = _tempfile.mkdtemp()
    fifo = _os.path.join(fifo_dir, 'drive.fifo')
    _os.mkfifo(fifo)
    # cat is needed below since the FIFO isn't opened for writing yet.
    d.sendline(
        ('cat {fifo} | head -1 | ( {drive_binary_dir}/drive '
         '--features=max_parallel_push_task_instances:10,'
         'max_operation_batch_size:15,opendir_timeout_ms:{timeout_ms},'
         'virtual_folders:true '
         '--inet_family=' + inet_family + ' ' + metadata_auth_arg +
         '--preferences=trusted_root_certs_file_path:'
         '{d}/roots.pem,mount_point_path:{mnt} --console_auth 2>&1 '
         '| grep --line-buffered -E "{oauth_prompt}|{problem_and_stopped}"; '
         'echo "{drive_exited}"; ) &').format(
             d=drive_dir,
             drive_binary_dir=drive_binary_dir,
             timeout_ms=timeout_ms,
             mnt=mountpoint,
             fifo=fifo,
             oauth_prompt=oauth_prompt,
             problem_and_stopped=problem_and_stopped,
             drive_exited=drive_exited))
    d.expect(prompt)

    # LINT.IfChange(drivetimedout)
    timeout_pattern = 'QueryManager timed out'
    # LINT.ThenChange()
    dfs_log = _os.path.join(config_dir, 'DriveFS/Logs/drive_fs.txt')

    wrote_to_fifo = False
    while True:
        case = d.expect([
            success,
            prompt,
            oauth_prompt,
            problem_and_stopped,
            drive_exited,
        ])
        if case == 0:
            break
        elif (case == 1 or case == 3 or case == 4):
            # Prompt appearing here means something went wrong with the drive binary.
            d.kill(_signal.SIGKILL)
            extra_reason = ''
            if 0 == _subprocess.call('grep -q "{}" "{}"'.format(
                    timeout_pattern, dfs_log),
                                     shell=True):
                extra_reason = (
                    ': timeout during initial read of root folder; for more info: '
                    'https://research.google.com/colaboratory/faq.html#drive-timeout'
                )
            raise ValueError('mount failed' + extra_reason)
Example #48
def create_fifo(source):
    os.mkfifo(sanitize(source))
Example #49
0
import os
from caiman.paths import caiman_datadir

# %% ********* Creating named pipes for communication with MicroManager: *********
timer = TicToc()
timer.tic()  # start measuring time

sendPipeName = "/tmp/getPipeMMCaImAn.ser"  # FOR SENDING MESSAGES --> TO MicroManager
receivePipeName = "/tmp/sendPipeMMCaImAn.ser"  # FOR READING MESSAGES --> FROM MicroManager

MMfileDirectory = '/Applications/MicroManager 2.0 gamma/uMresults'
CaimanFileDirectory = caiman_datadir()  # specify where the file is saved

if os.path.exists(sendPipeName):
    os.remove(sendPipeName)
    os.mkfifo(sendPipeName)
    print("Removed old write-pipe, created new write-pipe.")
else:
    os.mkfifo(sendPipeName)
    print("Write-pipe created sucessfully!")

if os.path.exists(receivePipeName):
    os.remove(receivePipeName)
    os.mkfifo(receivePipeName)
    print("Removed old read-pipe, created new read-pipe.")
else:
    os.mkfifo(receivePipeName)
    print("Read-pipe created sucessfully!")

timer.toc()
# %% ********* Wait for file name: *********
Example #50
0
    def api_fifo_read(self):
        """
            A thread which keeps reading an input fifo,
            specified in the api configs, each line it reads is expected to
            be a json string encoding a dict of the form
            {
                FIFO_ENDPOINT_KEY: fifo_endpoint,
                COMMAND_ID_KEY: command_id,
                COMMAND_KEY: command,
                ARGS_KEY: args,
                METHOD_KEY: method
            }
            fifo_endpoint - Input FIFO to api server
            command_id - the sequence id of the command
            command - the api command called (command along with method should
                                              map to a unique function in
                                              moirai class)
            args - arguments for the command
            method - api method name (GET/PUT/POST)

            Args:
                None
            Return:
                None
            Raise:
                IOError/OSError - Unable to open input FIFO
        """
        try:
            if not os.path.exists(self.api_config['moirai_input_fifo']):
                os.mkfifo(self.api_config['moirai_input_fifo'])
            print("\nAPI FIFO open for read\n")
            self.fifo_fd = open(self.api_config['moirai_input_fifo'])
        except (IOError, OSError):
            print("[Error] Unable to read api input FIFO")
            raise
        while True:
            skip_iteration_flag = False
            request_json = self.fifo_fd.readline()
            if request_json:
                try:
                    # if request_json:
                    request_map = json.loads(request_json)
                except ValueError:
                    print("[Error] Recieved non json object, skipping")
                    print(request_json)
                    continue
                required_keys = [
                    ApiConstants.FIFO_ENDPOINT_KEY,
                    ApiConstants.COMMAND_ID_KEY, ApiConstants.COMMAND_KEY,
                    ApiConstants.ARGS_KEY, ApiConstants.METHOD_KEY
                ]
                for arr_key in required_keys:
                    if arr_key not in request_map.keys():
                        print("[Error] Malformed message missing {}".format(
                            arr_key))
                        skip_iteration_flag = True

                if skip_iteration_flag:
                    continue

                fifo_endpoint = request_map[ApiConstants.FIFO_ENDPOINT_KEY]
                command_id = request_map[ApiConstants.COMMAND_ID_KEY]
                command = request_map[ApiConstants.COMMAND_KEY]
                args = request_map[ApiConstants.ARGS_KEY]
                method = request_map[ApiConstants.METHOD_KEY]
                if command not in ApiConstants.API_COMMANDS:
                    output_msg = {
                        ApiConstants.COMMAND_ID_KEY: command_id,
                        ApiConstants.COMMAND_OUTPUT_KEY:
                        "No such command found"
                    }
                else:
                    if method == "GET":
                        moirai_function = (
                            ApiConstants.MOIRAI_GET_COMMAND_PREFIX + command)
                    else:
                        moirai_function = (
                            ApiConstants.MOIRAI_OTHER_COMMAND_PREFIX + command)
                    # Only dispatch when the command is valid; otherwise keep
                    # the "No such command found" reply built above.
                    try:
                        command_handler = getattr(self, moirai_function)
                        command_output = command_handler(args)
                        output_msg = {
                            ApiConstants.COMMAND_ID_KEY: command_id,
                            ApiConstants.COMMAND_OUTPUT_KEY: command_output
                        }
                    except:
                        output_msg = {
                            ApiConstants.COMMAND_ID_KEY:
                            command_id,
                            ApiConstants.COMMAND_OUTPUT_KEY:
                            "Unexpected error in moirai"
                        }
                with open(fifo_endpoint, 'w') as output_fd:
                    output_fd.write(json.dumps(output_msg))
Example #51
0
#!/usr/bin/env python
from scapy.all import *
import time
import subprocess
import fileinput
import os
import re

FIFONAME = '/tmp/802154fifo'

try:
    os.remove(FIFONAME)
except:
    pass  # not existing so no problem

os.mkfifo(FIFONAME)
subprocess.call("wireshark -k -i %s &" % FIFONAME, shell=True)


class IEEE802154(Packet):
    name = "802.15.4"
    fields_desc = [StrLenField("data", "", "len")]


bind_layers(IEEE802154, LLC, type=2)
conf.l2types.register(195, IEEE802154)

fdesc = PcapWriter(FIFONAME)

lines_left = 9999999999999
Example #52
0
import os
import cv2
import time

pipePath = "./vision"
while 1:
    time.sleep(1)
    try:
        os.mkfifo(pipePath)
    except OSError:
        pass
    rp = open(pipePath, 'r')
    response = rp.read()
    print "Got response %s" % response
    rp.close()
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
Example #53
import time
import math
import os
import errno
import re
import sys

connection_string = "/dev/ttyS0"
FIFO_1 = "/tmp/server_to_client_fifo"
FIFO_2 = "/tmp/client_to_server_fifo"

try:
    os.mkfifo(FIFO_1)
    os.mkfifo(FIFO_2)
except OSError as oe:
    if oe.errno != errno.EEXIST:
        raise


def respond(resp):
    os.system("echo " + resp + " > " + FIFO_1)
    return


try:
    os.mkfifo(FIFO_1)
    os.mkfifo(FIFO_2)
except OSError as oe:
    if oe.errno != errno.EEXIST:
        raise
Example #54
0
        os.write(fifo, Qbo.strAudio)
        os.close(fifo)
        Qbo.GetAudio = False
        Listening = False
    return


#============================================================================================================

#Qbo.SpeechText("I am ready.")
# FIFO init.
FIFO_listen = '/home/pi/Documents/pipes/pipe_listen'
FIFO_cmd = '/home/pi/Documents/pipes/pipe_cmd'

try:
    os.mkfifo(FIFO_listen)
except OSError as oe:
    if oe.errno != errno.EEXIST:
        raise
try:
    os.mkfifo(FIFO_cmd)
except OSError as oe:
    if oe.errno != errno.EEXIST:
        raise

listen_thd = Qbo.StartBackListen()
#HeadServo.SetNoseColor(1)       # Set QBO nose green
#fifo = os.open(FIFO_cmd, os.O_WRONLY)
#os.write(fifo, "-c nose -co green")
#os.close(fifo)
Example #55
0
    def __init__(self,
                 credentials_file=os.path.join(os.path.expanduser('~'),
                                               '.config',
                                               'google-oauthlib-tool',
                                               'credentials.json'),
                 device_config=os.path.join(os.path.expanduser('~'), '.config',
                                            'googlesamples-assistant',
                                            'device_config.json'),
                 lang='en-US',
                 conversation_start_fifo=os.path.join(os.path.sep, 'tmp',
                                                      'pushtotalk.fifo'),
                 *args,
                 **kwargs):
        """ Params:
            credentials_file -- Path to the Google OAuth credentials file
                (default: ~/.config/google-oauthlib-tool/credentials.json)
            device_config  -- Path to device_config.json. Register your
                device and create a project, then run the pushtotalk.py
                script from googlesamples to create your device_config.json
            lang -- Assistant language (default: en-US)
        """

        super().__init__(*args, **kwargs)

        self.lang = lang
        self.credentials_file = credentials_file
        self.device_config = device_config
        self.conversation_start_fifo = conversation_start_fifo

        try:
            os.mkfifo(self.conversation_start_fifo)
        except FileExistsError:
            pass

        with open(self.device_config) as f:
            device = json.load(f)
            self.device_id = device['id']
            self.device_model_id = device['model_id']

        # Load OAuth 2.0 credentials.
        try:
            with open(self.credentials_file, 'r') as f:
                credentials = google.oauth2.credentials.Credentials(
                    token=None, **json.load(f))
                http_request = google.auth.transport.requests.Request()
                credentials.refresh(http_request)
        except Exception as e:
            logging.error('Error loading credentials: %s', e)
            logging.error('Run google-oauthlib-tool to initialize '
                          'new OAuth 2.0 credentials.')
            raise

        # Create an authorized gRPC channel.
        self.grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
            credentials, http_request, self.api_endpoint)
        logging.info('Connecting to %s', self.api_endpoint)

        # Configure audio source and sink.
        audio_device = None
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=self.audio_sample_rate,
                sample_width=self.audio_sample_width,
                block_size=self.audio_block_size,
                flush_size=self.audio_flush_size))

        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=self.audio_sample_rate,
                                         sample_width=self.audio_sample_width,
                                         block_size=self.audio_block_size,
                                         flush_size=self.audio_flush_size))

        # Create conversation stream with the given audio source and sink.
        self.conversation_stream = audio_helpers.ConversationStream(
            source=audio_source,
            sink=audio_sink,
            iter_size=self.audio_iter_size,
            sample_width=self.audio_sample_width,
        )

        self.device_handler = device_helpers.DeviceRequestHandler(
            self.device_id)
Example #56
0
import os
import errno

FIFO = 'mypipe'

try:
    os.mkfifo(FIFO)
except OSError as oe:
    if oe.errno != errno.EEXIST:
        raise

while True:
    print("Opening FIFO...")
    with open(FIFO) as fifo:
        print("FIFO opened")
        while True:
            data = fifo.read()
            if len(data) == 0:
                print("Writer closed")
                break
            print('Read: "{0}"'.format(data))
Example #57
0
    self_loop_scale=0.1)
phones = SymbolTable.read_text("phones.txt")
wb_info = WordBoundaryInfo.from_file(WordBoundaryInfoNewOpts(),
                                     "word_boundary.int")

# Define feature pipeline as a Kaldi rspecifier
feats_rspecifier = (
    "ark:compute-mfcc-feats --config=mfcc.conf scp:wav.scp ark:-"
    " | tee mfcc.pipe"
    " | compute-cmvn-stats ark:- ark:-"
    " | apply-cmvn ark:- ark:mfcc.pipe ark:-"
    " | add-deltas ark:- ark:- |")
try:
    os.remove("mfcc.pipe")  # remove leftover named pipe
except FileNotFoundError:
    pass

# Align wav files
os.mkfifo("mfcc.pipe")  # create named pipe used by the pipeline
with SequentialMatrixReader(feats_rspecifier) as f, open("text") as t:
    for (fkey, feats), line in zip(f, t):
        tkey, text = line.strip().split(None, 1)
        assert (fkey == tkey)
        out = aligner.align(feats, text)
        print(fkey, out["alignment"], flush=True)
        phone_alignment = aligner.to_phone_alignment(out["alignment"], phones)
        print(fkey, phone_alignment, flush=True)
        word_alignment = aligner.to_word_alignment(out["best_path"], wb_info)
        print(fkey, word_alignment, flush=True)
os.remove("mfcc.pipe")  # remove named pipe
Example #58
0
def exec_func_shell(func, d, runfile, cwd=None):
    """Execute a shell function from the metadata

    Note on directory behavior.  The 'dirs' varflag should contain a list
    of the directories you need created prior to execution.  The last
    item in the list is where we will chdir/cd to.
    """

    # Don't let the emitted shell script override PWD
    d.delVarFlag('PWD', 'export')

    with open(runfile, 'w') as script:
        script.write(shell_trap_code())

        bb.data.emit_func(func, script, d)

        if bb.msg.loggerVerboseLogs:
            script.write("set -x\n")
        if cwd:
            script.write("cd '%s'\n" % cwd)
        script.write("%s\n" % func)
        script.write('''
# cleanup
ret=$?
trap '' 0
exit $ret
''')

    os.chmod(runfile, 0o775)

    cmd = runfile
    if d.getVarFlag(func, 'fakeroot', False):
        fakerootcmd = d.getVar('FAKEROOT', True)
        if fakerootcmd:
            cmd = [fakerootcmd, runfile]

    if bb.msg.loggerDefaultVerbose:
        logfile = LogTee(logger, sys.stdout)
    else:
        logfile = sys.stdout

    progress = d.getVarFlag(func, 'progress', True)
    if progress:
        if progress == 'percent':
            # Use default regex
            logfile = bb.progress.BasicProgressHandler(d, outfile=logfile)
        elif progress.startswith('percent:'):
            # Use specified regex
            logfile = bb.progress.BasicProgressHandler(d,
                                                       regex=progress.split(
                                                           ':', 1)[1],
                                                       outfile=logfile)
        elif progress.startswith('outof:'):
            # Use specified regex
            logfile = bb.progress.OutOfProgressHandler(d,
                                                       regex=progress.split(
                                                           ':', 1)[1],
                                                       outfile=logfile)
        else:
            bb.warn('%s: invalid task progress varflag value "%s", ignoring' %
                    (func, progress))

    fifobuffer = bytearray()

    def readfifo(data):
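        # Each FIFO message is "<command> <value>" terminated by a NUL byte;
        # recognized commands are forwarded to the matching bb logging helper
        # (bbplain, bbnote, bbwarn, bberror, bbfatal, bbfatal_log, bbdebug).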
        nonlocal fifobuffer
        fifobuffer.extend(data)
        while fifobuffer:
            message, token, nextmsg = fifobuffer.partition(b"\00")
            if token:
                splitval = message.split(b' ', 1)
                cmd = splitval[0].decode("utf-8")
                if len(splitval) > 1:
                    value = splitval[1].decode("utf-8")
                else:
                    value = ''
                if cmd == 'bbplain':
                    bb.plain(value)
                elif cmd == 'bbnote':
                    bb.note(value)
                elif cmd == 'bbwarn':
                    bb.warn(value)
                elif cmd == 'bberror':
                    bb.error(value)
                elif cmd == 'bbfatal':
                    # The caller will call exit themselves, so bb.error() is
                    # what we want here rather than bb.fatal()
                    bb.error(value)
                elif cmd == 'bbfatal_log':
                    bb.error(value, forcelog=True)
                elif cmd == 'bbdebug':
                    splitval = value.split(' ', 1)
                    level = int(splitval[0])
                    value = splitval[1]
                    bb.debug(level, value)
                else:
                    bb.warn("Unrecognised command '%s' on FIFO" % cmd)
                fifobuffer = nextmsg
            else:
                break

    tempdir = d.getVar('T', True)
    fifopath = os.path.join(tempdir, 'fifo.%s' % os.getpid())
    if os.path.exists(fifopath):
        os.unlink(fifopath)
    os.mkfifo(fifopath)
    with open(fifopath, 'r+b', buffering=0) as fifo:
        try:
            bb.debug(2, "Executing shell function %s" % func)

            try:
                with open(os.devnull, 'r+') as stdin:
                    bb.process.run(cmd,
                                   shell=False,
                                   stdin=stdin,
                                   log=logfile,
                                   extrafiles=[(fifo, readfifo)])
            except bb.process.CmdError:
                logfn = d.getVar('BB_LOGFILE', True)
                raise FuncFailed(func, logfn)
        finally:
            os.unlink(fifopath)

    bb.debug(2, "Shell function %s finished" % func)
Example #59
0
    def __init__(self):
        # XXX: this is insecure and might cause race conditions
        self.file_name = self._get_file_name()
        os.mkfifo(self.file_name)
Example #60
-1
    def test_FifoFile(self):
        os.mkfifo(self.client.abspath("aFifo"))
        self.client.checkin("yuvu")
        os.unlink(self.client.abspath("aFifo"))
        self.client.checkout("yuvu")
        self.assertEquals(self.client.fileCount(), 1)
        self.assertTrue(stat.S_ISFIFO(os.stat(self.client.abspath("aFifo")).st_mode))