Example #1
 def __init__(self, mode, *args):
     read_fd, write_fd = os.pipe()
     self.pid = os.fork()
     self.cmd = args[0]
     if self.pid:
         # Parent
         if mode == 'w':
             os.close(read_fd)
             self.file = os.fdopen(write_fd, 'w')
         else:
             os.close(write_fd)
             self.file = os.fdopen(read_fd, 'r')
     else:
         # Child
         if mode == 'w':
             os.close(write_fd)
             os.dup2(read_fd, 0)
             os.close(read_fd)
         else:
             os.close(read_fd)
             os.dup2(write_fd, 1)
             os.close(write_fd)
         try:
             os.execl(self.cmd, os.path.basename(args[0]), *args[1:])
         except OSError, (eno, estr):
             print >> sys.stderr, 'exec %s: %s' % (self.cmd, estr)
             os._exit(1)
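
A minimal sketch of the same pipe/fork/fdopen pattern in modern Python 3 (Unix only; 'echo' is just a stand-in command): wire one end of the pipe to the child's stdio with dup2(), then wrap the parent's end in a file object.

import os

read_fd, write_fd = os.pipe()
pid = os.fork()
if pid == 0:
    # Child: route stdout into the pipe, then exec the command
    os.close(read_fd)
    os.dup2(write_fd, 1)
    os.close(write_fd)
    os.execlp('echo', 'echo', 'hello')
else:
    # Parent: wrap the read end in a file object and collect the output
    os.close(write_fd)
    with os.fdopen(read_fd, 'r') as child_out:
        print(child_out.read().strip())  # -> hello
    os.waitpid(pid, 0)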
Example #2
    def __init__(self, ui, packdir, version=0):
        self._checkversion(version)

        opener = vfsmod.vfs(packdir)
        opener.createmode = 0o444
        self.opener = opener

        self.entries = {}

        shallowutil.mkstickygroupdir(ui, packdir)
        self.packfp, self.packpath = opener.mkstemp(
            suffix=self.PACKSUFFIX + '-tmp')
        self.idxfp, self.idxpath = opener.mkstemp(
            suffix=self.INDEXSUFFIX + '-tmp')
        self.packfp = os.fdopen(self.packfp, 'w+')
        self.idxfp = os.fdopen(self.idxfp, 'w+')
        self.sha = hashlib.sha1()
        self._closed = False

        # The opener provides no way of doing permission fixup on files created
        # via mkstemp, so we must fix it ourselves. We can probably fix this
        # upstream in vfs.mkstemp so we don't need to use the private method.
        opener._fixfilemode(opener.join(self.packpath))
        opener._fixfilemode(opener.join(self.idxpath))

        # Write header
        # TODO: make it extensible (ex: allow specifying compression algorithm,
        # a flexible key/value header, delta algorithm, fanout size, etc)
        versionbuf = struct.pack('!B', self.VERSION) # unsigned 1 byte int
        self.writeraw(versionbuf)
Example #3
def readf_win32(f, m='r', encoding='ISO8859-1'):
	flags = os.O_NOINHERIT | os.O_RDONLY
	if 'b' in m:
		flags |= os.O_BINARY
	if '+' in m:
		flags |= os.O_RDWR
	try:
		fd = os.open(f, flags)
	except OSError:
		raise IOError('Cannot read from %r' % f)

	if sys.hexversion > 0x3000000 and not 'b' in m:
		m += 'b'
		f = os.fdopen(fd, m)
		try:
			txt = f.read()
		finally:
			f.close()
		if encoding:
			txt = txt.decode(encoding)
		else:
			txt = txt.decode()
	else:
		f = os.fdopen(fd, m)
		try:
			txt = f.read()
		finally:
			f.close()
	return txt
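
A short sketch of the fd-ownership rule this helper relies on: os.fdopen() takes over the descriptor from os.open(), so closing the returned file object also closes the fd and no separate os.close() is needed.

import os
import tempfile

fd, path = tempfile.mkstemp()
os.write(fd, b'caf\xe9\n')           # ISO8859-1 encoded bytes
os.close(fd)

fd = os.open(path, os.O_RDONLY)
with os.fdopen(fd, 'rb') as f:       # f now owns fd; closing f closes fd
    txt = f.read().decode('ISO8859-1')
print(txt)                           # -> café
os.remove(path)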
Example #4
    def run(self):
        """
        Execute Package building action.
        """
        header = self._build_execution_header_output()
        print_info(
            header + "spawning package build: %s" % (
                " ".join(self._packages),))

        std_env = self._build_standard_environment(
            repository=self._params["repository"])

        matter_package_names = " ".join(self._packages)
        std_env["MATTER_PACKAGE_NAMES"] = matter_package_names

        # run pkgpre, if any
        pkgpre = self._params["pkgpre"]
        if pkgpre is not None:
            print_info("spawning --pkgpre: %s" % (pkgpre,))
            tmp_fd, tmp_path = mkstemp()
            with os.fdopen(tmp_fd, "wb") as tmp_f:
                with open(pkgpre, "rb") as pkgpre_f:
                    tmp_f.write(pkgpre_f.read())
            try:
                # now execute
                os.chmod(tmp_path, 0o700)
                exit_st = subprocess.call([tmp_path], env = std_env)
                if exit_st != 0:
                    return exit_st
            finally:
                os.remove(tmp_path)

        dirs_cleanup = []
        exit_st = self._run_builder(dirs_cleanup)

        print_info("builder terminated, exit status: %d" % (exit_st,))

        # cleanup temporary directories registered on the queue
        for tmp_dir in dirs_cleanup:
            self.__cleanup_dir(tmp_dir)

        # run pkgpost, if any
        pkgpost = self._params["pkgpost"]
        if pkgpost is not None:
            print_info("spawning --pkgpost: %s" % (pkgpost,))
            tmp_fd, tmp_path = mkstemp()
            with os.fdopen(tmp_fd, "wb") as tmp_f:
                with open(pkgpost, "rb") as pkgpost_f:
                    tmp_f.write(pkgpost_f.read())
            try:
                # now execute
                os.chmod(tmp_path, 0o700)
                post_exit_st = subprocess.call([tmp_path, str(exit_st)],
                    env = std_env)
                if post_exit_st != 0:
                    return post_exit_st
            finally:
                os.remove(tmp_path)

        return exit_st
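
The pkgpre and pkgpost branches above repeat one idiom; a condensed sketch (the helper name is mine, not from the source):

import os
import subprocess
from tempfile import mkstemp

def run_script_copy(source_path, *args, env=None):
    """Copy a script to a private temp file, run it, and always clean up."""
    tmp_fd, tmp_path = mkstemp()
    try:
        with os.fdopen(tmp_fd, "wb") as tmp_f, open(source_path, "rb") as src_f:
            tmp_f.write(src_f.read())
        os.chmod(tmp_path, 0o700)
        return subprocess.call([tmp_path] + list(args), env=env)
    finally:
        os.remove(tmp_path)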
Example #5
def tag_raw_data(raw) :
    (fd, temp) = tempfile.mkstemp()
    (rfd, rtemp) = tempfile.mkstemp()
    f = os.fdopen(fd, 'w') # mkstemp returns (fd, path), so wrap the fd in a file object
    f.writelines([r + '\n' for r in raw])
    f.close()

    #call external tagger
    print(TAG_COMMAND.format(temp))
    f = os.fdopen(rfd)
    subprocess.call(TAG_COMMAND.format(temp), stdout=f, cwd=TAGGER_PATH, shell=True)
    #read the results back in
    f.seek(0)
    result = f.readlines()
    f.close()
    result = [x.strip() for x in result]


    final_result = []
    buf = []
    for i in range(len(result)) :
        if (result[i] == '') :
            final_result.append(tuple(buf)) #cut off the blank line that separates sentences
            buf = []
        else :
            pieces = result[i].split('\t')
            buf.append( (pieces[0], pieces[1]) )

    if (buf != []) :
        final_result.append(tuple(buf))

    
    return final_result
Example #6
def write_env_var_to_conf_dir(var, value, conf_dir):
    env_jsonfile_path = os.path.join(conf_dir, 'environment.json')
    if var in CORE_VAR_NAMES:
        try:
            with open(env_jsonfile_path) as fd:
                env_vars = json.load(fd)
        except:
            env_vars = {}
        if value is None and var in env_vars:
            del env_vars[var]
        else:
            env_vars[var] = value
        # Make sure the file has 600 permissions
        try:
            os.remove(env_jsonfile_path)
        except:
            pass
        with os.fdopen(os.open(env_jsonfile_path, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as fd:
            json.dump(env_vars, fd, indent=4)
            fd.write("\n")
    else: # DX_CLI_WD, DX_USERNAME, DX_PROJECT_CONTEXT_NAME
        # Make sure the file has 600 permissions
        try:
            os.remove(os.path.join(conf_dir, var))
        except:
            pass
        with os.fdopen(os.open(os.path.join(conf_dir, var), os.O_CREAT | os.O_WRONLY, 0o600), 'w') as fd:
            fd.write(value.encode(sys_encoding) if is_py2 else value)

    if not os.path.exists(os.path.expanduser('~/.dnanexus_config/') + 'unsetenv'):
        with open(os.path.expanduser('~/.dnanexus_config/') + 'unsetenv', 'w') as fd:
            for var in CORE_VAR_NAMES:
                fd.write('unset ' + var + '\n')
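
A minimal sketch of the 600-permissions idiom used above: os.open() sets the mode atomically at creation time, and os.fdopen() turns the raw fd into an ordinary file object. O_TRUNC is my addition, so an existing file is emptied in place instead of being removed first.

import json
import os

def write_private_json(path, obj):
    fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w') as f:
        json.dump(obj, f, indent=4)
        f.write("\n")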
Example #7
def ndiff(scan_a, scan_b):
    """Run Ndiff on two scan results, which may be filenames or NmapParserSAX
    objects, and return a running NdiffCommand object."""
    temporary_filenames = []

    if isinstance(scan_a, NmapParserSAX):
        fd, filename_a = tempfile.mkstemp(
                prefix=APP_NAME + "-diff-",
                suffix=".xml"
                )
        temporary_filenames.append(filename_a)
        f = os.fdopen(fd, "wb")
        scan_a.write_xml(f)
        f.close()
    else:
        filename_a = scan_a

    if isinstance(scan_b, NmapParserSAX):
        fd, filename_b = tempfile.mkstemp(
                prefix=APP_NAME + "-diff-",
                suffix=".xml"
                )
        temporary_filenames.append(filename_b)
        f = os.fdopen(fd, "wb")
        scan_b.write_xml(f)
        f.close()
    else:
        filename_b = scan_b

    return NdiffCommand(filename_a, filename_b, temporary_filenames)
Example #8
def worker(sock):
    """
    Called by a worker process after the fork().
    """
    signal.signal(SIGHUP, SIG_DFL)
    signal.signal(SIGCHLD, SIG_DFL)
    signal.signal(SIGTERM, SIG_DFL)
    # restore the handler for SIGINT,
    # it's useful for debugging (show the stacktrace before exit)
    signal.signal(SIGINT, signal.default_int_handler)

    # Read the socket using fdopen instead of socket.makefile() because the latter
    # seems to be very slow; note that we need to dup() the file descriptor because
    # otherwise writes also cause a seek that makes us miss data on the read side.
    infile = os.fdopen(os.dup(sock.fileno()), "a+", 65536)
    outfile = os.fdopen(os.dup(sock.fileno()), "a+", 65536)
    exit_code = 0
    try:
        worker_main(infile, outfile)
    except SystemExit as exc:
        exit_code = compute_real_exit_code(exc.code)
    finally:
        try:
            outfile.flush()
        except Exception:
            pass
    return exit_code
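
A self-contained sketch of why the fd is dup()ed: each os.fdopen() wrapper gets its own descriptor, so the buffered reader and writer cannot disturb each other's file position. socketpair() stands in for the worker socket, and binary mode is assumed here.

import os
import socket

a, b = socket.socketpair()
infile = os.fdopen(os.dup(a.fileno()), "rb", 65536)
outfile = os.fdopen(os.dup(a.fileno()), "wb", 65536)

b.sendall(b"ping\n")
print(infile.readline())  # b'ping\n'
outfile.write(b"pong\n")
outfile.flush()           # the 64 KiB buffer must be flushed explicitly
print(b.recv(5))          # b'pong\n'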
Example #9
    def setUp(self):
        """
        Create a list of files separated in time so we can do dependency checking
        """
        import tempfile
        import time
        self.files = list()
        job_history = open_job_history(history_file)
        for i in range(6):
            #test_file =tempfile.NamedTemporaryFile(delete=False, prefix='testing_tmp')
            #self.files.append (test_file.name)
            # test_file.close()

            fh, temp_file_name = tempfile.mkstemp(suffix='.dot')
            self.files.append(temp_file_name)
            os.fdopen(fh, "w").close()

            # Save modify time in history file
            mtime = os.path.getmtime(temp_file_name)
            epoch_seconds = time.time()
            # Use epoch seconds unless there is a > 1 second discrepancy between system clock
            # and file system clock
            if epoch_seconds > mtime and epoch_seconds - mtime < 1.1:
                mtime = epoch_seconds
            else:
                # file system clock out of sync:
                #   Use file modify times: slow down in case of low counter resolution
                #       (e.g. old versions of NFS and windows)
                time.sleep(2)
            chksum = task.JobHistoryChecksum(
                temp_file_name, mtime, "", dummy_task())
            job_history[os.path.relpath(temp_file_name)] = chksum
Example #10
    def __init__(self, impl=None):
        self._impl = impl or _poll()
        if hasattr(self._impl, 'fileno'):
            self._set_close_exec(self._impl.fileno())
        self._handlers = {}
        self._events = {}
        self._callbacks = []
        self._timeouts = []
        self._running = False
        self._stopped = False
        self._blocking_signal_threshold = None

        # Create a pipe that we send bogus data to when we want to wake
        # the I/O loop when it is idle
        if os.name != 'nt':
            r, w = os.pipe()
            self._set_nonblocking(r)
            self._set_nonblocking(w)
            self._set_close_exec(r)
            self._set_close_exec(w)
            self._waker_reader = os.fdopen(r, "rb", 0)
            self._waker_writer = os.fdopen(w, "wb", 0)
        else:
            self._waker_reader = self._waker_writer = win32_support.Pipe()
            r = self._waker_writer.reader_fd
        self.add_handler(r, self._read_waker, self.READ)
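
The waker pipe is the classic self-pipe trick; a minimal sketch, with os.set_blocking() standing in for the loop's _set_nonblocking helper:

import os
import select

r, w = os.pipe()
os.set_blocking(r, False)
waker_reader = os.fdopen(r, "rb", 0)  # unbuffered, as above
waker_writer = os.fdopen(w, "wb", 0)

waker_writer.write(b"x")              # send bogus data to wake the loop
ready, _, _ = select.select([r], [], [], 0)
if ready:
    waker_reader.read()               # drain the wake-up byte
print("woken:", bool(ready))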
Example #11
 def _write_tmpfile(self, entry):
     """ Write the file data to a temp file """
     filedata = self._get_data(entry)[0]
     # get a temp file to write to that is in the same directory as
     # the existing file in order to preserve any permissions
     # protections on that directory, and also to avoid issues with
     # /tmp set nosetuid while creating files that are supposed to
     # be setuid
     try:
         (newfd, newfile) = \
             tempfile.mkstemp(prefix=os.path.basename(entry.get("name")),
                              dir=os.path.dirname(entry.get("name")))
     except OSError:
         err = sys.exc_info()[1]
         self.logger.error("POSIX: Failed to create temp file in %s: %s" %
                           (os.path.dirname(entry.get('name')), err))
         return False
     try:
         if isinstance(filedata, str) and str != unicode:
             os.fdopen(newfd, 'w').write(filedata)
         else:
             os.fdopen(newfd, 'wb').write(
                 filedata.encode(Bcfg2.Options.setup.encoding))
     except (OSError, IOError):
         err = sys.exc_info()[1]
         self.logger.error("POSIX: Failed to open temp file %s for writing "
                           "%s: %s" %
                           (newfile, entry.get("name"), err))
         return False
     return newfile
Example #12
 def record_site(self, job_status):
     """
     Need a doc string here.
     """
     job_status_name = None
     for name, code in JOB_RETURN_CODES._asdict().iteritems():
         if code == job_status:
             job_status_name = name
     try:
         with os.fdopen(
             os.open(
                 "task_statistics.%s.%s" % (self.site, job_status_name), os.O_APPEND | os.O_CREAT | os.O_RDWR, 0o644
             ),
             "a",
         ) as fd:
             fd.write("%d\n" % (self.job_id))
     except Exception as ex:
         self.logger.error(str(ex))
         # Swallow the exception - record_site is advisory only
     try:
         with os.fdopen(
             os.open("task_statistics.%s" % (job_status_name), os.O_APPEND | os.O_CREAT | os.O_RDWR, 0o644), "a"
         ) as fd:
             fd.write("%d\n" % (self.job_id))
     except Exception as ex:
         self.logger.error(str(ex))
Example #13
    def testCMIP6(self):
        try:
            # -------------------------------------------
            # Try to call cmor with a bad institution_ID
            # -------------------------------------------
            cmor.setup(
                inpath='Tables',
                netcdf_file_action=cmor.CMOR_REPLACE)

            cmor.dataset_json("Test/test_python_CMIP6_CV_badgridlabel.json")

            # ------------------------------------------
            # load Omon table and create masso variable
            # ------------------------------------------
            cmor.load_table("CMIP6_Omon.json")
            itime = cmor.axis(table_entry="time", units='months since 2010',
                              coord_vals=numpy.array([0, 1, 2, 3, 4.]),
                              cell_bounds=numpy.array([0, 1, 2, 3, 4, 5.]))
            ivar = cmor.variable(table_entry="masso", axis_ids=[itime], units='kg')

            data = numpy.random.random(5)
            for i in range(0, 5):
                cmor.write(ivar, data[i:i + 1])
        except:
            os.dup2(self.newstdout, 1)
            os.dup2(self.newstderr, 2)
            testOK = self.getAssertTest()

            sys.stdout = os.fdopen(self.newstdout, 'w', 0)
            sys.stderr = os.fdopen(self.newstderr, 'w', 0)
            # ------------------------------------------
            # Check error after signal handler is back
            # ------------------------------------------
            self.assertIn("\"gs1n\"", testOK)
Example #14
    def handle(self, *addrport, **options):
        # reopen stdout/stderr file descriptor with write mode
        # and 0 as the buffer size (unbuffered).
        # XXX: why?
        sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
        sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0)
 
        if len(addrport) == 0 :
            raise CommandError('Usage is runserver %s' % self.args)

        if len(addrport) == 1 :
            self.run_one(addrport[0], **options)
        else :
            from multiprocessing import Process

            plist = []
            for ap in addrport :
                p = Process(target=self.run_one, args=(ap,), kwargs=options)
                p.start()
                plist.append(p)

            # wait for each child server process to finish
            while plist :
                if plist[0].exitcode is None :
                    plist[0].join()
                else :
                    plist.pop(0)
Example #15
 def __init__(self, cmd, capturestderr=False, bufsize=-1):
     """The parameter 'cmd' is the shell command to execute in a
     sub-process.  On UNIX, 'cmd' may be a sequence, in which case arguments
     will be passed directly to the program without shell intervention (as
     with os.spawnv()).  If 'cmd' is a string it will be passed to the shell
     (as with os.system()).   The 'capturestderr' flag, if true, specifies
     that the object should capture standard error output of the child
     process.  The default is false.  If the 'bufsize' parameter is
     specified, it specifies the size of the I/O buffers to/from the child
     process."""
     _cleanup()
     self.cmd = cmd
     p2cread, p2cwrite = os.pipe()
     c2pread, c2pwrite = os.pipe()
     if capturestderr:
         errout, errin = os.pipe()
     self.pid = os.fork()
     if self.pid == 0:
         # Child
         os.dup2(p2cread, 0)
         os.dup2(c2pwrite, 1)
         if capturestderr:
             os.dup2(errin, 2)
         self._run_child(cmd)
     os.close(p2cread)
     self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
     os.close(c2pwrite)
     self.fromchild = os.fdopen(c2pread, 'r', bufsize)
     if capturestderr:
         os.close(errin)
         self.childerr = os.fdopen(errout, 'r', bufsize)
     else:
         self.childerr = None
Example #16
def write_env_var_to_conf_dir(var, value, conf_dir):
    env_jsonfile_path = os.path.join(conf_dir, 'environment.json')
    std_vars = ['DX_APISERVER_HOST', 'DX_APISERVER_PORT', 'DX_APISERVER_PROTOCOL', 'DX_PROJECT_CONTEXT_ID', 'DX_WORKSPACE_ID', 'DX_SECURITY_CONTEXT']
    if var in std_vars:
        try:
            with open(env_jsonfile_path) as fd:
                env_vars = json.load(fd)
        except:
            env_vars = {}
        if value is None and var in env_vars:
            del env_vars[var]
        else:
            env_vars[var] = value
        # Make sure the file has 600 permissions
        try:
            os.remove(env_jsonfile_path)
        except:
            pass
        with os.fdopen(os.open(env_jsonfile_path, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as fd:
            json.dump(env_vars, fd, indent=4)
            fd.write("\n")
    else: # DX_CLI_WD, DX_USERNAME, DX_PROJECT_CONTEXT_NAME
        # Make sure the file has 600 permissions
        try:
            os.remove(os.path.join(conf_dir, var))
        except:
            pass
        with os.fdopen(os.open(os.path.join(conf_dir, var), os.O_CREAT | os.O_WRONLY, 0o600), 'w') as fd:
            fd.write(value)

    if not os.path.exists(os.path.expanduser('~/.dnanexus_config/') + 'unsetenv'):
        with open(os.path.expanduser('~/.dnanexus_config/') + 'unsetenv', 'w') as fd:
            for var in std_vars:
                fd.write('unset ' + var + '\n')
Example #17
 def __init__(self, cmd, capturestderr=False, bufsize=-1):
     """The parameter 'cmd' is the shell command to execute in a
     sub-process.  The 'capturestderr' flag, if true, specifies that
     the object should capture standard error output of the child process.
     The default is false.  If the 'bufsize' parameter is specified, it
     specifies the size of the I/O buffers to/from the child process."""
     _cleanup()
     p2cread, p2cwrite = os.pipe()
     c2pread, c2pwrite = os.pipe()
     if capturestderr:
         errout, errin = os.pipe()
     self.pid = os.fork()
     if self.pid == 0:
         # Child
         os.dup2(p2cread, 0)
         os.dup2(c2pwrite, 1)
         if capturestderr:
             os.dup2(errin, 2)
         self._run_child(cmd)
     os.close(p2cread)
     self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
     os.close(c2pwrite)
     self.fromchild = os.fdopen(c2pread, 'r', bufsize)
     if capturestderr:
         os.close(errin)
         self.childerr = os.fdopen(errout, 'r', bufsize)
     else:
         self.childerr = None
     _active.append(self)
Example #18
 def start(self):
     Ernie.log("Starting")
     input = os.fdopen(3)
     output = os.fdopen(4, "w")
     
     while(True):
         ipy = self.read_berp(input)
         if ipy == None:
             print 'Could not read BERP length header. Ernie server may have gone away. Exiting now.'
             exit()
         
         if len(ipy) == 4 and ipy[0] == bert.Atom('call'):
             mod, fun, args = ipy[1:4]
             self.log("-> " + ipy.__str__())
             try:
                 res = self.dispatch(mod, fun, args)
                 opy = (bert.Atom('reply'), res)
                 self.log("<- " + opy.__str__())
                 self.write_berp(output, opy)
             except ServerError, e:
                 opy = (bert.Atom('error'), (bert.Atom('server'), 0, str(type(e)), str(e), ''))
                 self.log("<- " + opy.__str__())
                 self.write_berp(output, opy)
             except Exception, e:
                 opy = (bert.Atom('error'), (bert.Atom('user'), 0, str(type(e)), str(e), ''))
                 self.log("<- " + opy.__str__())
                 self.write_berp(output, opy)
Example #19
    def heartbeat(self):
        # setup pipes
        read_transport, read_proto = yield from self.loop.connect_read_pipe(
            asynchttp.StreamProtocol, os.fdopen(self.up_read, 'rb'))
        write_transport, _ = yield from self.loop.connect_write_pipe(
            asynchttp.StreamProtocol, os.fdopen(self.down_write, 'wb'))

        reader = read_proto.set_parser(websocket.WebSocketParser)
        writer = websocket.WebSocketWriter(write_transport)

        tulip.Task(self.start_server(writer))

        while True:
            try:
                msg = yield from reader.read()
            except asynchttp.EofStream:
                print('Supervisor is dead, {} stopping...'.format(os.getpid()))
                self.loop.stop()
                break

            if msg.tp == websocket.MSG_PING:
                writer.pong()
            elif msg.tp == websocket.MSG_CLOSE:
                break
            elif msg.tp == websocket.MSG_TEXT:  # broadcast message
                for wsc in self.clients:
                    wsc.send(msg.data.strip().encode())

        read_transport.close()
        write_transport.close()
Example #20
    def __init__(self, config_dir):
        self.app = Flask(__name__)
        self.service_map = {}
        self.event_map = {}
        signal.signal(signal.SIGTERM, self.term_handler)
        signal.signal(signal.SIGQUIT, self.term_handler)
        signal.signal(signal.SIGINT, self.term_handler)

        with open(config_dir) as config_fd:
            self.api_config = yaml.load(config_fd)
        self.api_fifo_name = str(uuid.uuid4()) + '.fifo'
        self.api_fifo_path = os.path.join(ApiConstants.API_PIPE_DIR,
                                          self.api_fifo_name)
        os.mkfifo(self.api_fifo_path)
        try:
            self.api_fifo_fd = os.open(self.api_fifo_path, os.O_NONBLOCK)
            self.api_fifo_file = os.fdopen(self.api_fifo_fd)
        except (IOError, OSError) as exc:
            print ("Unable to read the fifo file due to error {0} "
                   .format(exc))
            raise

        if not os.path.exists(self.api_config['moirai_input_fifo']):
            os.mkfifo(self.api_config['moirai_input_fifo'])

        try:
            self.moirai_fifo_fd = os.open(self.api_config['moirai_input_fifo'],
                                          os.O_WRONLY | os.O_NONBLOCK)
            self.moirai_fifo_file = os.fdopen(self.moirai_fifo_fd, 'w')
        except (IOError, OSError) as exc:
            print "Unable to connect to Moirai Server"
            self.moirai_fifo_fd = None
        self.setup_routes()
        self.command_id = 0
Example #21
    def verify(self, data, signature=None, keyrings=None, homedir=None):
        '''
        `data` <string> the data to verify.
        `signature` <string> The signature, if detached from the data.
        `keyrings` <list of string> Additional keyrings to search in.
        `homedir` <string> Override the configured homedir.
        '''

        tmpdir = tempfile.mkdtemp()
        data_file, data_path = tempfile.mkstemp(dir=tmpdir)
        data_file = os.fdopen(data_file, 'w')
        data_file.write(data)
        data_file.close()
        if signature:
            sig_file, sig_path = tempfile.mkstemp(dir=tmpdir)
            sig_file = os.fdopen(sig_file, 'w')
            sig_file.write(signature)
            sig_file.close()
        else:
            sig_path = None
        try:
            return self.verify_from_file(
                data_path,
                sig_path=sig_path,
                keyrings=keyrings,
                homedir=homedir
            )
        finally:
            shutil.rmtree(tmpdir)
Example #22
    def GET(self, uid, gid):
        web.header("Content-type","text/plain")
        uid, gid = map(int, (uid, gid))

        # Security
        if uid < 500 or gid < 500:
            yield "Invalid UID (%d) or GID (%d)\n" % (uid, gid)
            return

        try: pwd.getpwuid(uid)
        except KeyError:
            yield "UID (%d) does not exist\n" % (uid, gid)
            return

        for k in stop_program().GET(): yield k
        yield "Starting program with %d/%d\n" % (uid, gid)
        #p = subprocess.Popen(EXEC_SH, shell=True,
        #                     preexec_fn=lambda: change_user(uid,gid)) # This fails when running as daemon

        rpipe, wpipe = os.pipe() # Reference: http://ameblo.jp/oyasai10/entry-10615215673.html
        pid = os.fork()
        if pid == 0: # Child
            os.close(rpipe)
            wpipe = os.fdopen(wpipe, "w")
            change_user(uid,gid)
            p = subprocess.Popen(EXEC_SH, shell=True)
            wpipe.write("%d\n"%p.pid)
            sys.exit()
        else: # Parent
            os.close(wpipe)
            rpipe = os.fdopen(rpipe, "r")
            pid = int(rpipe.readline().strip())
            open(PID_FILE, "w").write("%d"%pid)
            os.wait() # Wait child
Example #23
    def __init__(self, source_fd, sink_fd):
        """Create a new pipe object from the given file descriptors.

        ``source_fd`` is a file descriptor for the readable side of the pipe,
        ``sink_fd`` is a file descriptor for the writeable side."""
        self.source = os.fdopen(source_fd, 'rb', 0)
        self.sink = os.fdopen(sink_fd, 'wb', 0)
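
Hypothetical usage of this wrapper (the class name Pipe is assumed): with buffering disabled, a write on the sink is immediately visible to the source.

import os

source_fd, sink_fd = os.pipe()
pipe = Pipe(source_fd, sink_fd)
pipe.sink.write(b"hello\n")
print(pipe.source.readline())  # b'hello\n'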
Example #24
def override_io():
    old_out = sys.stdout
    old_err = sys.stderr

    fd_out = int(os.environ['TEST_WRITE_OUT'])
    fd_err = int(os.environ['TEST_WRITE_ERR'])
    if sys.platform == 'win32':
        import msvcrt
        fd_out = msvcrt.open_osfhandle(fd_out, 0)
        fd_err = msvcrt.open_osfhandle(fd_err, 0)
    api_out = os.fdopen(fd_out, 'w')
    api_err = os.fdopen(fd_err, 'w')

    class Intercept:
        def __init__(self, api, old):
            self.api = api
            self.old = old

        def write(self, data):
            import threading
            if threading.current_thread().name.startswith('APIThread'):
                self.api.write(data)
            else:
                self.old.write(data)

        def flush(self):
            self.api.flush()
            self.old.flush()

    sys.stdout = Intercept(api_out, old_out)
    sys.stderr = Intercept(api_err, old_err)
Example #25
	def __runOnce(self, CMD, use_try=None):
		'''
		CMD: a R command string
		'''
		use_try = use_try or self._DEBUG_MODE
		newline = self.newline
		tail_token = 'R command at time: %s' % repr(time.time())
		#tail_token_r = re.sub(r'[\(\)\.]', r'\\\1', tail_token)
		tail_cmd = 'print("%s")%s' % (tail_token, newline)
		re_tail = re.compile(r'>\sprint\("%s"\)\r?\n\[1\]\s"%s"\r?\n$' % (tail_token.replace(' ', '\\s'), tail_token.replace(' ', '\\s')) )
		if len(CMD) <= self.max_len or not self.localhost: 
			fn = None
		else:
			fh, fn = tempfile.mkstemp()
			os.fdopen(fh, 'wb').write(_mybytes(CMD))
			if sys.platform == 'cli': os.close(fh) # this is necessary on IronPython 
			CMD = 'source("%s")' % fn.replace('\\', '/')
		CMD = (use_try and 'try({%s})%s%s' or '%s%s%s') % (CMD, newline, tail_cmd)
		sendAll(self.prog, CMD)
		rlt = ''
		while not re_tail.search(rlt): 
			try:
				rltonce = readLine(self.prog)
				if rltonce: rlt = rlt + rltonce
			except: break
		else: 
			rlt = re_tail.sub('', rlt)
			if rlt.startswith('> '): rlt = rlt[2:]
		if fn is not None:
			os.unlink(fn)
		return rlt
Example #26
        def downloadStream(self):
            readable_fh, writable_fh = os.pipe()
            with os.fdopen(readable_fh, 'r') as readable:
                with os.fdopen(writable_fh, 'w') as writable:
                    def writer():
                        try:
                            if self.content is not None:
                                writable.write(self.content)
                            elif self.version:
                                headers = self._s3EncryptionHeaders()
                                key = self.outer.filesBucket.get_key(self.fileID, validate=False)
                                key.get_contents_to_file(writable,
                                                         headers=headers,
                                                         version_id=self.version)
                            else:
                                assert False
                        finally:
                            # This close() will send EOF to the reading end and ultimately cause
                            # the yield to return. It also makes the implicit .close() done by the
                            # enclosing "with" context redundant, but that should be ok since
                            # .close() on file objects is idempotent.
                            writable.close()

                    thread = ExceptionalThread(target=writer)
                    thread.start()
                    yield readable
                    thread.join()
Example #27
def write_output(data, options):
    if options.output == "-":
        output = sys.stdout
    elif options.reparse_validate_gir:
        main_f, main_f_name = tempfile.mkstemp(suffix='.gir')
        main_f = os.fdopen(main_f, 'w')
        main_f.write(data)
        main_f.close()

        temp_f, temp_f_name = tempfile.mkstemp(suffix='.gir')
        temp_f = os.fdopen(temp_f, 'w')
        passthrough_gir(main_f_name, temp_f)
        temp_f.close()
        if not utils.files_are_identical(main_f_name, temp_f_name):
            _error("Failed to re-parse gir file; scanned=%r passthrough=%r" % (
                main_f_name, temp_f_name))
        os.unlink(temp_f_name)
        try:
            shutil.move(main_f_name, options.output)
        except OSError as e:
            if e.errno == errno.EPERM:
                os.unlink(main_f_name)
                return 0
            raise
        return 0
    else:
        try:
            output = open(options.output, "w")
        except IOError as e:
            _error("opening output for writing: %s" % (e.strerror, ))

    try:
        output.write(data)
    except IOError as e:
        _error("while writing output: %s" % (e.strerror, ))
Example #28
	def do_ipy(self, args):
		"""Start an interactive Python interpreter"""
		from c1218.data import C1218Packet
		from c1219.access.general import C1219GeneralAccess
		from c1219.access.security import C1219SecurityAccess
		from c1219.access.log import C1219LogAccess
		from c1219.access.telephone import C1219TelephoneAccess
		vars = {
			'__version__': __version__,
			'frmwk': self.frmwk,
			'C1218Packet': C1218Packet,
			'C1219GeneralAccess': C1219GeneralAccess,
			'C1219SecurityAccess': C1219SecurityAccess,
			'C1219LogAccess': C1219LogAccess,
			'C1219TelephoneAccess': C1219TelephoneAccess
		}
		banner = 'The Framework Instance Is In The Variable \'frmwk\'' + os.linesep
		if self.frmwk.is_serial_connected():
			vars['conn'] = self.frmwk.serial_connection
			banner = banner + 'The Connection Instance Is In The Variable \'conn\'' + os.linesep
		pyconsole = code.InteractiveConsole(vars)

		savestdin = os.dup(sys.stdin.fileno())
		savestdout = os.dup(sys.stdout.fileno())
		savestderr = os.dup(sys.stderr.fileno())
		try:
			pyconsole.interact(banner)
		except SystemExit:
			sys.stdin = os.fdopen(savestdin, 'r', 0)
			sys.stdout = os.fdopen(savestdout, 'w', 0)
			sys.stderr = os.fdopen(savestderr, 'w', 0)
Example #29
def update_file(path, updater, merger = lambda f: True):
    """update a file in a transaction-like manner"""

    fr = fw = None
    try:
        fd = os.open(path, os.O_CREAT|os.O_RDWR)
        try:
            fr = os.fdopen(fd, 'r+b')
        except:
            os.close(fd)
            raise
        fcntl.lockf(fr, fcntl.LOCK_EX)
        if not merger(fr):
            return

        tmpp = path + '.tmp.' + str(os.getpid())
        fd = os.open(tmpp, os.O_CREAT|os.O_EXCL|os.O_WRONLY)
        try:
            fw = os.fdopen(fd, 'wb', 0)
        except:
            os.close(fd)
            raise
        updater(fw)
        os.fsync(fd)
        os.rename(tmpp, path)
    finally:
        for fx in (fr, fw):
            if fx:
                fx.close()
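
Hypothetical usage of update_file() above: the updater writes the replacement content to the temp file, while the merger may inspect the locked original and abort the update by returning a falsy value (here it always proceeds).

update_file('counter.txt',
            updater=lambda fw: fw.write(b'tick\n'),
            merger=lambda fr: True)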
Example #30
        def __t_pty_tracker(self, trackerclass, **kwargs):
                def __drain(masterf):
                        while True:
                                termdata = masterf.read(1024)
                                if len(termdata) == 0:
                                        break

                #
                # - Allocate a pty
                # - Create a thread to drain off the master side; without
                #   this, the slave side will block when trying to write.
                # - Connect the prog tracker to the slave side
                # - Set it running
                #
                (master, slave) = pty.openpty()
                slavef = os.fdopen(slave, "w")
                masterf = os.fdopen(master, "r")

                t = threading.Thread(target=__drain, args=(masterf,))
                t.start()

                p = trackerclass(output_file=slavef, **kwargs)
                progress.test_progress_tracker(p, gofast=True)
                slavef.close()

                t.join()
                masterf.close()
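
A stripped-down pty sketch (Unix only): bytes written to the slave side appear on the master, with the terminal's newline translation applied.

import os
import pty

master, slave = pty.openpty()
masterf = os.fdopen(master, "rb", 0)
slavef = os.fdopen(slave, "wb", 0)

slavef.write(b"hi\n")
print(masterf.readline())  # typically b'hi\r\n' after ONLCR translation
slavef.close()
masterf.close()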
Example #31
#
# Enables flux in influxdb, for use with influxdb-client
# Currently influxdb 2.x doesn't support 32-bit armhf (Pi 1/2/Zero) so not upgrading past 1.8 (for now)
#
import subprocess
from os import fdopen, remove
from shutil import move, copymode
from tempfile import mkstemp

influx_file = "/etc/influxdb/influxdb.conf"
changed = False

fd, abs_path = mkstemp()
with fdopen(fd, 'w') as new_file:
    with open(influx_file, 'r') as old_file:
        for line in old_file:
            if 'flux-enabled' in line and line != 'flux-enabled = true\n':
                changed = True
                new_file.write('flux-enabled = true\n')
            else:
                new_file.write(line)

copymode(influx_file, abs_path)
remove(influx_file)
move(abs_path, influx_file)

if changed:
    print("Enabling Flux and restarting influxdb.")
    subprocess.Popen('service influxdb restart', shell=True)
else:
    print("Flux is already enabled. No changes necessary.")
Example #32
 def __enter__(self):
     self.readable_fh, writable_fh = os.pipe()
     self.writable = os.fdopen(writable_fh, 'wb')
     self.thread = ExceptionalThread(target=self._reader)
     self.thread.start()
     return self.writable
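
A minimal sketch of the _reader counterpart this __enter__ assumes (my reconstruction, not the project's code): the thread drains the read end so the writer handed to the caller never blocks on a full pipe buffer.

 def _reader(self):
     with os.fdopen(self.readable_fh, 'rb') as readable:
         self.data = readable.read()  # consume until the writer side is closed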
Example #33
            log.debug('Changing ownership of %s with: %s' %
                      (path, ' '.join(cmd)))
            p = subprocess.Popen(cmd,
                                 shell=False,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
            assert p.returncode == 0, stderr
        except Exception, e:
            log.warning('Changing ownership of uploaded file %s failed: %s' %
                        (path, str(e)))

    # TODO: json_file should go in the working directory
    json_file = tempfile.mkstemp()
    json_file_path = json_file[1]
    json_file = os.fdopen(json_file[0], 'w')
    for uploaded_dataset in uploaded_datasets:
        data = uploaded_dataset.data
        if uploaded_dataset.type == 'composite':
            # we need to init metadata before the job is dispatched
            data.init_meta()
            for meta_name, meta_value in uploaded_dataset.metadata.iteritems():
                setattr(data.metadata, meta_name, meta_value)
            trans.sa_session.add(data)
            trans.sa_session.flush()
            json = dict(file_type=uploaded_dataset.file_type,
                        dataset_id=data.dataset.id,
                        dbkey=uploaded_dataset.dbkey,
                        type=uploaded_dataset.type,
                        metadata=uploaded_dataset.metadata,
                        primary_file=uploaded_dataset.primary_file,
Example #34
def main(args):
    parser = OptionParser()
    parser.add_option('--tmpmdpath', default=None, 
                help="path where the outputs should be dumped for this worker")
    parser.add_option('--pkglist', default=None, 
                help="file to read the pkglist from in lieu of all of them on the cli")
    parser.add_option("--pkgoptions", default=[], action='append',
                help="pkgoptions in the format of key=value")
    parser.add_option("--quiet", default=False, action='store_true',
                help="only output errors and a total")
    parser.add_option("--verbose", default=False, action='store_true',
                help="output errors and a total")
    parser.add_option("--globalopts", default=[], action='append',
                help="general options in the format of key=value")

    
    opts, pkgs = parser.parse_args(args)
    external_data = {'_packagenumber': 1}
    globalopts = {}
    
    for strs in opts.pkgoptions:
        k,v = strs.split('=', 1)
        if v in ['True', 'true', 'yes', '1', 1]:
            v = True
        elif v in ['False', 'false', 'no', '0', 0]:
            v = False
        elif v in ['None', 'none', '']:
            v = None
        external_data[k] = v

    for strs in opts.globalopts:
        k,v = strs.split('=', 1)
        if v in ['True', 'true', 'yes', '1', 1]:
            v = True
        elif v in ['False', 'false', 'no', '0', 0]:
            v = False
        elif v in ['None', 'none', '']:
            v = None
        globalopts[k] = v

    # turn off buffering on stdout
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
    
    reldir = external_data['_reldir']
    ts = rpmUtils.transaction.initReadOnlyTransaction()
    if opts.tmpmdpath:
        files = [open(opts.tmpmdpath + '/%s.xml' % i, 'w')
                 for i in ('primary', 'filelists', 'other')]
        def output(*xml):
            for fh, buf in zip(files, xml):
                fh.write(buf)
    else:
        def output(*xml):
            buf = ' '.join(str(len(i)) for i in xml)
            sys.stdout.write('*** %s\n' % buf)
            for buf in xml:
                sys.stdout.write(buf)

    if opts.pkglist:
        for line in open(opts.pkglist,'r').readlines():
            line = line.strip()
            if re.match('^\s*\#.*', line) or re.match('^\s*$', line):
                continue
            pkgs.append(line)

    clog_limit = globalopts.get('clog_limit', None)
    if clog_limit is not None:
        clog_limit = int(clog_limit)
    for pkgfile in pkgs:
        pkgpath = reldir + '/' + pkgfile
        if not os.path.exists(pkgpath):
            print >> sys.stderr, "File not found: %s" % pkgpath
            output()
            continue

        try:
            if not opts.quiet and opts.verbose:
                print "reading %s" % (pkgfile)

            pkg = createrepo.yumbased.CreateRepoPackage(ts, package=pkgpath, 
                                sumtype=globalopts.get('sumtype', None), 
                                external_data=external_data)
            output(pkg.xml_dump_primary_metadata(),
                   pkg.xml_dump_filelists_metadata(),
                   pkg.xml_dump_other_metadata(clog_limit=clog_limit))
        except yum.Errors.YumBaseError, e:
            print >> sys.stderr, "Error: %s" % e
            output()
            continue
        else:
            external_data['_packagenumber']+=1
Example #35
 def _cache(self):
     if self._access_token_cache_file is not None:
         with os.fdopen(os.open(self._access_token_cache_file,
                                os.O_WRONLY | os.O_CREAT, 0o600),
                        'w') as f:
             json.dump(self._res, f)
Example #36
def get_tempfilename(suffix='.tmp'):
        fh, fname = tempfile.mkstemp(suffix=suffix)
        outsock = os.fdopen(fh,'w')
        outsock.close()
        return fname
Example #37
    def install(self, pkgplan, orig):
        """Client-side method that installs a file."""

        mode = None
        try:
            mode = int(self.attrs.get("mode", None), 8)
        except (TypeError, ValueError):
            # Mode isn't valid, so let validate raise a more
            # informative error.
            self.validate(fmri=pkgplan.destination_fmri)

        owner, group = self.get_fsobj_uid_gid(pkgplan,
                                              pkgplan.destination_fmri)

        final_path = self.get_installed_path(pkgplan.image.get_root())

        # Don't allow installation through symlinks.
        self.fsobj_checkpath(pkgplan, final_path)

        if not os.path.exists(os.path.dirname(final_path)):
            self.makedirs(os.path.dirname(final_path),
                          mode=misc.PKG_DIR_MODE,
                          fmri=pkgplan.destination_fmri)
        elif (not orig and not pkgplan.origin_fmri and "preserve" in self.attrs
              and self.attrs["preserve"] not in ("abandon", "install-only")
              and os.path.isfile(final_path)):
            # Unpackaged editable file is already present during
            # initial install; salvage it before continuing.
            pkgplan.salvage(final_path)

        # XXX If we're upgrading, do we need to preserve file perms from
        # existing file?

        # check if we have a save_file active; if so, simulate file
        # being already present rather than installed from scratch

        if "save_file" in self.attrs:
            orig = self.restore_file(pkgplan.image)

        # See if we need to preserve the file, and if so, set that up.
        #
        # XXX What happens when we transition from preserve to
        # non-preserve or vice versa? Do we want to treat a preserve
        # attribute as turning the action into a critical action?
        #
        # XXX We should save the originally installed file.  It can be
        # used as an ancestor for a three-way merge, for example.  Where
        # should it be stored?
        pres_type = self._check_preserve(orig, pkgplan)
        do_content = True
        old_path = None
        if pres_type == True or (pres_type and pkgplan.origin_fmri
                                 == pkgplan.destination_fmri):
            # File is marked to be preserved and exists so don't
            # reinstall content.
            do_content = False
        elif pres_type == "legacy":
            # Only rename old file if this is a transition to
            # preserve=legacy from something else.
            if orig.attrs.get("preserve", None) != "legacy":
                old_path = final_path + ".legacy"
        elif pres_type == "renameold.update":
            old_path = final_path + ".update"
        elif pres_type == "renameold":
            old_path = final_path + ".old"
        elif pres_type == "renamenew":
            final_path = final_path + ".new"
        elif pres_type == "abandon":
            return

        # If it is a directory (and not empty) then we should
        # salvage the contents.
        if os.path.exists(final_path) and \
            not os.path.islink(final_path) and \
            os.path.isdir(final_path):
            try:
                os.rmdir(final_path)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    pass
                elif e.errno in (errno.EEXIST, errno.ENOTEMPTY):
                    pkgplan.salvage(final_path)
                elif e.errno != errno.EACCES:
                    # this happens on Windows
                    raise

        # XXX This needs to be modularized.
        if do_content and self.needsdata(orig, pkgplan):
            tfilefd, temp = tempfile.mkstemp(dir=os.path.dirname(final_path))
            if not self.data:
                # The state of the filesystem changed after the
                # plan was prepared; attempt a one-off
                # retrieval of the data.
                self.data = self.__set_data(pkgplan)
            stream = self.data()
            tfile = os.fdopen(tfilefd, "wb")
            try:
                # Always verify using the most preferred hash
                hash_attr, hash_val, hash_func  = \
                    digest.get_preferred_hash(self)
                shasum = misc.gunzip_from_stream(stream, tfile, hash_func)
            except zlib.error as e:
                raise ActionExecutionError(
                    self,
                    details=_("Error decompressing payload: "
                              "{0}").format(" ".join([str(a)
                                                      for a in e.args])),
                    error=e)
            finally:
                tfile.close()
                stream.close()

            if shasum != hash_val:
                raise ActionExecutionError(
                    self,
                    details=_("Action data hash verification "
                              "failure: expected: {expected} computed: "
                              "{actual} action: {action}").format(
                                  expected=hash_val,
                                  actual=shasum,
                                  action=self))

        else:
            temp = final_path

        try:
            os.chmod(temp, mode)
        except OSError as e:
            # If the file didn't exist, assume that's intentional,
            # and drive on.
            if e.errno != errno.ENOENT:
                raise
            else:
                return

        try:
            portable.chown(temp, owner, group)
        except OSError as e:
            if e.errno != errno.EPERM:
                raise

        # XXX There's a window where final_path doesn't exist, but we
        # probably don't care.
        if do_content and old_path:
            try:
                portable.rename(final_path, old_path)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    # Only care if file isn't gone already.
                    raise

        # This is safe even if temp == final_path.
        try:
            portable.rename(temp, final_path)
        except OSError as e:
            raise api_errors.FileInUseException(final_path)

        # Handle timestamp if specified (and content was installed).
        if do_content and "timestamp" in self.attrs:
            t = misc.timestamp_to_time(self.attrs["timestamp"])
            try:
                os.utime(final_path, (t, t))
            except OSError as e:
                if e.errno != errno.EACCES:
                    raise

                # On Windows, the time cannot be changed on a
                # read-only file
                os.chmod(final_path, stat.S_IRUSR | stat.S_IWUSR)
                os.utime(final_path, (t, t))
                os.chmod(final_path, mode)

        # Handle system attributes.
        sattr = self.attrs.get("sysattr")
        if sattr:
            if isinstance(sattr, list):
                sattr = ",".join(sattr)
            sattrs = sattr.split(",")
            if len(sattrs) == 1 and \
                sattrs[0] not in portable.get_sysattr_dict():
                # not a verbose attr, try as a compact attr seq
                arg = sattrs[0]
            else:
                arg = sattrs

            try:
                portable.fsetattr(final_path, arg)
            except OSError as e:
                if e.errno != errno.EINVAL:
                    raise
                warn = _("System attributes are not supported "
                         "on the target image filesystem; 'sysattr'"
                         " ignored for {0}").format(self.attrs["path"])
                pkgplan.image.imageplan.pd.add_item_message(
                    pkgplan.destination_fmri,
                    misc.time_to_timestamp(time.time()), MSG_WARNING, warn)
            except ValueError as e:
                warn = _("Could not set system attributes for {path}"
                         "'{attrlist}': {err}").format(attrlist=sattr,
                                                       err=e,
                                                       path=self.attrs["path"])
                pkgplan.image.imageplan.pd.add_item_message(
                    pkgplan.destination_fmri,
                    misc.time_to_timestamp(time.time()), MSG_WARNING, warn)
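
The content-installation path above is an elaborate variant of the write-to-temp-then-rename idiom; a condensed sketch under simplified assumptions (no hash verification, no preserve logic):

import os
import tempfile

def atomic_install(final_path, payload, mode=0o644):
    # mkstemp() in the destination directory keeps the final rename on one filesystem
    tfilefd, temp = tempfile.mkstemp(dir=os.path.dirname(final_path) or '.')
    with os.fdopen(tfilefd, "wb") as tfile:
        tfile.write(payload)
    os.chmod(temp, mode)
    os.rename(temp, final_path)  # atomic replacement on POSIX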
Example #38
def bitbake_main(configParams, configuration):

    # Python multiprocessing requires /dev/shm on Linux
    if sys.platform.startswith('linux') and not os.access('/dev/shm', os.W_OK | os.X_OK):
        raise BBMainException("FATAL: /dev/shm does not exist or is not writable")

    # Unbuffer stdout to avoid log truncation in the event
    # of a disorderly exit, as well as to provide timely
    # updates to log files for use with tail
    try:
        if sys.stdout.name == '<stdout>':
            sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
    except:
        pass


    configuration.setConfigParameters(configParams)

    ui_module = import_extension_module(bb.ui, configParams.ui)
    servermodule = import_extension_module(bb.server, configParams.servertype)

    if configParams.server_only:
        if configParams.servertype != "xmlrpc":
            raise BBMainException("FATAL: If '--server-only' is defined, we must set the "
                                  "servertype as 'xmlrpc'.\n")
        if not configParams.bind:
            raise BBMainException("FATAL: The '--server-only' option requires a name/address "
                                  "to bind to with the -B option.\n")
        if configParams.remote_server:
            raise BBMainException("FATAL: The '--server-only' option conflicts with %s.\n" %
                                  ("the BBSERVER environment variable" if "BBSERVER" in os.environ \
                                   else "the '--remote-server' option" ))

    if configParams.bind and configParams.servertype != "xmlrpc":
        raise BBMainException("FATAL: If '-B' or '--bind' is defined, we must "
                              "set the servertype as 'xmlrpc'.\n")

    if configParams.remote_server and configParams.servertype != "xmlrpc":
        raise BBMainException("FATAL: If '--remote-server' is defined, we must "
                              "set the servertype as 'xmlrpc'.\n")

    if configParams.observe_only and (not configParams.remote_server or configParams.bind):
        raise BBMainException("FATAL: '--observe-only' can only be used by UI clients "
                              "connecting to a server.\n")

    if configParams.kill_server and not configParams.remote_server:
        raise BBMainException("FATAL: '--kill-server' can only be used to terminate a remote server")

    if "BBDEBUG" in os.environ:
        level = int(os.environ["BBDEBUG"])
        if level > configuration.debug:
            configuration.debug = level

    bb.msg.init_msgconfig(configParams.verbose, configuration.debug,
                         configuration.debug_domains)

    # Ensure logging messages get sent to the UI as events
    handler = bb.event.LogHandler()
    if not configParams.status_only:
        # In status only mode there are no logs and no UI
        logger.addHandler(handler)

    # Clear away any spurious environment variables while we stoke up the cooker
    cleanedvars = bb.utils.clean_environment()

    featureset = []
    if not configParams.server_only:
        # Collect the feature set for the UI
        featureset = getattr(ui_module, "featureSet", [])

    if not configParams.remote_server:
        # we start a server with a given configuration
        server = start_server(servermodule, configParams, configuration, featureset)
        bb.event.ui_queue = []
    else:
        # we start a stub server that is actually a XMLRPClient that connects to a real server
        server = servermodule.BitBakeXMLRPCClient(configParams.observe_only, configParams.xmlrpctoken)
        server.saveConnectionDetails(configParams.remote_server)


    if not configParams.server_only:
        try:
            server_connection = server.establishConnection(featureset)
        except Exception as e:
            bb.fatal("Could not connect to server %s: %s" % (configParams.remote_server, str(e)))

        # Restore the environment in case the UI needs it
        for k in cleanedvars:
            os.environ[k] = cleanedvars[k]

        logger.removeHandler(handler)


        if configParams.status_only:
            server_connection.terminate()
            return 0

        if configParams.kill_server:
            server_connection.connection.terminateServer()
            bb.event.ui_queue = []
            return 0

        try:
            return ui_module.main(server_connection.connection, server_connection.events, configParams)
        finally:
            bb.event.ui_queue = []
            server_connection.terminate()
    else:
        print("Bitbake server address: %s, server port: %s" % (server.serverImpl.host, server.serverImpl.port))
        return 0

    return 1
Example #39
                    <min x="''' + str(-(SIZE_X / 2)) + '''" y="0" z="1"/>
                    <max x="''' + str(SIZE_X / 2) + '''" y="''' + str(
            SIZE_Y - 1) + '''" z="''' + str(SIZE_Z) + '''"/>
                </Grid>
            </ObservationFromGrid>
            <VideoProducer viewpoint="1">
                <Width>860</Width>
                <Height>480</Height>
            </VideoProducer>
        </AgentHandlers>
    </AgentSection>

  </Mission>''', gridJson


sys.stdout = os.fdopen(sys.stdout.fileno(), 'w',
                       0)  # flush print output immediately

agent_host = MalmoPython.AgentHost()
try:
    agent_host.parse(sys.argv)
except RuntimeError as e:
    print 'ERROR:', e
    print agent_host.getUsage()
    exit(1)
if agent_host.receivedArgument("help"):
    print agent_host.getUsage()
    exit(0)

num_iterations = 30000
if agent_host.receivedArgument("test"):
    num_iterations = 10  # Haven't got all day
Example #40
def main(argv=sys.argv):
    log = logging.getLogger('heat-config')
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(
        logging.Formatter(
            '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s'))
    log.addHandler(handler)
    log.setLevel('DEBUG')

    prepare_dir(OUTPUTS_DIR)
    prepare_dir(WORKING_DIR)
    os.chdir(WORKING_DIR)

    c = json.load(sys.stdin)

    use_hiera = c['options'].get('enable_hiera', False)
    use_facter = c['options'].get('enable_facter', True)
    modulepath = c['options'].get('modulepath')
    tags = c['options'].get('tags')

    facts = {}
    hiera = {}

    fqdn = get_hostname_f(log)
    if fqdn:
        facts['FACTER_fqdn'] = fqdn

    for input in c['inputs']:
        input_name = input['name']
        input_value = input.get('value', '')
        if use_facter:
            fact_name = 'FACTER_%s' % input_name
            facts[fact_name] = input_value
        if use_hiera:
            hiera[input_name] = input_value

    if use_hiera:
        prepare_dir(HIERA_DATADIR)
        hiera_data = os.path.join(HIERA_DATADIR,
                                  'heat_config_%s.json' % c['name'])
        with os.fdopen(os.open(hiera_data,
                               os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o600),
                       'w') as hiera_file:
            hiera_file.write(json.dumps(hiera).encode('utf8'))
        facts['FACTER_deploy_config_name'] = c['name']

    fn = os.path.join(WORKING_DIR, '%s.pp' % c['id'])
    heat_outputs_path = os.path.join(OUTPUTS_DIR, c['id'])
    facts['FACTER_heat_outputs_path'] = heat_outputs_path

    env_debug = ' '.join('%s="%s" ' % (k, v) for k, v in facts.items())

    env = os.environ.copy()
    env.update(facts)

    with os.fdopen(os.open(fn, os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o700),
                   'w') as f:
        f.write(c.get('config', '').encode('utf-8'))
    cmd = [PUPPET_CMD, 'apply', '--detailed-exitcodes', fn]
    if modulepath:
        cmd.insert(-1, '--modulepath')
        cmd.insert(-1, modulepath)
    if tags:
        cmd.insert(-1, '--tags')
        cmd.insert(-1, tags)
    log.debug('Running %s %s' % (env_debug, ' '.join(cmd)))
    try:
        subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, env=env)
    except OSError:
        log.warn('puppet not installed yet')
        return
    stdout, stderr = subproc.communicate()

    log.info('Return code %s' % subproc.returncode)
    if stdout:
        log.info(stdout)
    if stderr:
        log.info(stderr)

    # returncode of 2 means there were successful changes
    if subproc.returncode in (0, 2):
        returncode = 0
        log.info('Completed %s' % fn)
    else:
        returncode = subproc.returncode
        log.error("Error running %s. [%s]\n" % (fn, subproc.returncode))

    response = {}

    for output in c.get('outputs') or []:
        output_name = output['name']
        try:
            with open('%s.%s' % (heat_outputs_path, output_name)) as out:
                response[output_name] = out.read()
        except IOError:
            pass

    response.update({
        'deploy_stdout': stdout,
        'deploy_stderr': stderr,
        'deploy_status_code': returncode,
    })

    json.dump(response, sys.stdout)
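
Both file writes in this example use the same idiom: os.open with an explicit mode, wrapped by os.fdopen so the descriptor gets normal file-object semantics. The point is that the 0o600/0o700 permissions are applied at creation time rather than bolted on with a later chmod. A minimal self-contained sketch of the idiom (path and payload are illustrative, not from the original):

import os

def write_private_file(path, data):
    # O_CREAT | O_TRUNC | O_WRONLY creates or truncates the file; the
    # 0o600 mode is applied atomically at creation, so the file is never
    # readable by other users, even briefly. (The process umask can still
    # clear permission bits, but never add them.)
    fd = os.open(path, os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o600)
    with os.fdopen(fd, 'w') as f:
        f.write(data)

write_private_file('/tmp/hiera_example.json', '{"example": true}')
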
Example #41
def process_msg(message):
    if message["act"] == "get_topics":
        response = artm_bridge.model.topics
    elif message["act"] == "get_documents":
        topic_id = message["topic_id"]
        offset = message["offset"]
        limit = message["limit"]
        if type(topic_id) is not str:
            raise BridgeParamError("incorrect param type: `topic_id`")
        if type(offset) is not int or type(limit) is not int:
            raise BridgeParamError(
                "`limit` and `offset` fields must be integer")
        docs, weights = artm_bridge.get_documents_by_topic(topic_id,
                                                           offset=offset,
                                                           limit=limit)
        response = {"docs": docs, "weights": weights}
    elif message["act"] == "get_document":
        doc_id = message["doc_id"]
        if type(doc_id) is not str:
            raise BridgeParamError("incorrect param type: `doc_id`")
        docs = artm_bridge.data_source.get_documents_by_ids(
            [doc_id], with_modalities=True)
        if len(docs) == 0:
            raise BridgeParamError(
                "document with `doc_id` = '%s' is not found" % doc_id)
        doc = docs[0]
        if message["recommend_tags"]:
            doc["recommended_tags"] = artm_bridge.recommend_tags_by_doc(doc)
        response = doc
    elif message["act"] == "perform_search":
        query = message["query"]
        limit = message["limit"]
        if type(query) is not str:
            raise BridgeParamError("incorrect param type: `query`")
        if type(limit) is not int:
            raise BridgeParamError("incorrect param type: `limit`")
        response = dict(
            zip(["docs", "theta"],
                artm_bridge.search_documents(query, limit=limit)))
    elif message["act"] == "recommend_docs":
        doc_id = message["doc_id"]
        if type(doc_id) is not str:
            raise BridgeParamError("incorrect param type: `doc_id`")
        sim_docs_ids = artm_bridge.recommend_docs_by_doc(doc_id)
        response = artm_bridge.data_source.get_documents_by_ids(
            sim_docs_ids, with_texts=False)
    elif message["act"] == "transform_doc":
        doc_path = message["doc_path"]
        filename = message["filename"]
        try:
            # Initialize file resources
            doc_file = open(doc_path)
            vw_fd, vw_path = tempfile.mkstemp(prefix="upload", text=True)
            vw_file = os.fdopen(vw_fd, "w")
            batch_path = tempfile.mkdtemp(prefix="batch")
            # Parse uploaded file
            doc = pipeline.fit_transform(doc_file)
            # Save to Vowpal Wabbit file
            text_utils.VowpalWabbitSink(vw_file, lambda x: "upload") \
                      .fit_transform([doc])
            # Transform uploaded document and return its Theta matrix
            response = {}
            response["filename"] = filename
            response["theta"] = artm_bridge.model.transform_one(
                vw_path, batch_path)
        except:
            raise
        finally:
            # Delete uploaded file
            doc_file.close()
            os.remove(doc_path)
            # Delete temporary files/dirs
            os.remove(vw_path)
            rm_flat_dir(batch_path)
    elif False and message["act"] == "get_next_assessment":
        ass_id = message["assessor_id"]
        ass_cnt = message["assessors_cnt"]
        col_name = message["collection_name"]

        if ass_id >= ass_cnt:
            response = "Incorrent `assessor_id`"
        else:
            docs_count = db["datasets"][col_name].count()
            min_id = int(ass_id * docs_count / ass_cnt)
            max_id = int((ass_id + 1) * docs_count / ass_cnt)
            # May take a long time for large datasets
            docs_ids = db["datasets"][col_name].find({}, {"_id": 1})
            docs_ids = list(
                map(lambda x: x["_id"], docs_ids.sort([("_id", 1)])))
            ass_docs_ids = docs_ids[min_id:max_id]
            # Get unused documents' ids
            used_docs_ids = db["assessment"][col_name].find({}, {"_id": 1})
            used_docs_ids = list(map(lambda x: x["_id"], used_docs_ids))
            unused_docs_ids = list(set(ass_docs_ids) - set(used_docs_ids))
            # Form response
            random.shuffle(unused_docs_ids)
            # Use batches of 100 docs per request
            response = unused_docs_ids[:100]
    elif False and message["act"] == "assess_document":
        doc_id = message["doc_id"]
        is_relevant = message["is_relevant"]
        col_names = [
            v for k, v in prefix_to_col_map.items()
            if doc_id.startswith(k + "_")
        ]
        if len(col_names) != 1:
            response = False
        else:
            col_name = col_names[0]
            dataset = db["assessment"][col_name]
            doc = {"is_relevant": is_relevant, "assess_time": datetime.now()}
            dataset.replace_one({"_id": doc_id}, doc, upsert=True)
            response = True
    else:
        raise BridgeParamError("unknown query")

    return response
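
In the transform_doc branch above, every temporary resource (the uploaded file, the mkstemp file, the mkdtemp directory) is acquired inside the try and released in finally, so a failing transform cannot leak files. A reduced sketch of that acquire/release shape, with illustrative names (process stands in for the real pipeline):

import os
import tempfile

def with_temp_workspace(data, process):
    # Acquire a temporary file and directory, hand them to `process`,
    # and guarantee cleanup even if processing raises.
    fd, tmp_path = tempfile.mkstemp(prefix="upload", text=True)
    batch_dir = tempfile.mkdtemp(prefix="batch")
    try:
        with os.fdopen(fd, "w") as tmp_file:
            tmp_file.write(data)
        return process(tmp_path, batch_dir)
    finally:
        os.remove(tmp_path)
        os.rmdir(batch_dir)  # assumes `process` left the directory empty
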
Example #42
        checksum = http_download_and_hash(rustup_url(platform, version))
        if validate and checksum != rustup_hash(platform):
            print('mismatch:\n  script: %s\n  server: %s' % (
                RUSTUP_HASHES[platform], checksum))
        else:
            print('OK')
        hashes.append((platform, checksum))
    return hashes


if __name__ == '__main__':
    '''Allow invoking the module as a utility to update checksums.'''

    # Unbuffer stdout so our two-part 'Checking...' messages print correctly
    # even if there's network delay.
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)

    # Hook the requests module from the greater source tree. We can't import
    # this at the module level since we might be imported into the bootstrap
    # script in standalone mode.
    #
    # This module is necessary for correct https certificate verification.
    mod_path = os.path.dirname(__file__)
    sys.path.insert(0, os.path.join(mod_path, '..', '..', 'requests'))

    update = False
    if len(sys.argv) > 1:
        if sys.argv[1] == '--update':
            update = True
        else:
            print(USAGE)
Example #43
    def upload_haproxy_config(self, amphora_id, listener_id):
        """Upload the haproxy config

        :param amphora_id: The id of the amphora to update
        :param listener_id: The id of the listener
        """
        stream = Wrapped(flask.request.stream)
        # We have to hash here because HAProxy has a string length limitation
        # in the configuration file "peer <peername>" lines
        peer_name = octavia_utils.base64_sha1_string(amphora_id).rstrip('=')
        if not os.path.exists(util.haproxy_dir(listener_id)):
            os.makedirs(util.haproxy_dir(listener_id))

        name = os.path.join(util.haproxy_dir(listener_id), 'haproxy.cfg.new')
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        # mode 00600
        mode = stat.S_IRUSR | stat.S_IWUSR
        b = stream.read(BUFFER)
        s_io = io.StringIO()
        while b:
            # Write haproxy configuration to StringIO
            s_io.write(b.decode('utf8'))
            b = stream.read(BUFFER)

        # Since haproxy user_group is now auto-detected by the amphora agent,
        # remove it from haproxy configuration in case it was provided
        # by an older Octavia controller. This is needed in order to prevent
        # a duplicate entry for 'group' in haproxy configuration, which will
        # result an error when haproxy starts.
        new_config = re.sub(r"\s+group\s.+", "", s_io.getvalue())

        # Handle any haproxy version compatibility issues
        new_config = haproxy_compatibility.process_cfg_for_version_compat(
            new_config)

        with os.fdopen(os.open(name, flags, mode), 'w') as file:
            file.write(new_config)

        # use haproxy to check the config
        cmd = "haproxy -c -L {peer} -f {config_file} -f {haproxy_ug}".format(
            config_file=name,
            peer=peer_name,
            haproxy_ug=consts.HAPROXY_USER_GROUP_CFG)

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.error("Failed to verify haproxy file: %s %s", e, e.output)
            # Save the last config that failed validation for debugging
            os.rename(name, ''.join([name, '-failed']))
            return webob.Response(json=dict(message="Invalid request",
                                            details=e.output),
                                  status=400)

        # file ok - move it
        os.rename(name, util.config_path(listener_id))

        try:

            init_system = util.get_os_init_system()

            LOG.debug('Found init system: %s', init_system)

            init_path = util.init_path(listener_id, init_system)

            if init_system == consts.INIT_SYSTEMD:
                template = SYSTEMD_TEMPLATE
                # Render and install the network namespace systemd service
                util.install_netns_systemd_service()
                util.run_systemctl_command(
                    consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX + '.service')
            elif init_system == consts.INIT_UPSTART:
                template = UPSTART_TEMPLATE
            elif init_system == consts.INIT_SYSVINIT:
                template = SYSVINIT_TEMPLATE
                init_enable_cmd = "insserv {file}".format(file=init_path)
            else:
                raise util.UnknownInitError()

        except util.UnknownInitError:
            LOG.error("Unknown init system found.")
            return webob.Response(json=dict(
                message="Unknown init system in amphora",
                details="The amphora image is running an unknown init "
                "system.  We can't create the init configuration "
                "file for the load balancing process."),
                                  status=500)

        if init_system == consts.INIT_SYSTEMD:
            # mode 00644
            mode = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
        else:
            # mode 00755
            mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH
                    | stat.S_IXOTH)

        hap_major, hap_minor = haproxy_compatibility.get_haproxy_versions()
        if not os.path.exists(init_path):
            with os.fdopen(os.open(init_path, flags, mode), 'w') as text_file:

                text = template.render(
                    peer_name=peer_name,
                    haproxy_pid=util.pid_path(listener_id),
                    haproxy_cmd=util.CONF.haproxy_amphora.haproxy_cmd,
                    haproxy_cfg=util.config_path(listener_id),
                    haproxy_user_group_cfg=consts.HAPROXY_USER_GROUP_CFG,
                    respawn_count=util.CONF.haproxy_amphora.respawn_count,
                    respawn_interval=(
                        util.CONF.haproxy_amphora.respawn_interval),
                    amphora_netns=consts.AMP_NETNS_SVC_PREFIX,
                    amphora_nsname=consts.AMPHORA_NAMESPACE,
                    HasIFUPAll=self._osutils.has_ifup_all(),
                    haproxy_major_version=hap_major,
                    haproxy_minor_version=hap_minor)
                text_file.write(text)

        # Make sure the new service is enabled on boot
        if init_system == consts.INIT_SYSTEMD:
            util.run_systemctl_command(
                consts.ENABLE, "haproxy-{list}".format(list=listener_id))
        elif init_system == consts.INIT_SYSVINIT:
            try:
                subprocess.check_output(init_enable_cmd.split(),
                                        stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                LOG.error(
                    "Failed to enable haproxy-%(list)s service: "
                    "%(err)s %(out)s", {
                        'list': listener_id,
                        'err': e,
                        'out': e.output
                    })
                return webob.Response(json=dict(
                    message="Error enabling haproxy-{0} service".format(
                        listener_id),
                    details=e.output),
                                      status=500)

        res = webob.Response(json={'message': 'OK'}, status=202)
        res.headers['ETag'] = stream.get_md5()

        return res
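
The control flow worth copying from this handler: the new config is written to a ".new" path, validated with haproxy -c, renamed aside with a "-failed" suffix if validation fails, and only renamed over the live path once it passes. A stripped-down sketch of that validate-then-swap flow (command and paths are illustrative):

import os
import subprocess

def install_validated(candidate, live_path, check_cmd):
    # check_cmd validates the candidate file, e.g.
    # ['haproxy', '-c', '-f', candidate]
    try:
        subprocess.check_output(check_cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        # Keep the rejected candidate around for debugging.
        os.rename(candidate, candidate + '-failed')
        raise
    # Validation passed: atomically replace the live config.
    os.rename(candidate, live_path)
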
Example #44
def domain():
    global statefile
    global state
    global outdir
    global tmpfile
    global debugging

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'dho:s:')
    except getopt.GetoptError:
        usage()
    for o, a in opts:
        if o == '-h':
            usage()
        elif o == '-o':
            outdir = a
        elif o == '-d':
            debugging = True
        elif o == '-s':
            statefile = a
    if len(args) < 3:
        usage()
    targetfile = args[0]
    recip = args[1]
    groupname = args[2]

    outdir_setup()

    state = load_scanstate()

    tmpout = tempfile.mkstemp()
    tmpfile = os.fdopen(tmpout[0], 'w')
    state.register_outfile(tmpfile)

    hn = os.uname()[1]
    tmpfile.write('Subject: diffscan2 %s %s\n' % (groupname, hn))
    tmpfile.write('From: diffscan2 <noreply@%s>\n' % hn)
    tmpfile.write('To: %s\n' % recip)
    tmpfile.write('\n')

    tmpfile.write('diffscan2 results output\n\n')

    run_nmap(targetfile)
    state.calculate()
    tmpfile.write('New Open Service List\n')
    tmpfile.write('---------------------\n')
    state.print_open_alerts()
    tmpfile.write('\n')
    tmpfile.write('New Closed Service List\n')
    tmpfile.write('---------------------\n')
    state.print_closed_alerts()

    tmpfile.write('\n')
    tmpfile.write('OPREV: number of times service was open in previous ' \
        'scans\n')
    tmpfile.write('CPREV: number of times service was closed in ' \
        'previous scans\n')
    tmpfile.write('maximum previous scans stored: %d\n' % state.KEEP_SCANS)
    tmpfile.write('current total services: %d\n' % \
        state.last_scan_total_services())
    tmpfile.write('previous total services: %d\n' % \
        state.previous_scan_total_services())
    tmpfile.write('up trend: %s\n' % state.up_trend())
    tmpfile.write('down trend: %s\n' % state.down_trend())

    state.clear_outfile()
    write_scanstate()

    tmpfile.close()

    f = open(tmpout[1], 'r')
    buf = f.read()
    f.close()
    if debugging:
        sys.stdout.write(buf)
    sp = subprocess.Popen(['sendmail', '-t'], stdin=subprocess.PIPE)
    sp.communicate(buf)
    os.remove(tmpout[1])
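
The report is staged in a mkstemp file, read back, and piped to sendmail -t through communicate. Note the snippet is Python 2: under Python 3, communicate on a default Popen expects bytes, so the equivalent needs text=True (or an explicit encode). A hedged Python 3 sketch, assuming a sendmail binary is on PATH:

import subprocess

def send_report(message_text):
    # -t makes sendmail take the recipients from the To: header
    # already embedded in the message text.
    sp = subprocess.Popen(['sendmail', '-t'],
                          stdin=subprocess.PIPE, text=True)
    sp.communicate(message_text)
    return sp.returncode
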
Example #45
    def _save(self, name, content):
        full_path = self.path(name)

        # Create any intermediate directories that do not exist.
        # Note that there is a race between os.path.exists and os.makedirs:
        # if os.makedirs fails with EEXIST, the directory was created
        # concurrently, and we can continue normally. Refs #16082.
        directory = os.path.dirname(full_path)
        if not os.path.exists(directory):
            try:
                if self.directory_permissions_mode is not None:
                    # os.makedirs applies the global umask, so we reset it,
                    # for consistency with file_permissions_mode behavior.
                    old_umask = os.umask(0)
                    try:
                        os.makedirs(directory, self.directory_permissions_mode)
                    finally:
                        os.umask(old_umask)
                else:
                    os.makedirs(directory)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        if not os.path.isdir(directory):
            raise IOError("%s exists and is not a directory." % directory)

        # There's a potential race condition between get_available_name and
        # saving the file; it's possible that two threads might return the
        # same name, at which point all sorts of fun happens. So we need to
        # try to create the file, but if it already exists we have to go back
        # to get_available_name() and try again.

        while True:
            try:
                # This file has a file path that we can move.
                if hasattr(content, 'temporary_file_path'):
                    file_move_safe(content.temporary_file_path(), full_path)

                # This is a normal uploadedfile that we can stream.
                else:
                    # This fun binary flag incantation makes os.open throw an
                    # OSError if the file already exists before we open it.
                    flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL
                             | getattr(os, 'O_BINARY', 0))
                    # The current umask value is masked out by os.open!
                    fd = os.open(full_path, flags, 0o666)
                    _file = None
                    try:
                        locks.lock(fd, locks.LOCK_EX)
                        for chunk in content.chunks():
                            if _file is None:
                                mode = 'wb' if isinstance(chunk,
                                                          bytes) else 'wt'
                                _file = os.fdopen(fd, mode)
                            _file.write(chunk)
                    finally:
                        locks.unlock(fd)
                        if _file is not None:
                            _file.close()
                        else:
                            os.close(fd)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    # Ooops, the file exists. We need a new file name.
                    name = self.get_available_name(name)
                    full_path = self.path(name)
                else:
                    raise
            else:
                # OK, the file save worked. Break out of the loop.
                break

        if self.file_permissions_mode is not None:
            os.chmod(full_path, self.file_permissions_mode)

        return name
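
The O_CREAT | O_EXCL combination is what makes this loop race-safe: creation and the existence check happen in one syscall, so a concurrent writer produces an EEXIST error instead of a silently shared file. A reduced sketch of the same create-or-pick-another-name loop (the numbered-suffix fallback is illustrative, not Django's get_available_name):

import errno
import os

def exclusive_create(path, data):
    # Retry with a numbered suffix until os.open atomically creates a
    # file that did not exist before.
    candidate, attempt = path, 0
    while True:
        try:
            fd = os.open(candidate,
                         os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o666)
            break
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            attempt += 1
            candidate = '%s.%d' % (path, attempt)
    with os.fdopen(fd, 'w') as f:
        f.write(data)
    return candidate
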
Example #46
 def export_segments(self, segments): #check edgesmask
     if not self.export_folder:
         return
     cur_segments = segments
     for i in range(self.pool_count + 1):
         edges = self.edges_list[i]
         #indices = self.indces_list[i]
         filename, file_extension = os.path.splitext(self.filename)
         import platform
         if platform.system()=="Windows":
             file = '%s/%s_%d.vtk' % (self.export_folder, filename.split('\\')[-1], i)
         elif platform.system()=="Linux":
             file = '%s/%s_%d.vtk' % (self.export_folder, filename.split('/')[-1], i)
         fh, abs_path = mkstemp()
         edge_key = 0
         faces=[]
         poly=False
         with os.fdopen(fh, 'w') as new_file:
             with open(file) as old_file:
                 for line in old_file:
                     new_file.write(line)
                     if line.split(' ')[0] == 'SCALARS':
                         poly = False
                     if poly:
                         face = np.asarray([int(j) for j in line.split(' ')[1:4]])
                         faces.append(face)
                     if line.split(' ')[0] == 'POLYGONS':
                         poly = True
              
             
             new_file.write("\n \nCELL_DATA %d \nSCALARS scalars double \nLOOKUP_TABLE default" % len(faces))
             
             s_faces = np.sort(np.asarray(faces))
             f01 = s_faces[:,[0,1]]
             f02 = s_faces[:,[0,2]]
             f12 = s_faces[:,[1,2]]
             
             face_labels = np.zeros(len(faces))
             for edge_k,edge in enumerate(edges):
                 idx = []
                 #e = np.array([indices[ed[0]], indices[ed[1]]])
                 
                 idx.append( (edge==f01).all(axis=1).nonzero() )
                 idx.append( (edge==f02).all(axis=1).nonzero() )
                 idx.append( (edge==f12).all(axis=1).nonzero() )
                 for j in list(flatten(idx)):
                     face_labels[j] += cur_segments[edge_k]
             
             face_labels[face_labels<1.5]=0
             face_labels[face_labels>0]=1
             for j,face in enumerate(faces):
               
                 if j%9 == 0:    
                     new_file.write("\n") 
                 new_file.write("%d " % face_labels[j])
         os.remove(file)
         move(abs_path, file)
         if i < len(self.history_data['edges_mask']):
             cur_segments = segments[:len(self.history_data['edges_mask'][i])]
             cur_segments = cur_segments[self.history_data['edges_mask'][i]]
Example #47
def fdopen(fd, mode="r", bufsize=-1):
    fp = os.fdopen(fd, mode, bufsize)
    return _fixseek(fp, mode)
Example #48

		# Get the status-pipe file descriptor from the command-line argument
		print 'spID', args.status_pipe_id
		pipearg = int(args.status_pipe_id)
		if sys.platform == "win32":
			# On Windows the argument is an OS handle; convert it to a
			# C-runtime file descriptor before wrapping it
			pipeoutfd = msvcrt.open_osfhandle(pipearg, os.O_WRONLY)
		else:
			pipeoutfd = pipearg

		# Write the ETL status to the pipe
		# Note: could be done with os.write/os.close directly, instead of os.fdopen
		pipeout = os.fdopen(pipeoutfd, 'w')
		pipeout.write("{'status':%s}" % (not etl_failed))
		pipeout.close()
except:
	print "Unexpected error:", sys.exc_info()
Example #49
    def doCommand(self, data):
        """Process a single SMTP Command"""
        cmd = data[0:4]
        cmd = string.upper(cmd)
        keep = 1
        rv = None

        if cmd in ["EHLO", 'HELO']:
            self.state = ESMTPPassthroughSession.ST_HELO
        elif cmd == "RSET":
            self.from_address = None
            self.to_address = None
            self.helo = None
            self.dataAccum = ""
            self.state = ESMTPPassthroughSession.ST_INIT
        elif cmd == "NOOP":
            pass
        elif cmd == "QUIT":
            keep = 0
            return "221 bye", keep

        elif cmd == "MAIL":
            if self.state != ESMTPPassthroughSession.ST_HELO:
                return "503 Bad command sequence", 1
            try:
                self.from_address = self.stripAddress(data)
            except:
                return "501 invalid address syntax", 1
            self.state = ESMTPPassthroughSession.ST_MAIL

        elif cmd == "RCPT":
            if (self.state != ESMTPPassthroughSession.ST_MAIL) and (
                    self.state != ESMTPPassthroughSession.ST_RCPT):
                return "503 Bad command sequence", 1
            try:
                rec = self.stripAddress(data)
                self.to_address = rec
                self.recipients.append(rec)
            except:
                return "501 invalid address syntax", 1
            self.state = ESMTPPassthroughSession.ST_RCPT

        elif cmd == "DATA":
            if self.state != ESMTPPassthroughSession.ST_RCPT:
                return "503 Bad command sequence", 1
            self.state = ESMTPPassthroughSession.ST_DATA
            self.dataAccum = ""
            try:
                (handle, tempfilename) = tempfile.mkstemp(prefix='fuglu',
                                                          dir=self.config.get(
                                                              'main',
                                                              'tempdir'))
                self.tempfilename = tempfilename
                self.tempfile = os.fdopen(handle, 'w+b')
            except Exception as e:
                self.endsession(421, "could not create file: %s" % str(e))

            return "354 OK, Enter data, terminated with a \\r\\n.\\r\\n", 1

        if data[0:8].upper() == 'XFORWARD':
            self.store_xforward(data)

        rv = self.forwardCommand(data)

        return rv, keep
Example #50
def disable_output_buffering():
    """Disables the buffering of the stdout. Devtools command line scripts should
  do so, so that their stdout is consistent when not directly attached to a
  terminal (e.g. because another script runs devtools in a subprocess).
  """
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
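
This one-liner (also used in examples #39 and #42) only works on Python 2: Python 3 raises ValueError for unbuffered text-mode files. A sketch of the usual Python 3 equivalents, which wrap an unbuffered binary stream or simply force line buffering:

import io
import os
import sys

def disable_output_buffering_py3():
    # Text streams cannot be fully unbuffered in Python 3; instead wrap
    # an unbuffered binary stream and write through it immediately.
    raw = os.fdopen(sys.stdout.fileno(), 'wb', 0)
    sys.stdout = io.TextIOWrapper(raw, write_through=True)

# On Python 3.7+, line buffering is often enough and simpler:
# sys.stdout.reconfigure(line_buffering=True)
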
Example #51
# print people sorted by number of posts made
posts = sorted(people_list, key=lambda k: k['posts_made'])
for person in posts:
    print(str(person))

# Print people sorted by likes sent
print("SORTED BY WHO SENT THE MOST LIKES: ")
sent_likes = sorted(people_list, key=lambda k: k['likes_sent'])
for person in sent_likes:
    print(str(person))

# Print people sorted by likes received
print("SORTED BY WHO GOT THE MOST LIKES: ")
received_likes = sorted(people_list, key=lambda k: k['likes_received'])
for person in received_likes:
    print(str(person))

# Print urls of pictures sent to the chat
print("URLS:")
for line in url_list:
    print(line)

# Finally, write the URLs of the pictures sent to the chat to a text file
curDir = os.path.dirname(os.path.realpath('__file__'))

# REPLACE ****** with the filename you want to write to
with os.fdopen(os.open(curDir + "/******.txt",
                       os.O_WRONLY | os.O_CREAT | os.O_TRUNC),
               "w",
               encoding='utf-8') as file_write:
    file_write.write("\n".join(url_list))
Example #52
def create_tempfile(command):
    filed, script_file = tempfile.mkstemp(prefix='at')
    fileh = os.fdopen(filed, 'w')
    fileh.write(command)
    fileh.close()
    return script_file
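
mkstemp returns a raw OS descriptor, which is why the os.fdopen wrap (and explicit close) is needed. tempfile.NamedTemporaryFile with delete=False gives the same result with the descriptor management handled for you; a sketch of the equivalent:

import tempfile

def create_tempfile(command):
    # Same effect as the mkstemp/fdopen/close sequence above;
    # delete=False keeps the file on disk after the handle is closed.
    with tempfile.NamedTemporaryFile(mode='w', prefix='at',
                                     delete=False) as fileh:
        fileh.write(command)
        return fileh.name
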
Example #53
    def test_dialect_apply(self):
        class testA(csv.excel):
            delimiter = "\t"

        class testB(csv.excel):
            delimiter = ":"

        class testC(csv.excel):
            delimiter = "|"

        csv.register_dialect('testC', testC)
        try:
            fd, name = tempfile.mkstemp()
            fileobj = os.fdopen(fd, "w+b")
            try:
                writer = csv.writer(fileobj)
                writer.writerow([1, 2, 3])
                fileobj.seek(0)
                self.assertEqual(fileobj.read(), "1,2,3\r\n")
            finally:
                fileobj.close()
                os.unlink(name)

            fd, name = tempfile.mkstemp()
            fileobj = os.fdopen(fd, "w+b")
            try:
                writer = csv.writer(fileobj, testA)
                writer.writerow([1, 2, 3])
                fileobj.seek(0)
                self.assertEqual(fileobj.read(), "1\t2\t3\r\n")
            finally:
                fileobj.close()
                os.unlink(name)

            fd, name = tempfile.mkstemp()
            fileobj = os.fdopen(fd, "w+b")
            try:
                writer = csv.writer(fileobj, dialect=testB())
                writer.writerow([1, 2, 3])
                fileobj.seek(0)
                self.assertEqual(fileobj.read(), "1:2:3\r\n")
            finally:
                fileobj.close()
                os.unlink(name)

            fd, name = tempfile.mkstemp()
            fileobj = os.fdopen(fd, "w+b")
            try:
                writer = csv.writer(fileobj, dialect='testC')
                writer.writerow([1, 2, 3])
                fileobj.seek(0)
                self.assertEqual(fileobj.read(), "1|2|3\r\n")
            finally:
                fileobj.close()
                os.unlink(name)

            fd, name = tempfile.mkstemp()
            fileobj = os.fdopen(fd, "w+b")
            try:
                writer = csv.writer(fileobj, dialect=testA, delimiter=';')
                writer.writerow([1, 2, 3])
                fileobj.seek(0)
                self.assertEqual(fileobj.read(), "1;2;3\r\n")
            finally:
                fileobj.close()
                os.unlink(name)

        finally:
            csv.unregister_dialect('testC')
Example #54
 def _open_file(path):
     return os.fdopen(
         os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600), 'w')
Example #55
 def read_file(self, buffering=DEFAULT_BUFSIZE):
     """Get a file-like object for the read end of the pipe."""
     return os.fdopen(self.read_fd, 'rb', buffering)
Example #56
    def createComposeFile(self, utilityobj, max_api_users=1):

        logging.info("Creating Compose configuration files")
        content = Template('''

version: "2"
services:


  apporbit-chef:
    container_name: apporbit-chef
    image: ${APPORBIT_REGISTRY}apporbit/apporbit-chef:2.0
    mem_limit: 2100000000
    hostname: ${APPORBIT_HOST}
    dns:  
      - 8.8.8.8
      - ${APPORBIT_DNS}
    dns_search: 
      - ${APPORBIT_DNSSEARCH}
    restart: always
    network_mode: "bridge"
    ports:
      - "9443:9443"
    environment:
      - UPGRADE
    volumes:
      - ${APPORBIT_LIB}/chef-server:/var/opt/chef-server:Z
      - ${APPORBIT_KEY}:/var/opt/chef-server/nginx/ca/:Z
      - chef-conf:/etc/chef-server/:Z

  apporbit-rmq:
    container_name: apporbit-rmq
    image: ${APPORBIT_REGISTRY}apporbit/apporbit-rmq:${APPORBIT_BUILDID}
    hostname: rmq
    dns:  
      - 8.8.8.8
      - ${APPORBIT_DNS}
    dns_search: 
      - ${APPORBIT_DNSSEARCH}
    restart: always
    network_mode: "bridge"
    mem_limit: 2100000000
    environment:
      - RABBITMQ_VM_MEMORY_HIGH_WATERMARK_PAGING_RATIO=0.1
    volumes:
      - rabbitmq-data:/var/lib/rabbitmq:Z
      - ${APPORBIT_LOG}/rmq:/var/log/rabbitmq:Z

  apporbit-db:
    container_name: apporbit-db
    image: mysql:5.6.24
    network_mode: "bridge"
    hostname: db
    dns:  
      - 8.8.8.8
      - ${APPORBIT_DNS}
    dns_search: 
      - ${APPORBIT_DNSSEARCH}
    restart: always
    ports:
      - "3306"
    environment:
      - MYSQL_USER=root
      - MYSQL_ROOT_PASSWORD=admin
      - MYSQL_PASSWORD=admin
      - MYSQL_DATABASE=apporbit_controller
    volumes:
      - ${APPORBIT_LIB}/mysql:/var/lib/mysql:Z


  apporbit-consul:
    container_name: apporbit-consul
    image: ${APPORBIT_REGISTRY}apporbit/consul:${APPORBIT_BUILDID}
    command: -server -bootstrap --domain=${APPORBIT_DOMAIN}
    hostname: consul
    dns:  
      - 8.8.8.8
      - ${APPORBIT_DNS}
    dns_search: 
      - ${APPORBIT_DNSSEARCH}
    restart: always
    network_mode: "bridge"
    ports:
      - "8400:8400"
      - "8500:8500"
      - "53:53/udp"
      - "53:53"
      - "8301:8301/udp"
      - "8302:8302/udp"
    volumes:
      - ${APPORBIT_LIB}/consul:/data:Z

  apporbit-services:
    container_name: apporbit-services
    image: ${APPORBIT_REGISTRY}apporbit/apporbit-services:${APPORBIT_BUILDID}
    restart: always
    hostname: services
    dns:  
      - 8.8.8.8
      - ${APPORBIT_DNS}
    dns_search: 
      - ${APPORBIT_DNSSEARCH}
    network_mode: "bridge"
    environment:
      - TERM=xterm
      - GEMINI_INT_REPO=${APPORBIT_REPO}
      - CHEF_URL=https://${APPORBIT_CHEFHOST}:9443
      - OFFLINE_MODE=${OFFLINE_MODE}
      - UPGRADE
      - MYSQL_HOST=db
      - MYSQL_USERNAME=root
      - MYSQL_PASSWORD=admin
      - MYSQL_DATABASE=apporbit_mist
      - GEMINI_STACK_IPANEMA=1
      - LOG_LEVEL=${APPORBIT_LOGLEVEL}
      - AO_CONTROLLER_HOST=${APPORBIT_HOST}
    links:
      - apporbit-db:db 
      - apporbit-rmq:rmq
    volumes_from:
      - apporbit-chef
    depends_on:
      - apporbit-captain
    volumes:
      - ${APPORBIT_LIB}/sshKey_root:/root:Z
      - ${APPORBIT_CONF}/apporbit.ini:/etc/apporbit.ini:Z
      - ${APPORBIT_LIB}/services:/var/lib/apporbit:Z
      - ${APPORBIT_LOG}/services:/var/log/apporbit:Z
      - ${APPORBIT_LIB}/chefconf:/opt/apporbit/chef:Z
      ${SERVICES_DEVMOUNT}


  apporbit-locator:
    container_name: apporbit-locator
    image: ${APPORBIT_REGISTRY}apporbit/locator:${APPORBIT_BUILDID}
    hostname: locator
    dns:  
      - 8.8.8.8
      - ${APPORBIT_DNS}
    dns_search: 
      - ${APPORBIT_DNSSEARCH}
    restart: always
    network_mode: "bridge"
    ports:
      - "8080"
    environment:
      - CONSUL_IP_PORT=http://consul:8500
    links:
      - apporbit-consul:consul
    volumes:
      - ${APPORBIT_LIB}/locator:/data:Z
      - ${APPORBIT_LOG}/locator:/var/log/apporbit:Z


  apporbit-svcd:
    container_name: apporbit-svcd
    image: ${APPORBIT_REGISTRY}apporbit/svcd:${APPORBIT_BUILDID}
    hostname: svcd
    dns:  
      - 8.8.8.8
      - ${APPORBIT_DNS}
    dns_search: 
      - ${APPORBIT_DNSSEARCH}
    restart: always
    network_mode: "bridge"
    environment:
      - CONTROLLER_ALIAS_NAME=${APPORBIT_HOST}
    ports:
      - "8080"
    links:
      - apporbit-db:db 
      - apporbit-locator:locator
    volumes:
      - ${APPORBIT_LOG}/svcd:/var/log/apporbit:Z


  apporbit-captain:
    container_name: apporbit-captain
    image: ${APPORBIT_REGISTRY}apporbit/captain:${APPORBIT_BUILDID}
    restart: always
    hostname: captain
    dns:  
      - 8.8.8.8
      - ${APPORBIT_DNS}
    dns_search: 
      - ${APPORBIT_DNSSEARCH}
    network_mode: "bridge"
    ports:
      - "8080"
      - "8091:8091"
    environment:
      - CONTROLLER_ALIAS_NAME=${APPORBIT_HOST}
      - AO_REGISTRY=${DATASVC_REGISTRY}
    links:
      - apporbit-svcd:svcd
    volumes:
      - ${APPORBIT_LOG}/captain:/var/log/apporbit:Z


  apporbit-controller:
    container_name: apporbit-controller
    image: ${APPORBIT_REGISTRY}apporbit/apporbit-controller:${APPORBIT_BUILDID}
    hostname: ${APPORBIT_HOST}
    dns:  
      - 8.8.8.8
      - ${APPORBIT_DNS}
    dns_search: 
      - ${APPORBIT_DNSSEARCH}
    restart: always
    network_mode: "bridge"
    ports:
      - "80:80"
      - "443:443"
    environment:
      - TERM=xterm
      - ON_PREM_MODE=true
      - OFFLINE_MODE=${OFFLINE_MODE}
      - THEME_NAME=apporbit-v2
      - CURRENT_API_VERSION=v2
      - ONPREM_EMAIL_ID=${APPORBIT_LOGINID}
      - LOG_LEVEL=${APPORBIT_LOGLEVEL}
      - MAX_POOL_SIZE=${MAX_API_USERS}
      - CHEF_URL=https://${APPORBIT_CHEFHOST}:9443
      - AO_HOST=${APPORBIT_HOST}
      - CONSUL_IP=consul
      - CONSUL_PORT=8500
      - CAPTAIN_TCP_ADDR=captain
      - CAPTAIN_TCP_PORT=8080
      - GEMINI_INT_REPO=${APPORBIT_REPO}
      - MYSQL_HOST=db
      - MYSQL_PORT=3306
      - MYSQL_USERNAME=root
      - MYSQL_PASSWORD=admin
      - MYSQL_DATABASE=apporbit_controller
      - AO_REGISTRY=${DATASVC_REGISTRY}
    links:
      - apporbit-db:db 
      - apporbit-rmq:rmq
      - apporbit-svcd:svcd
      - apporbit-consul:consul
      - apporbit-captain:captain
    volumes_from:
      - apporbit-chef
    volumes:
      - ${APPORBIT_LOG}/controller:/var/log/apporbit:Z
      - ${APPORBIT_KEY}:/home/apporbit/apporbit-controller/sslkeystore:Z
      - ${APPORBIT_LIB}/controller/ui:/var/lib/apporbit/ui:Z
      ${CONTROLLER_DEVMOUNT}

  apporbit-docs:
    container_name: apporbit-docs
    image: ${APPORBIT_REGISTRY}apporbit/apporbit-docs:${APPORBIT_BUILDID}
    restart: always
    hostname: docs
    dns:  
      - 8.8.8.8
      - ${APPORBIT_DNS}
    dns_search: 
      - ${APPORBIT_DNSSEARCH}
    network_mode: "bridge"
    ports:
      - "9080:80"
    depends_on:
      - apporbit-controller

networks:
  default:
    external:
      name: bridge

volumes:
  chef-conf:
    driver: local
  rabbitmq-data:
    driver: local

        ''')

        # If self.composeFile already exists, skip creating it.
        flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
        try:
            compose_file = os.open(self.composeFile, flags)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                raise
        else:
            with os.fdopen(compose_file, 'w') as file_obj:
                # cat apporbit-compose-template.yml |awk '{ for(i=1; i<=NF; i++)
                # { if (match($i, /\$\{([a-z|A-Z|0-9|_]+)\}/))  {print substr($i,RSTART+2,RLENGTH-3)} } }' |sort |uniq
                if self.volume_mount:
                    logging.warning("DEVELOPER MOUNT ENABLED")
                    services_devmount = '- ' + self.volume_mount + '/Gemini-poc-stack:/home/apporbit/apporbit-services:z'
                    if not os.path.isfile(
                            self.volume_mount +
                            "/Gemini-poc-stack/mist-cgp/run.jar"):
                        pull_mist_binary = "wget -P " + self.volume_mount + "/Gemini-poc-stack/mist-cgp http://repos.gsintlab.com/repos/mist/integration/run.jar"
                        utilityobj.cmdExecute(pull_mist_binary,
                                              'pull mist binary ', True)
                    controller_devmount = '- ' + self.volume_mount + '/Gemini-poc-mgnt:/home/apporbit/apporbit-controller:z'
                    gemfile = self.volume_mount + "/Gemini-poc-mgnt/Gemfile"
                    if not os.path.isfile(gemfile):
                        rename_gemfile = "cp -f " + gemfile + "-master " + gemfile
                        utilityobj.cmdExecute(
                            rename_gemfile, 'copy Gemfile-master as Gemfile ',
                            True)

                else:
                    services_devmount = ''
                    controller_devmount = ''

                # aoreg ends with '/', so both local and fully qualified images are handled in the template
                aoreg = self.apporbit_registry
                if aoreg:
                    aoreg += '/'

                datareg = self.apporbit_registry
                if self.datasvc_registry != '':
                    datareg = self.datasvc_registry

                if self.apporbit_domain:
                    domain = self.apporbit_domain
                else:
                    domain = 'consul.'

                content = content.safe_substitute(
                    APPORBIT_CHEFHOST=self.chef_host,
                    APPORBIT_CONF=self.APPORBIT_CONF,
                    APPORBIT_HOST=self.apporbit_host,
                    APPORBIT_DOMAIN=domain,
                    APPORBIT_DNS=self.consul_host,
                    APPORBIT_DNSSEARCH=domain,
                    APPORBIT_KEY=self.APPORBIT_KEY,
                    APPORBIT_LIB=self.APPORBIT_DATA,
                    APPORBIT_LOG=self.APPORBIT_LOG,
                    APPORBIT_LOGINID=self.apporbit_loginid,
                    APPORBIT_LOGLEVEL=self.log_level,
                    APPORBIT_REGISTRY=aoreg,
                    APPORBIT_REPO=self.apporbit_repo,
                    APPORBIT_BUILDID=self.buildid,
                    MAX_API_USERS=max_api_users,
                    DATASVC_REGISTRY=datareg,
                    SERVICES_DEVMOUNT=services_devmount,
                    CONTROLLER_DEVMOUNT=controller_devmount,
                    OFFLINE_MODE=self.offline_mode,
                )
                file_obj.write(content)
                file_obj.close()
                logging.info("Create composeFile success!")

        return True
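
The O_CREAT | O_EXCL open here inverts the retry loop from example #45: EEXIST is treated as "the compose file already exists, skip writing it" rather than "pick another name". Condensed into a standalone sketch:

import errno
import os

def create_once(path, content):
    # Write `content` only if `path` does not exist yet; return whether
    # this call was the one that created it.
    try:
        fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    except OSError as e:
        if e.errno == errno.EEXIST:
            return False
        raise
    with os.fdopen(fd, 'w') as f:
        f.write(content)
    return True
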
Example #57
print(r_p_h1, w_p_h1, r_h1_p, w_h1_p, r_p_h2, w_p_h2, r_h2_p, w_h2_p)

pid = os.fork()

if pid > 0:
    # PARENT
    pid = os.fork()

    if pid > 0:
        # PARENT
        os.close(r_p_h1)
        os.close(w_h1_p)
        os.close(r_p_h2)
        os.close(w_h2_p)

        r = os.fdopen(r_h1_p)
        r2 = os.fdopen(r_h2_p)

        for i in range(10):
            escribir = str(random.randint(1, 20)) + "\n"
            os.write(w_p_h1, escribir.encode(encoding="utf-8"))
            linea = r.readline()
            print("PADRE: he recibido del hijo 1: " + linea)

            escribir = str(random.randint(1, 20)) + "\n"
            os.write(w_p_h2, escribir.encode(encoding="utf-8"))
            linea2 = r2.readline()
            print("PADRE: he recibido del hijo 2: " + linea2)

        os.close(w_p_h1)
        os.close(w_p_h2)
Example #58
 def write_file(self, buffering=DEFAULT_BUFSIZE):
     """Get a file-like object for the write end of the pipe."""
     return os.fdopen(self.write_fd, 'wb', buffering)
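
Examples #55 and #58 are the two halves of one helper: wrapping the descriptors from os.pipe() so each end behaves like an ordinary file object. A self-contained sketch of how such a pair fits together (the Pipe holder class is assumed, not from the original source):

import os

DEFAULT_BUFSIZE = -1

class Pipe(object):
    def __init__(self):
        self.read_fd, self.write_fd = os.pipe()

    def read_file(self, buffering=DEFAULT_BUFSIZE):
        """Get a file-like object for the read end of the pipe."""
        return os.fdopen(self.read_fd, 'rb', buffering)

    def write_file(self, buffering=DEFAULT_BUFSIZE):
        """Get a file-like object for the write end of the pipe."""
        return os.fdopen(self.write_fd, 'wb', buffering)

p = Pipe()
with p.write_file() as w:
    w.write(b'hello through the pipe\n')
with p.read_file() as r:
    print(r.readline())  # b'hello through the pipe\n'
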
Example #59
    def create_iface(self, ipv6=False, dhcpserver_opts=None):
        '''Create test interface with DHCP server behind it'''

        # run "router-side" networkd in own mount namespace to shield it from
        # "client-side" configuration and networkd
        (fd, script) = tempfile.mkstemp(prefix='networkd-router.sh')
        self.addCleanup(os.remove, script)
        with os.fdopen(fd, 'w+') as f:
            f.write(
                '''\
#!/bin/sh -eu
mkdir -p /run/systemd/network
mkdir -p /run/systemd/netif
mount -t tmpfs none /run/systemd/network
mount -t tmpfs none /run/systemd/netif
[ ! -e /run/dbus ] || mount -t tmpfs none /run/dbus
# create router/client veth pair
cat << EOF > /run/systemd/network/test.netdev
[NetDev]
Name=%(ifr)s
Kind=veth

[Peer]
Name=%(ifc)s
EOF

cat << EOF > /run/systemd/network/test.network
[Match]
Name=%(ifr)s

[Network]
Address=192.168.5.1/24
%(addr6)s
DHCPServer=yes

[DHCPServer]
PoolOffset=10
PoolSize=50
DNS=192.168.5.1
%(dhopts)s
EOF

# run networkd as in systemd-networkd.service
exec $(systemctl cat systemd-networkd.service | sed -n '/^ExecStart=/ { s/^.*=//; p}')
''' % {
                    'ifr': self.if_router,
                    'ifc': self.iface,
                    'addr6': ipv6 and 'Address=2600::1/64' or '',
                    'dhopts': dhcpserver_opts or ''
                })

            os.fchmod(fd, 0o755)

        subprocess.check_call([
            'systemd-run', '--unit=networkd-test-router.service', '-p',
            'InaccessibleDirectories=-/etc/systemd/network', '-p',
            'InaccessibleDirectories=-/run/systemd/network', '-p',
            'InaccessibleDirectories=-/run/systemd/netif',
            '--service-type=notify', script
        ])

        # wait until devices got created
        for _ in range(50):
            out = subprocess.check_output(
                ['ip', 'a', 'show', 'dev', self.if_router])
            if b'state UP' in out and b'scope global' in out:
                break
            time.sleep(0.1)
Example #60
    def dump_sigtask(self, fn, task, stampbase, runtime):

        tid = fn + ":" + task
        referencestamp = stampbase
        if isinstance(runtime, str) and runtime.startswith("customfile"):
            sigfile = stampbase
            referencestamp = runtime[11:]
        elif runtime and tid in self.taskhash:
            sigfile = stampbase + "." + task + ".sigdata" + "." + self.taskhash[tid]
        else:
            sigfile = stampbase + "." + task + ".sigbasedata" + "." + self.basehash[tid]

        bb.utils.mkdirhier(os.path.dirname(sigfile))

        data = {}
        data['task'] = task
        data['basewhitelist'] = self.basewhitelist
        data['taskwhitelist'] = self.taskwhitelist
        data['taskdeps'] = self.taskdeps[fn][task]
        data['basehash'] = self.basehash[tid]
        data['gendeps'] = {}
        data['varvals'] = {}
        data['varvals'][task] = self.lookupcache[fn][task]
        for dep in self.taskdeps[fn][task]:
            if dep in self.basewhitelist:
                continue
            data['gendeps'][dep] = self.gendeps[fn][dep]
            data['varvals'][dep] = self.lookupcache[fn][dep]

        if runtime and tid in self.taskhash:
            data['runtaskdeps'] = self.runtaskdeps[tid]
            data['file_checksum_values'] = [(os.path.basename(f), cs) for f,cs in self.file_checksum_values[tid]]
            data['runtaskhashes'] = {}
            for dep in data['runtaskdeps']:
                data['runtaskhashes'][dep] = self.get_unihash(dep)
            data['taskhash'] = self.taskhash[tid]

        taint = self.read_taint(fn, task, referencestamp)
        if taint:
            data['taint'] = taint

        if runtime and tid in self.taints:
            if 'nostamp:' in self.taints[tid]:
                data['taint'] = self.taints[tid]

        computed_basehash = calc_basehash(data)
        if computed_basehash != self.basehash[tid]:
            bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[tid], tid))
        if runtime and tid in self.taskhash:
            computed_taskhash = calc_taskhash(data)
            if computed_taskhash != self.taskhash[tid]:
                bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[tid], tid))
                sigfile = sigfile.replace(self.taskhash[tid], computed_taskhash)

        fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
        try:
            with os.fdopen(fd, "wb") as stream:
                pickle.dump(data, stream, -1)
                stream.flush()
            os.chmod(tmpfile, 0o664)
            os.rename(tmpfile, sigfile)
        except (OSError, IOError) as err:
            try:
                os.unlink(tmpfile)
            except OSError:
                pass
            raise err
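
The write at the end of dump_sigtask is the classic atomic-publish pattern: pickle into a mkstemp file in the destination directory, then os.rename over the final name, unlinking the temp file on any error. Isolated into a sketch (POSIX rename semantics assumed; within one filesystem the swap is atomic):

import os
import pickle
import tempfile

def atomic_pickle_dump(data, destination):
    # Create the temp file next to the destination so the final rename
    # never crosses a filesystem boundary.
    fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(destination) or '.',
                                   prefix='sigtask.')
    try:
        with os.fdopen(fd, 'wb') as stream:
            pickle.dump(data, stream, -1)
        os.chmod(tmpfile, 0o664)
        os.rename(tmpfile, destination)
    except OSError:
        try:
            os.unlink(tmpfile)
        except OSError:
            pass
        raise
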