Example #1
    def __init__(self, config, timeout):
        # config
        self.host = config["host"]
        self.username = config["username"]
        self.python = config["python"]
        self.port = config["port"]
        self.interval = config["interval"]
        self.custom = config["custom"]
        self.telegraf = config["telegraf"]
        self.comment = config["comment"]
        self.config = AgentConfig(config)

        # connection
        self.session = None
        self.ssh = SecuredShell(self.host, self.port, self.username, timeout)
        self.incoming_queue = Queue.Queue()
        self.buffer = ""

        self.reader = MonitoringReader(self.incoming_queue)
        handle, cfg_path = tempfile.mkstemp(".cfg", "agent_")
        os.close(handle)
        self.path = {
            # Destination path on remote host
            "AGENT_REMOTE_FOLDER": "/tmp/",
            # Source path on tank
            "AGENT_LOCAL_FOLDER": os.path.dirname(__file__) + "/agent",
            "TELEGRAF_REMOTE_PATH": "/tmp/telegraf",
            "TELEGRAF_LOCAL_PATH": self.telegraf,
        }
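This mkstemp-then-close pattern recurs throughout these examples: tempfile.mkstemp returns an already-open descriptor along with the path, so when only the path is needed the descriptor must be closed explicitly or it leaks. A minimal standalone sketch of the idiom (suffix and prefix mirror the example above):

    import os
    import tempfile

    # mkstemp returns (fd, path); the descriptor is open and owned by the caller
    fd, cfg_path = tempfile.mkstemp(".cfg", "agent_")
    os.close(fd)  # only the path is passed around, so release the descriptor
    # ... use cfg_path, then os.unlink(cfg_path) when finished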
Example #2
    def test_create_volume_from_image_exception(self):
        """Verify that create volume from image, the volume status is
        'downloading'."""
        dst_fd, dst_path = tempfile.mkstemp()
        os.close(dst_fd)

        self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path)

        image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
        # creating volume testdata
        volume_id = 1
        db.volume_create(self.context,
                         {'id': volume_id,
                          'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                          'display_description': 'Test Desc',
                          'size': 20,
                          'status': 'creating',
                          'host': 'dummy'})

        self.assertRaises(exception.ImageNotFound,
                          self.volume.create_volume,
                          self.context,
                          volume_id, None, None, None,
                          None,
                          image_id)
        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume['status'], "error")
        # cleanup
        db.volume_destroy(self.context, volume_id)
        os.unlink(dst_path)
Example #3
def onConnect():
#    if keyAgent and options['agent']:
#        cc = protocol.ClientCreator(reactor, SSHAgentForwardingLocal, conn)
#        cc.connectUNIX(os.environ['SSH_AUTH_SOCK'])
    if hasattr(conn.transport, 'sendIgnore'):
        _KeepAlive(conn)
    if options.localForwards:
        for localPort, hostport in options.localForwards:
            s = reactor.listenTCP(localPort,
                        forwarding.SSHListenForwardingFactory(conn,
                            hostport,
                            SSHListenClientForwardingChannel))
            conn.localForwards.append(s)
    if options.remoteForwards:
        for remotePort, hostport in options.remoteForwards:
            log.msg('asking for remote forwarding for %s:%s' %
                    (remotePort, hostport))
            conn.requestRemoteForwarding(remotePort, hostport)
        reactor.addSystemEventTrigger('before', 'shutdown', beforeShutdown)
    if not options['noshell'] or options['agent']:
        conn.openChannel(SSHSession())
    if options['fork']:
        if os.fork():
            os._exit(0)
        os.setsid()
        for i in range(3):
            try:
                os.close(i)
            except OSError as e:
                import errno
                if e.errno != errno.EBADF:
                    raise
Example #4
 def close_safe(fd):
     '''Close a file descriptor ignoring any exception it generates'''
     #pylint: disable-msg=W0704
     try:
         os.close(fd)
     except OSError:
         pass
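On Python 3.4+ the same ignore-on-close behavior can be expressed with contextlib.suppress; a minimal equivalent sketch:

    import contextlib
    import os

    def close_safe(fd):
        '''Close a file descriptor, ignoring any OSError it raises'''
        with contextlib.suppress(OSError):
            os.close(fd)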
Example #5
 def print_xcf(self, filename_or_obj, *args, **kwargs):
     "Writes the figure to a GIMP XCF image file"
     # If filename_or_obj is a file-like object we need a temporary file for
     # GIMP's output too...
     if is_string(filename_or_obj):
         out_temp_handle, out_temp_name = None, filename_or_obj
     else:
         out_temp_handle, out_temp_name = tempfile.mkstemp(suffix='.xcf')
     try:
         # Create a temporary file and write the "layer" to it as a PNG
         in_temp_handle, in_temp_name = tempfile.mkstemp(suffix='.png')
         try:
             FigureCanvasAgg.print_png(self, in_temp_name, *args, **kwargs)
             run_gimp_script(
                 SINGLE_LAYER_SCRIPT.format(
                     input=quote_string(in_temp_name),
                     output=quote_string(out_temp_name)))
         finally:
             os.close(in_temp_handle)
             os.unlink(in_temp_name)
     finally:
         if out_temp_handle:
             os.close(out_temp_handle)
             # If we wrote the XCF to a temporary file, write its content to
             # the file-like object we were given (the copy is chunked as
             # XCF files can get pretty big)
             with open(out_temp_name, 'rb') as source:
                 # sentinel must be b'' so it matches the bytes read in 'rb' mode
                 for chunk in iter(lambda: source.read(131072), b''):
                     filename_or_obj.write(chunk)
             os.unlink(out_temp_name)
Example #6
    def config_control(control_path):
        """
        Edit the control file to adapt it to the current environment.

        Replace CLIENTIP with the guest IP and SERVERIP with the host IP.

        :return: Path of a temp file containing the result of the replacements.
        """
        pattern2repl_dict = {r'CLIENTIP': vm.get_address(),
                             r'SERVERIP': utils_net.get_host_ip_address(params)}
        control_file = open(control_path)
        lines = control_file.readlines()
        control_file.close()

        for pattern, repl in pattern2repl_dict.items():
            for index in range(len(lines)):
                line = lines[index]
                lines[index] = re.sub(pattern, repl, line)

        fd, temp_control_path = tempfile.mkstemp(prefix="control",
                                                 dir=data_dir.get_tmp_dir())
        os.close(fd)

        temp_control = open(temp_control_path, "w")
        temp_control.writelines(lines)
        temp_control.close()
        return temp_control_path
Example #7
def daemonize( errfile ):
    """
    Detach process and become a daemon.
    """
    pid = os.fork()
    if pid:
        os._exit(0)

    os.setsid()
    signal.signal(signal.SIGHUP, signal.SIG_IGN)
    os.umask(0)

    pid = os.fork()
    if pid:
        os._exit(0)

    os.chdir("/")
    for fd in range(0,20):
        try:
            os.close(fd)
        except OSError:
            pass

    sys.stdin = open("/dev/null","r")
    sys.stdout = open("/dev/null","w")
    sys.stderr = ErrorLog( errfile )
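Rebinding sys.stdin/sys.stdout only affects Python-level I/O; code (or child processes) writing to descriptors 0-2 directly still hits the old targets. A hedged sketch of an fd-level variant using os.dup2 (function name is illustrative):

    import os

    def _redirect_std_fds(path="/dev/null"):
        # open the target once, then duplicate it onto stdin/stdout/stderr
        devnull = os.open(path, os.O_RDWR)
        for fd in (0, 1, 2):
            os.dup2(devnull, fd)
        if devnull > 2:
            os.close(devnull)  # the duplicates keep the file open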
Example #8
def get_img_id(doc, image, bpc, nr_channels, cs, p = default_cfg()):
    spec = doc.image_definition()
    spec.dimensions(*p.img_dim)
    spec.bits_per_component(bpc)
    spec.dpi(*p.dpi)
    spec.color_space(cs)
    spec.format(jagpdf.IMAGE_FORMAT_NATIVE)
    img_data = g_img_cache.img_data(image, bpc, nr_channels, *p.img_dim)
    if p.from_file:
        handle, tmp_file = tempfile.mkstemp()
        img_data.tofile(open(tmp_file,'wb'))
        os.close(handle)
        g_temp_files.add(tmp_file)

    if p.from_file:
        spec.file_name(tmp_file)
    else:
        spec.data(img_data)

    desc = "%d levels per channel (%d %s)" % (2**bpc, bpc, bpc > 1 and "bits" or "bit")
    if p.spec_fn:
        desc2 = p.spec_fn(spec)
        if desc2:
            desc = desc2
    id_ = doc.image_load(spec)
    testlib.must_throw(doc.image_load, spec) # cannot load the same spec twice
    return id_, desc
Example #9
    def _write_file_chunks(self, chunk_map, file_extension=""):
        """
        Given a mapping of chunks, write their contents to a temporary file,
        returning the path to that file.

        Returned file path should be manually removed by the user.

        :param chunk_map: Mapping of integer index to file-like chunk
        :type chunk_map: dict of (int, StringIO)
        :param file_extension: String extension to suffix the temporary file
            with
        :type file_extension: str

        :raises OSError: OS problems creating temporary file or writing it out.

        :return: Path to temporary combined file
        :rtype: str

        """
        # Make sure write dir exists...
        if not os.path.isdir(self.working_dir):
            safe_create_dir(self.working_dir)
        tmp_fd, tmp_path = tempfile.mkstemp(file_extension, dir=self.working_dir)
        self.log.debug("Combining chunks into temporary file: %s", tmp_path)
        # tmp_file = os.fdopen(tmp_fd, 'wb')
        tmp_file = open(tmp_path, "wb")
        for idx, chunk in sorted(chunk_map.items(), key=lambda p: p[0]):
            data = chunk.read()
            tmp_file.write(data)
        tmp_file.close()
        # the mkstemp descriptor is separate from the open() handle above,
        # so it still needs an explicit close
        os.close(tmp_fd)
        return tmp_path
Example #10
def save_data(data, output_filename):
    """
    Save data to file.

    If the file already exists, the function will not save the data and return
    1

    Parameters
    ----------
    data : str
         String containing the data you wish to write out to a file

    output_filename : str
         Path (full or relative) to the file you will save the data into.

    Returns
    -------
    out : int
        Return 0 if the data was saved successfully. Return 1 if the file 
        already exists.

    Hint
    ----
    Check out the os module for determining whether a file exists already.

    """
    if not os.path.exists(output_filename): # output_file doesn't currently exist
        
        fd = os.open(output_filename,os.O_WRONLY|os.O_CREAT)
        os.write(fd,data)
        os.close(fd)
        return 0
    else:
        return 1
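The exists-then-open sequence above is racy: another process can create the file between the os.path.exists check and os.open. Passing os.O_EXCL makes the create-if-absent step atomic; a sketch of that variant (function name is illustrative):

    import errno
    import os

    def save_data_atomic(data, output_filename):
        try:
            # O_EXCL makes creation fail if the file already exists (atomic)
            fd = os.open(output_filename, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        except OSError as e:
            if e.errno == errno.EEXIST:
                return 1
            raise
        try:
            os.write(fd, data if isinstance(data, bytes) else data.encode("utf-8"))
        finally:
            os.close(fd)
        return 0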
Example #11
    def create(obj, dummy_eng):
        #FIXME change share tmp directory
        from invenio.config import CFG_TMPSHAREDDIR
        from invenio.legacy.bibsched.bibtask import task_low_level_submission, \
            bibtask_allocate_sequenceid
        d = Deposition(obj)

        sip = d.get_latest_sip(sealed=False)
        sip.seal()

        tmp_file_fd, tmp_file_path = mkstemp(
            prefix="webdeposit-%s-%s" % (d.id, sip.uuid),
            suffix='.xml',
            dir=CFG_TMPSHAREDDIR,
        )

        os.write(tmp_file_fd, sip.package)
        os.close(tmp_file_fd)

        # Trick to have access to task_sequence_id in subsequent tasks.
        d.workflow_object.task_sequence_id = bibtask_allocate_sequenceid()

        task_id = task_low_level_submission(
            'bibupload', 'webdeposit',
            '-r' if 'recid' in sip.metadata else '-i', tmp_file_path,
            '-I', str(d.workflow_object.task_sequence_id)
        )

        sip.task_ids.append(task_id)

        d.update()
Example #12
def processFileSize(fileName):
    global rpmDict
    sizeDict = rpmDict['fileSizes']

    fd = os.open(fileName, os.O_RDONLY)
    header = transactionSet.hdrFromFdno(fd)
    os.close(fd)

    files = header.fiFromHeader()

    for fileObj in files:
        fileName = fileObj[FILEOBJ_FILENAME]
        size = roundUpTo(fileObj[FILEOBJ_SIZE], BLOCK_SIZE)
        mode = fileObj[FILEOBJ_MODE]

        if stat.S_ISDIR(mode):
            if fileName not in sizeDict:
                sizeDict[fileName] = BLOCK_SIZE
        elif stat.S_ISREG(mode) or stat.S_ISLNK(mode):
            dirName = os.path.dirname(fileName)
            if dirName not in sizeDict:
                sizeDict[dirName] = BLOCK_SIZE
            if stat.S_ISLNK(mode):
                sizeDict[dirName] += BLOCK_SIZE
            else:
                sizeDict[dirName] += size
        else:
            print "Didn't know how to handle %s" % fileName
Example #13
 def setUp(self):
   self.packager = packager_app.PackagerApp()
   self.input = os.path.join(
       test_env.SRC_DIR, 'media', 'test', 'data', 'bear-1280x720.mp4')
   self.tmpdir = tempfile.mkdtemp()
   fd, self.output = tempfile.mkstemp(dir=self.tmpdir)
   os.close(fd)
Example #14
def install_nvm(options):
  print '---------- Installing NVM ---------'
  check_run_quick('sudo chmod 775 /usr/local')
  check_run_quick('sudo mkdir -m 777 -p /usr/local/node /usr/local/nvm')

  result = check_fetch(
    'https://raw.githubusercontent.com/creationix/nvm/{nvm_version}/install.sh'
    .format(nvm_version=NVM_VERSION))

  fd, temp = tempfile.mkstemp()
  os.write(fd, result.content)
  os.close(fd)

  try:
    run_and_monitor(
        'bash -c "NVM_DIR=/usr/local/nvm source {temp}"'.format(temp=temp))
  finally:
    os.remove(temp)

#  curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.26.0/install.sh | NVM_DIR=/usr/local/nvm bash


  check_run_and_monitor('sudo bash -c "cat > /etc/profile.d/nvm.sh"',
                        input=__NVM_SCRIPT)

  print '---------- Installing Node {version} ---------'.format(
    version=NODE_VERSION)

  run_and_monitor('bash -c "source /etc/profile.d/nvm.sh'
                  '; nvm install {version}'
                  '; nvm alias default {version}"'
                  .format(version=NODE_VERSION))
Example #15
def WriteToTemporaryFile(data):
    (fd, fname) = tempfile.mkstemp()
    os.close(fd)
    tmp_file = open(fname, "w")
    tmp_file.write(data)
    tmp_file.close()
    return fname
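Closing the mkstemp descriptor and reopening the same path works, but os.fdopen can adopt the descriptor directly, saving the second open. A sketch of that variant (function name is illustrative):

    import os
    import tempfile

    def write_to_temporary_file(data):
        fd, fname = tempfile.mkstemp()
        # fdopen adopts the descriptor, so one close() handles both
        with os.fdopen(fd, "w") as tmp_file:
            tmp_file.write(data)
        return fname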
Example #16
 def Close(self):
     """
     The physical file is automatically deleted after being closed.
     """
     super(TemporaryFile, self).Close()
     os.close(self.__fd)
     os.remove(self.__tmp_path)
Example #17
def saved_fd(fd):
    new_fd = os.dup(fd)
    try:
        yield
    finally:
        os.dup2(new_fd, fd)
        os.close(new_fd)
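As shown, saved_fd is a plain generator function; to be usable in a with statement it presumably carries the contextlib.contextmanager decorator in its original source. A usage sketch under that assumption:

    import contextlib
    import os

    @contextlib.contextmanager
    def saved_fd(fd):
        new_fd = os.dup(fd)        # keep a duplicate of the original target
        try:
            yield
        finally:
            os.dup2(new_fd, fd)    # point fd back at the saved target
            os.close(new_fd)

    # temporarily send fd 1 (stdout) to /dev/null, then restore it
    with saved_fd(1):
        devnull = os.open(os.devnull, os.O_WRONLY)
        os.dup2(devnull, 1)
        os.close(devnull)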
Example #18
    def __call__(self, *args): ##, **kw):   kw unused
        import hotshot, hotshot.stats, os, tempfile ##, time already imported
        f, filename = tempfile.mkstemp()
        os.close(f)
        
        prof = hotshot.Profile(filename)

        stime = time.time()
        result = prof.runcall(self.func, *args)
        stime = time.time() - stime
        prof.close()

        import cStringIO
        out = cStringIO.StringIO()
        stats = hotshot.stats.load(filename)
        stats.stream = out
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(40)
        stats.print_callers()

        x =  '\n\ntook '+ str(stime) + ' seconds\n'
        x += out.getvalue()

        # remove the tempfile
        try:
            os.remove(filename)
        except IOError:
            pass
            
        return result, x
Example #19
def dumpScreen(utilityPath):
  """dumps a screenshot of the entire screen to a directory specified by
  the MOZ_UPLOAD_DIR environment variable"""

  # Need to figure out which OS-dependent tool to use
  if mozinfo.isUnix:
    utility = [os.path.join(utilityPath, "screentopng")]
    utilityname = "screentopng"
  elif mozinfo.isMac:
    utility = ['/usr/sbin/screencapture', '-C', '-x', '-t', 'png']
    utilityname = "screencapture"
  elif mozinfo.isWin:
    utility = [os.path.join(utilityPath, "screenshot.exe")]
    utilityname = "screenshot"

  # Get dir where to write the screenshot file
  parent_dir = os.environ.get('MOZ_UPLOAD_DIR', None)
  if not parent_dir:
    log.info('Failed to retrieve MOZ_UPLOAD_DIR env var')
    return

  # Run the capture
  try:
    tmpfd, imgfilename = tempfile.mkstemp(prefix='mozilla-test-fail-screenshot_', suffix='.png', dir=parent_dir)
    os.close(tmpfd)
    returncode = subprocess.call(utility + [imgfilename])
    printstatus(returncode, utilityname)
  except OSError as err:
    log.info("Failed to start %s for screenshot: %s",
             utility[0], err.strerror)
    return
Example #20
    def __exit__(self, exe_type, exe_val, tb):
        sys.stdout.flush()
        sys.stderr.flush()
        os.dup2(self.old_out, 1)
        os.dup2(self.old_err, 2)
        os.close(self.old_out)
        os.close(self.old_err)
        self.temp.seek(0)

        # dump everything if an exception was thrown
        if exe_type is not None:
            for line in self.temp:
                sys.stderr.write(line)
            return False

        # if no exception is thrown only dump important lines
        for line in self.temp:
            if self._should_veto(line):
                continue
            accept = self._should_accept(line)
            if self.re is not None:
                re_found = self.re.search(line)
            else:
                re_found = False
            if accept or re_found:
                sys.stderr.write(line)
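Only the __exit__ half of this output-capturing context manager is shown. A hedged sketch of a matching __enter__, assuming self.temp is a text-mode temporary file that receives the redirected output:

    import os
    import sys
    import tempfile

    def __enter__(self):
        sys.stdout.flush()
        sys.stderr.flush()
        self.temp = tempfile.TemporaryFile(mode="w+")
        # duplicate the real stdout/stderr so __exit__ can restore them
        self.old_out = os.dup(1)
        self.old_err = os.dup(2)
        os.dup2(self.temp.fileno(), 1)
        os.dup2(self.temp.fileno(), 2)
        return self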
Example #21
    def _load_master_key(self):
        """Load the master key from file, or create one if not available."""

        # TODO(jamielennox): This is but one way that a key file could be
        # stored. This can be pluggable later for storing/fetching keys from
        # better locations.

        mkey = None

        try:
            with open(CONF.crypto.master_key_file, 'r') as f:
                mkey = base64.b64decode(f.read())
        except IOError as e:
            if e.errno == errno.ENOENT:
                flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
                mkey = self.crypto.new_key(self.KEY_SIZE)
                f = None
                try:
                    f = os.open(CONF.crypto.master_key_file, flags, 0o600)
                    os.write(f, base64.b64encode(mkey))
                except Exception as x:
                    _logger.warn('Failed to read master key initially: %s', e)
                    _logger.warn('Failed to create new master key: %s', x)
                    raise x
                finally:
                    if f:
                        os.close(f)
            else:
                # the file could be unreadable due to bad permissions
                # so just pop up whatever error comes
                raise e

        return mkey
Example #22
 def getTerminalSize():
     def ioctl_GWINSZ(fd):
         try:
             import fcntl, termios, struct, os
             cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
         except:
             return None
         return cr
     cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
     if not cr:
         import os
         try:
             fd = os.open(os.ctermid(), os.O_RDONLY)
             cr = ioctl_GWINSZ(fd)
             os.close(fd)
         except:
             pass
     if cr:
         return int(cr[1]), int(cr[0])        
     try:
         h, w = os.popen("stty size", "r").read().split()
         return int(w), int(h)            
     except:
         pass
     return 80, 25
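On Python 3.3+ the standard library covers this with shutil.get_terminal_size, which consults the COLUMNS/LINES environment variables, then the controlling terminal, then a fallback:

    import shutil

    # returns an os.terminal_size named tuple; fallback is used when no
    # terminal size can be determined
    columns, lines = shutil.get_terminal_size(fallback=(80, 25))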
Example #23
    def __deserialize_file(self, response):
        """
        Saves response body into a file in a temporary folder,
        using the filename from the `Content-Disposition` header if provided.

        :param response:  RESTResponse.
        :return: file path.
        """
        config = Configuration()

        fd, path = tempfile.mkstemp(dir=config.temp_folder_path)
        os.close(fd)
        os.remove(path)

        content_disposition = response.getheader("Content-Disposition")
        if content_disposition:
            filename = re.\
                search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition).\
                group(1)
            path = os.path.join(os.path.dirname(path), filename)

        with open(path, "w") as f:
            f.write(response.data)

        return path
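Note that closing and removing the mkstemp file leaves a window in which another process can claim the path before open(path, "w") recreates it; the example accepts that so the Content-Disposition filename can replace the random one. When the name does not need to change, keeping the descriptor is safer; a sketch (payload is hypothetical):

    import os
    import tempfile

    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, "wb") as f:  # fdopen owns fd; closing f closes it too
        f.write(b"response body here")  # hypothetical payload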
Example #24
 def closerange(fd_low, fd_high):
     # Iterate through and close all file descriptors.
     for fd in range(fd_low, fd_high):
         try:
             os.close(fd)
         except OSError:  # ERROR, fd wasn't open to begin with (ignored)
             pass
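Since Python 2.6 the standard library ships os.closerange with exactly these semantics (close errors are ignored), so this helper is only needed on older interpreters:

    import os

    # close every descriptor in [3, 256); EBADF for never-opened fds is ignored
    os.closerange(3, 256)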
Example #25
    def sort( self, fields, offset = 0 ):
        """sort file according to criteria."""

        if self.mIsOpened:
            reopen = 1
            self.close()
        else:
            reopen = 0
        
        sort_criteria = []
        for field in fields:
            id, modifier = self.mMapKey2Field[ field ]
            field_id = 1 + id + offset
            sort_criteria.append( "-k%i,%i%s" % (field_id, field_id, modifier) )            
            
        outfile, filename_temp = tempfile.mkstemp()
        os.close(outfile)

        ## empty fields are a problem with sort, which by default uses
        ## whitespace to non-whitespace transitions as the field separator;
        ## make \t the explicit field separator.
        statement = "sort -t'\t' %s %s > %s" % (string.join( sort_criteria, " " ), self.mFilename, filename_temp)
        exit_code = os.system(statement)
        if exit_code:
            raise "error while sorting, statement =%s" % statement
        os.system( "mv %s %s" % (filename_temp, self.mFilename))

        if reopen: self.open()
Example #26
 def create_signed_cert(self, ou, san="IP:127.0.0.1,IP:::1,DNS:localhost"):
     print_ok("generating {0}.key, {0}.crt, {0}.p12".format(ou))
     fd, openssl_config = mkstemp(dir='.')
     os.write(fd, "extendedKeyUsage=clientAuth,serverAuth\n".encode('utf-8'))
     os.write(fd, "subjectAltName = {0}".format(san).encode('utf-8'))
     call("openssl genrsa -out {0}.key 1024".format(ou),
          shell=True, stderr=FNULL)
     call(
         "openssl req -new -key {0}.key -out {0}.csr -subj /C=US/ST=CA/O=ghostunnel/OU={0}".format(ou),
         shell=True,
         stderr=FNULL)
     call("chmod 600 {0}.key".format(ou), shell=True)
     call(
         "openssl x509 -req -in {0}.csr -CA {1}.crt -CAkey {1}.key -CAcreateserial -out {0}_temp.crt -days 5 -extfile {2}".format(
             ou,
             self.name,
             openssl_config),
         shell=True,
         stderr=FNULL)
     call(
         "openssl pkcs12 -export -out {0}_temp.p12 -in {0}_temp.crt -inkey {0}.key -password pass:"******"{0}_temp.crt".format(ou), "{0}.crt".format(ou))
     os.rename("{0}_temp.p12".format(ou), "{0}.p12".format(ou))
     os.close(fd)
     os.remove(openssl_config)
     self.leaf_certs.append(ou)
Example #27
 def install_rust(self):
     """Download and run the rustup installer."""
     import errno
     import stat
     import tempfile
     platform = rust.platform()
     url = rust.rustup_url(platform)
     checksum = rust.rustup_hash(platform)
     if not url or not checksum:
         print('ERROR: Could not download installer.')
         sys.exit(1)
     print('Downloading rustup-init... ', end='')
     fd, rustup_init = tempfile.mkstemp(prefix=os.path.basename(url))
     os.close(fd)
     try:
         self.http_download_and_save(url, rustup_init, checksum)
         mode = os.stat(rustup_init).st_mode
         os.chmod(rustup_init, mode | stat.S_IRWXU)
         print('Ok')
         print('Running rustup-init...')
         subprocess.check_call([rustup_init, '-y',
             '--default-toolchain', 'stable',
             '--default-host', platform,
         ])
         cargo_home, cargo_bin = self.cargo_home()
         self.print_rust_path_advice(RUST_INSTALL_COMPLETE,
                 cargo_home, cargo_bin)
     finally:
         try:
             os.remove(rustup_init)
         except OSError as e:
             if e.errno != errno.ENOENT:
                 raise
Example #28
    def verboseCallback(self, what, amount, total, mydata, wibble):
        """ This method provides a verbose callback.

        @param what: callback type
        @param amount: bytes processed
        @param total: total bytes
        @param mydata: package key (hdr, path)
        @param wibble: user data
        """

        if wibble: print wibble
        
        if what == rpm.RPMCALLBACK_TRANS_START:
            pass

        elif what == rpm.RPMCALLBACK_INST_OPEN_FILE:
            hdr, path = mydata
            print "Installing %s\r" % (hdr["name"])
            fdno = os.open(path, os.O_RDONLY)
            nvr = '%s-%s-%s' % ( hdr['name'], hdr['version'], hdr['release'] )
            self.fdnos[nvr] = fdno
            return fdno
        
        elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE:
            hdr, path = mydata
            nvr = '%s-%s-%s' % ( hdr['name'], hdr['version'], hdr['release'] )
            os.close(self.fdnos[nvr])

        elif what == rpm.RPMCALLBACK_INST_PROGRESS:
            hdr, path = mydata
            print "%s:  %.5s%% done\r" % (hdr["name"], (float(amount) / total) * 100)

        return None
Example #29
 def make_tmp_file(self, content):
     fd, path = tempfile.mkstemp(suffix=u'-nxdrive-file-to-upload',
                                dir=self.upload_tmp_dir)
     with open(path, "wb") as f:
         f.write(content)
     os.close(fd)
     return path
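Here the mkstemp descriptor stays open while a second handle to the same path is written and closed. Adopting the descriptor with os.fdopen avoids the duplicate open; a sketch reusing the suffix and upload_tmp_dir attribute from the example above:

    import os
    import tempfile

    def make_tmp_file(self, content):
        fd, path = tempfile.mkstemp(suffix=u'-nxdrive-file-to-upload',
                                    dir=self.upload_tmp_dir)
        with os.fdopen(fd, "wb") as f:  # fdopen owns fd; no separate os.close
            f.write(content)
        return path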
Example #30
 def test_dataError(self, *args, **kw):
     import aio
     # read more data than available
     q = aio.Queue(1)
     fd = os.open(TEST_FILENAME, os.O_DIRECT)
     self.assertRaises(IOError, q.scheduleRead, fd, 0, 1, 4096 * 4096)
     os.close(fd)
Example #31
    def getFile(self, *args, **kwds):
        ''' Keeping this as an alternative for the code.
            We don't use it because it's not possible to know if the call was a
            PK-one (and so we push the content of a temporary filename to fd or
            file) or a non-PK-one (in which case nothing should be done).

                filename = None
                fd = None
                file = None
                if use_pycups:
                    if len(kwds) != 1:
                        use_pycups = True
                    elif kwds.has_key('filename'):
                        filename = kwds['filename']
                    elif kwds.has_key('fd'):
                        fd = kwds['fd']
                    elif kwds.has_key('file'):
                        file = kwds['file']
                    else:
                        use_pycups = True

                    if fd or file:
        '''

        file_object = None
        fd = None
        if len(args) == 2:
            (use_pycups, resource, filename) = self._args_to_tuple([str, str], *args)
        else:
            (use_pycups, resource) = self._args_to_tuple([str], *args)
            if 'filename' in kwds:
                filename = kwds['filename']
            elif 'fd' in kwds:
                fd = kwds['fd']
            elif 'file' in kwds:
                file_object = kwds['file']
            else:
                if not use_pycups:
                    raise TypeError()
                else:
                    filename = None

        if (not use_pycups) and (fd is not None or file_object is not None):
            # Create the temporary file in /tmp to ensure that
            # cups-pk-helper-mechanism is able to write to it.
            (tmpfd, tmpfname) = tempfile.mkstemp(dir="/tmp")
            os.close (tmpfd)

            pk_args = (resource, tmpfname)
            self._call_with_pk_and_fallback(use_pycups,
                                            'FileGet', pk_args,
                                            self._connection.getFile,
                                            *args, **kwds)

            tmpfd = os.open (tmpfname, os.O_RDONLY)
            tmpfile = os.fdopen (tmpfd, 'rt')
            tmpfile.seek (0)

            if fd is not None:
                os.lseek (fd, 0, os.SEEK_SET)
                line = tmpfile.readline()
                while line != '':
                    os.write (fd, line.encode('UTF-8'))
                    line = tmpfile.readline()
            else:
                file_object.seek (0)
                line = tmpfile.readline()
                while line != '':
                    file_object.write (line.encode('UTF-8'))
                    line = tmpfile.readline()

            tmpfile.close ()
            os.remove (tmpfname)
        else:
            pk_args = (resource, filename)

            self._call_with_pk_and_fallback(use_pycups,
                                            'FileGet', pk_args,
                                            self._connection.getFile,
                                            *args, **kwds)
Example #32
def main ():
	options = docopt.docopt(usage, help=False)
	options['--env'] = ''  # exabgp compatibility

	root = root_folder(options,['/bin/exabgpcli','/sbin/exabgpcli','/lib/exabgp/application/cli.py'])
	prefix = '' if root == '/usr' else root
	etc = prefix + '/etc/exabgp'
	envfile = get_envfile(options,etc)
	env = get_env(envfile)
	pipename = env['api']['pipename']

	if options['--help']:
		sys.stdout.write(usage)
		sys.stdout.flush()
		sys.exit(0)

	if not options['<command>']:
		sys.stdout.write(usage)
		sys.stdout.flush()
		sys.exit(0)

	command = ' '.join(options['<command>'])

	pipes = named_pipe(root)
	if len(pipes) != 1:
		sys.stdout.write('could not find ExaBGP\'s named pipes (%s.in and %s.out) for the cli\n' % (pipename, pipename))
		sys.stdout.write('we scanned the following folders (the number is your PID):\n - ')
		sys.stdout.write('\n - '.join(pipes))
		sys.stdout.flush()
		sys.exit(1)

	send = pipes[0] + pipename + '.in'
	recv = pipes[0] + pipename + '.out'

	if not check_fifo(send):
		sys.stdout.write('could not find write named pipe to connect to ExaBGP')
		sys.stdout.flush()
		sys.exit(1)

	if not check_fifo(recv):
		sys.stdout.write('could not find read named pipe to connect to ExaBGP')
		sys.stdout.flush()
		sys.exit(1)

	def write_timeout(signum, frame):
		sys.stderr.write('could not send command to ExaBGP')
		sys.stderr.flush()
		sys.exit(1)

	signal.signal(signal.SIGALRM, write_timeout)
	signal.alarm(2)

	try:
		writer = os.open(send, os.O_WRONLY | os.O_EXCL)
		os.write(writer,command.encode('utf-8') + b'\n')
		os.close(writer)
	except OSError as exc:
		if exc.errno == errno.ENXIO:
			sys.stdout.write('ExaBGP is not running / using the configured named pipe')
			sys.stdout.flush()
			sys.exit(1)
		sys.stdout.write('could not communicate with ExaBGP')
		sys.stdout.flush()
		sys.exit(1)
	except IOError as exc:
		sys.stdout.write('could not communicate with ExaBGP')
		sys.stdout.flush()
		sys.exit(1)

	signal.alarm(0)

	if command == 'reset':
		sys.exit(0)

	def read_timeout(signum, frame):
		sys.stderr.write('could not read answer to ExaBGP')
		sys.stderr.flush()
		sys.exit(1)

	signal.signal(signal.SIGALRM, read_timeout)

	try:
		signal.alarm(5)
		reader = os.open(recv, os.O_RDONLY | os.O_EXCL)
		signal.alarm(0)

		buf = b''
		done = False
		while not done:
			try:
				raw = os.read(reader,4096)
				buf += raw
				while b'\n' in buf:
					line,buf = buf.split(b'\n',1)
					if line == b'done':
						done = True
						break
					if line == b'shutdown':
						sys.stderr.write('ExaBGP is shutting down, command aborted\n')
						sys.stderr.flush()
						done = True
						break
					if line == b'error':
						done = True
						sys.stderr.write('ExaBGP returns an error\n')
						sys.stderr.flush()
						break
					sys.stdout.write('%s\n' % line.decode())
					sys.stdout.flush()

				select.select([reader],[],[],0.01)
			except OSError as exc:
				if exc.errno in error.block:
					break
			except IOError as exc:
				if exc.errno in error.block:
					break
		os.close(reader)

		sys.exit(0)
	except IOError:
		sys.stdout.write('could not read answer from ExaBGP')
		sys.stdout.flush()
	except OSError:
		sys.stdout.write('could not read answer from ExaBGP')
		sys.stdout.flush()
Example #33
    pass

try:
    import termios
except ImportError:
    termios = None

USE_INOTIFY = False
try:
    # Test whether inotify is enabled and likely to work
    import pyinotify

    fd = pyinotify.INotifyWrapper.create().inotify_init()
    if fd >= 0:
        USE_INOTIFY = True
        os.close(fd)
except ImportError:
    pass

RUN_RELOADER = True

FILE_MODIFIED = 1
I18N_MODIFIED = 2

_mtimes = {}
_win = (sys.platform == "win32")

_exception = None
_error_files = []
_cached_modules = set()
_cached_filenames = []
Example #34
 def tearDown(self):
     os.close(self.db_fd)
     os.remove(self.db_path)
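A sketch of the setUp that would pair with this tearDown, assuming the common pattern of backing a test database with tempfile.mkstemp:

    import os
    import tempfile

    def setUp(self):
        # keep both the descriptor and the path so tearDown can close and delete
        self.db_fd, self.db_path = tempfile.mkstemp()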
Example #35
    def _daemon(cls, jupyter_notebook_filename):
        from clearml import Task

        # load jupyter notebook package
        # noinspection PyBroadException
        try:
            # noinspection PyPackageRequirements
            from nbconvert.exporters.script import ScriptExporter
            _script_exporter = ScriptExporter()
        except Exception as ex:
            _logger.warning('Could not read Jupyter Notebook: {}'.format(ex))
            return
        # load pigar
        # noinspection PyBroadException
        try:
            from ....utilities.pigar.reqs import get_installed_pkgs_detail, file_import_modules
            from ....utilities.pigar.modules import ReqsModules
            from ....utilities.pigar.log import logger
            logger.setLevel(logging.WARNING)
        except Exception:
            file_import_modules = None
        # load IPython
        # noinspection PyBroadException
        try:
            # noinspection PyPackageRequirements
            from IPython import get_ipython
        except Exception:
            # should not happen
            get_ipython = None

        # setup local notebook files
        if jupyter_notebook_filename:
            notebook = Path(jupyter_notebook_filename)
            local_jupyter_filename = jupyter_notebook_filename
        else:
            notebook = None
            fd, local_jupyter_filename = mkstemp(suffix='.ipynb')
            os.close(fd)
        last_update_ts = None
        counter = 0
        prev_script_hash = None

        # noinspection PyBroadException
        try:
            from ....version import __version__
            our_module = cls.__module__.split('.')[0], __version__
        except Exception:
            our_module = None

        # noinspection PyBroadException
        try:
            import re
            replace_ipython_pattern = re.compile(r'\n([ \t]*)get_ipython\(\)')
        except Exception:
            replace_ipython_pattern = None

        # main observer loop, check if we need to exit
        while not cls._exit_event.wait(timeout=0.):
            # wait for timeout or sync event
            cls._sync_event.wait(cls._sample_frequency if counter else cls.
                                 _first_sample_frequency)

            cls._sync_event.clear()
            counter += 1
            # noinspection PyBroadException
            try:
                # if there is no task connected, do nothing
                task = Task.current_task()
                if not task:
                    continue

                script_code = None
                fmodules = None
                current_cell = None
                # if we have a local file:
                if notebook:
                    if not notebook.exists():
                        continue
                    # check if notebook changed
                    if last_update_ts is not None and notebook.stat(
                    ).st_mtime - last_update_ts <= 0:
                        continue
                    last_update_ts = notebook.stat().st_mtime
                else:
                    # serialize notebook to a temp file
                    if cls._jupyter_history_logger:
                        script_code, current_cell = cls._jupyter_history_logger.history_to_str(
                        )
                    else:
                        # noinspection PyBroadException
                        try:
                            # noinspection PyBroadException
                            try:
                                os.unlink(local_jupyter_filename)
                            except Exception:
                                pass
                            get_ipython().run_line_magic(
                                'history',
                                '-t -f {}'.format(local_jupyter_filename))
                            with open(local_jupyter_filename, 'r') as f:
                                script_code = f.read()
                            # load the modules
                            from ....utilities.pigar.modules import ImportedModules
                            fmodules = ImportedModules()
                            for nm in set(
                                [str(m).split('.')[0] for m in sys.modules]):
                                fmodules.add(nm, 'notebook', 0)
                        except Exception:
                            continue

                # get notebook python script
                if script_code is None and local_jupyter_filename:
                    script_code, _ = _script_exporter.from_filename(
                        local_jupyter_filename)
                    if cls._store_notebook_artifact:
                        # also upload the jupyter notebook as artifact
                        task.upload_artifact(
                            name='notebook',
                            artifact_object=Path(local_jupyter_filename),
                            preview='See `notebook preview` artifact',
                            metadata={
                                'UPDATE':
                                datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
                            },
                            wait_on_upload=True,
                        )
                        # noinspection PyBroadException
                        try:
                            from nbconvert.exporters import HTMLExporter  # noqa
                            html, _ = HTMLExporter().from_filename(
                                filename=local_jupyter_filename)
                            local_html = Path(
                                gettempdir()) / 'notebook_{}.html'.format(
                                    task.id)
                            with open(local_html.as_posix(), 'wt') as f:
                                f.write(html)
                            task.upload_artifact(
                                name='notebook preview',
                                artifact_object=local_html,
                                preview='Click `FILE PATH` link',
                                metadata={
                                    'UPDATE':
                                    datetime.utcnow().strftime(
                                        '%Y-%m-%d %H:%M:%S')
                                },
                                delete_after_upload=True,
                                wait_on_upload=True,
                            )
                        except Exception:
                            pass

                current_script_hash = hash(script_code + (current_cell or ''))
                if prev_script_hash and prev_script_hash == current_script_hash:
                    continue

                # remove ipython direct access from the script code
                # we will not be able to run them anyhow
                if replace_ipython_pattern:
                    script_code = replace_ipython_pattern.sub(
                        r'\n# \g<1>get_ipython()', script_code)

                requirements_txt = ''
                conda_requirements = ''
                # parse jupyter python script and prepare pip requirements (pigar)
                # if backend supports requirements
                if file_import_modules and Session.check_min_api_version(
                        '2.2'):
                    if fmodules is None:
                        fmodules, _ = file_import_modules(
                            notebook.parts[-1] if notebook else 'notebook',
                            script_code)
                        if current_cell:
                            cell_fmodules, _ = file_import_modules(
                                notebook.parts[-1] if notebook else 'notebook',
                                current_cell)
                            # noinspection PyBroadException
                            try:
                                fmodules |= cell_fmodules
                            except Exception:
                                pass
                    # add current cell to the script
                    if current_cell:
                        script_code += '\n' + current_cell
                    fmodules = ScriptRequirements.add_trains_used_packages(
                        fmodules)
                    # noinspection PyUnboundLocalVariable
                    installed_pkgs = get_installed_pkgs_detail()
                    # make sure we are in installed packages
                    if our_module and (our_module[0] not in installed_pkgs):
                        installed_pkgs[our_module[0]] = our_module

                    # noinspection PyUnboundLocalVariable
                    reqs = ReqsModules()
                    for name in fmodules:
                        if name in installed_pkgs:
                            pkg_name, version = installed_pkgs[name]
                            reqs.add(pkg_name, version, fmodules[name])
                    requirements_txt, conda_requirements = ScriptRequirements.create_requirements_txt(
                        reqs)

                # update script
                prev_script_hash = current_script_hash
                data_script = task.data.script
                data_script.diff = script_code
                data_script.requirements = {
                    'pip': requirements_txt,
                    'conda': conda_requirements
                }
                # noinspection PyProtectedMember
                task._update_script(script=data_script)
                # update requirements
                # noinspection PyProtectedMember
                task._update_requirements(requirements=requirements_txt)
            except Exception:
                pass
Example #36
def get_sequencepage(request):
    if request.method == 'POST':
        # getting values from post
        requirement1 = request.POST.get('requirement')
        requirement1=requirement1.replace("'","")
        requirement = nltk.sent_tokenize(requirement1)
        usecases = Sequence.ExtractMultiMessages(requirement)
        print("check yyyyyyyyyyyyyyyyyyyyyyy")
        print(usecases)
        # UsecaseName=Usecase.Name
        # adding the values in a context variable
        context = {
            'requirement': usecases,
        }
        connectionObject = sqlite3.connect(":memory:")
        cursorObject = connectionObject.cursor()

        createTableActors = "CREATE TABLE Sequence_Components(sender varchar(32),reciever varchar(32),Message varchar(32),loop INTEGER,MessageType varchar(32),conditions varchar(32),conditionMsg varchar(32),elsemsg varchar(32),sender_else varchar(32),reciver_else varchar(32),conditionBit INTEGER,If_loop INTEGER,else_loop INTEGER, SeqId  INTEGER PRIMARY KEY)"
        cursorObject.execute(createTableActors)

        NoOfNulls=0
        NullList={}
        NullKeys=[]
        NoOfNulls_else = 0
        NullList_else = {}
        NullKeys_else = []
        for index,values in enumerate(usecases):
            #for the not if conditions or if have if conditions but not else part
            #for the null recivers
          if(values[10]!=1 or (values[7]=='' and values[10]==1)):
            if(values[1]==''):
                NoOfNulls=NoOfNulls+1
                NullKeys.append(values[13])
               # NullList[values[13]] =values[2]
                NullList[values[13]] = values[2]

                cursorObject.execute(
                    'INSERT INTO  Sequence_Components(sender,reciever,Message,loop,MessageType,conditions,conditionMsg,elsemsg,sender_else,reciver_else,conditionBit,If_loop,else_loop,SeqId) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
                    [
                        values[0], '', values[2], values[3], values[4], values[5], values[6], values[7], values[8],
                        values[9], values[10], values[11], values[12], values[13]])
            # print(key)
            elif((values[7]!='' and values[10]==1)):
                if (values[1] == ''):
                    NoOfNulls = NoOfNulls + 1
                    NullKeys.append(values[13])
                    # NullList[values[13]] =values[2]
                    NullList[values[13]] = values[2]

                    cursorObject.execute(
                        'INSERT INTO  Sequence_Components(sender,reciever,Message,loop,MessageType,conditions,conditionMsg,elsemsg,sender_else,reciver_else,conditionBit,If_loop,else_loop,SeqId) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
                        [
                            values[0], '', values[2], values[3], values[4], values[5], values[6], values[7], values[8],
                            values[9], values[10], values[11], values[12], values[13]])
                if(values[9]==''):
                    NoOfNulls_else = NoOfNulls_else + 1
                    NullKeys_else.append(values[13])
                    # NullList[values[13]] =values[2]
                    NullList_else[values[13]] = values[2]

                    cursorObject.execute(
                        'INSERT INTO  Sequence_Components(sender,reciever,Message,loop,MessageType,conditions,conditionMsg,elsemsg,sender_else,reciver_else,conditionBit,If_loop,else_loop,SeqId) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
                        [
                            values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7], values[8],
                            '', values[10], values[11], values[12], values[13]])
            else:
             cursorObject.execute('INSERT INTO  Sequence_Components(sender,reciever,Message,loop,MessageType,conditions,conditionMsg,elsemsg,sender_else,reciver_else,conditionBit,If_loop,else_loop,SeqId) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)', [values[0], values[1],values[2],values[3],values[4],values[5],values[6],values[7],values[8],values[9],values[10],values[11],values[12],values[13]])
            # cursorObject.executemany('INSERT INTO  Relations(relation) VALUES(?)', relations[key])
            # cursorObject.executemany('INSERT INTO  Relations VALUES(?,?)', relations)
            # print(relations)

          #if has else part
          elif ((values[7] != '' and values[10] == 1)):
            if (values[1] == '' and values[9] != ''):
                NoOfNulls = NoOfNulls + 1
                NullKeys.append(values[13])
                # NullList[values[13]] =values[2]
                NullList[values[13]] = values[2]

                cursorObject.execute(
                    'INSERT INTO  Sequence_Components(sender,reciever,Message,loop,MessageType,conditions,conditionMsg,elsemsg,sender_else,reciver_else,conditionBit,If_loop,else_loop,SeqId) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
                    [
                        values[0], '', values[2], values[3], values[4], values[5], values[6], values[7], values[8],
                        values[9], values[10], values[11], values[12], values[13]])
            elif (values[9] == '' and values[1]!=''):
                NoOfNulls_else = NoOfNulls_else + 1
                NullKeys_else.append(values[13])
                # NullList[values[13]] =values[2]
                NullList_else[values[13]] = values[7]

                cursorObject.execute(
                    'INSERT INTO  Sequence_Components(sender,reciever,Message,loop,MessageType,conditions,conditionMsg,elsemsg,sender_else,reciver_else,conditionBit,If_loop,else_loop,SeqId) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
                    [
                        values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7],
                        values[8],'', values[10], values[11], values[12], values[13]])
            elif(values[9] == '' and values[1]==''):
                NoOfNulls = NoOfNulls + 1
                NullKeys.append(values[13])
                # NullList[values[13]] =values[2]
                NullList[values[13]] = values[2]
                NoOfNulls_else = NoOfNulls_else + 1
                NullKeys_else.append(values[13])
                # NullList[values[13]] =values[2]
                NullList_else[values[13]] = values[7]
                cursorObject.execute(
                 'INSERT INTO  Sequence_Components(sender,reciever,Message,loop,MessageType,conditions,conditionMsg,elsemsg,sender_else,reciver_else,conditionBit,If_loop,else_loop,SeqId) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
                 [
                    values[0], '', values[2], values[3], values[4], values[5], values[6], values[7],
                    values[8], '', values[10], values[11], values[12], values[13]])

            else:
                cursorObject.execute(
                    'INSERT INTO  Sequence_Components(sender,reciever,Message,loop,MessageType,conditions,conditionMsg,elsemsg,sender_else,reciver_else,conditionBit,If_loop,else_loop,SeqId) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
                    [
                        values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7],
                        values[8], values[9], values[10], values[11], values[12], values[13]])
          else:
              cursorObject.execute(
                  'INSERT INTO  Sequence_Components(sender,reciever,Message,loop,MessageType,conditions,conditionMsg,elsemsg,sender_else,reciver_else,conditionBit,If_loop,else_loop,SeqId) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
                  [values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7], values[8],
                   values[9], values[10], values[11],values[12],values[13]])


        queryTable_usecases = "SELECT * from Sequence_Components"
        queryResults_Relations_usecases = cursorObject.execute(queryTable_usecases)
        usecase_list = cursorObject.fetchall()


        DropTable = "Drop Table Sequence_Components"
        cursorObject.execute(DropTable)
        print("drop the usecases table")
        print(usecase_list)

        # ------------------------------------------------------------------
        if request.method == 'POST':
            # Open a file
            if os.path.exists("uml1app/static/images/draft.png"):
                try:
                    os.remove("uml1app/static/images/draft.png")
                    # os.remove("draft.png")
                    print("yes")
                except OSError:
                    print("no")
                    pass
            # Open a file
            if os.path.exists("draft.txt"):
                try:
                    os.remove("draft.txt")
                except OSError:
                    pass

            # Open a file
            if os.path.exists("draft.png"):
                try:
                    os.remove("draft.png")
                except OSError:
                    pass

            fd = os.open("draft.txt", os.O_RDWR | os.O_CREAT)

            # Write one string

            os.write(fd, b"@startuml\n")
            #os.write(fd, b"Alice -> Bob: Authentication Request\n")

            for component in usecase_list:
                #os.write(fd, (b"Alice -> "+component[0]+b": Authentication Request\n").encode('ascii'))
                #if conditions are null
               if(component[10]==0 or component[10]=='None'):
                #verify this as a self message
                # if(component[1]!=''):
                    if(component[3]==1 or component[11]==1):
                        os.write(fd, ("loop\n"+str(component[0]).lower() + "->" + (str(component[1]).lower()).replace(" ","") + ":" +component[2] + "\nend\n").encode('ascii'))
                    else:
                     os.write(fd, (str(component[0]).lower() +"->" + (str(component[1]).lower()).replace(" ","") + ":"+component[2] +"\n").encode('ascii'))

               #  else:
               #      if (component[3] != 1 or component[11]!=1):
               #          os.write(fd, (str(component[0]).lower() + "->" + (str(component[0]).lower()).replace(" ","") + ":" + component[2] + "\n").encode('ascii'))
               #      else:
               #          os.write(fd, ("loop\n"+str(component[0]).lower() + "->" + (str(component[0]).lower()).replace(" ", "") + ":" +component[2] + "\nend\n").encode('ascii'))
               # #if the sentence has conditios

               else:
                  if(component[7]=='' or component[7]=='None'):
                   # if (component[1] != ''):
                       if (component[3] == 1 or component[11]==1):
                           os.write(fd, ("opt "+component[5]+"\nloop\n" + str(component[0]).lower() + "->" + (
                               str(component[1]).lower()).replace(" ", "") + ":" + component[2] + "\nend\nend\n").encode(
                               'ascii'))
                       else:
                           os.write(fd, ("opt "+component[5]+"\n"+str(component[0]).lower() + "->" + (str(component[1]).lower()).replace(" ",
                                                                                                                "") + ":" +
                                         component[2] + "\nend\n").encode('ascii'))

                   # else:
                   #     if (component[3] != 1 or component[11]!=1):
                   #         os.write(fd, ("opt "+component[5]+"\n"+str(component[0]).lower() + "->" + (str(component[0]).lower()).replace(" ",
                   #                                                                                              "") + ":" +
                   #                       component[6] + "\nend\n").encode('ascii'))
                   #     else:
                   #         os.write(fd, ("alt"+component[5]+"\nloop\n" + str(component[0]).lower() + "->" + (
                   #             str(component[0]).lower()).replace(" ", "") + ":" + component[6] + "\nend\n").encode(
                   #             'ascii'))
                   # for the else messages
                   else:
                       # if (component[1] != ''):
                       sender = str(component[0]).lower()
                       receiver = str(component[1]).lower().replace(" ", "")
                       else_sender = str(component[8]).lower().replace(" ", "")
                       else_receiver = str(component[9]).lower().replace(" ", "")
                       if component[3] == 1 or component[11] == 1:
                           # the if-branch message is wrapped in a loop
                           if component[12] == 1:
                               # ... and the else-branch message is looped as well
                               os.write(fd, ("alt " + component[5] + "\nloop\n" + sender + "->" + receiver +
                                             ":" + component[6] + "\nend\nelse else\nloop\n" + else_sender +
                                             "->" + else_receiver + ":" + component[7] +
                                             "\nend\nend\n").encode('ascii'))
                           else:
                               os.write(fd, ("alt " + component[5] + "\nloop\n" + sender + "->" + receiver +
                                             ":" + component[6] + "\nend\nelse else\n" + else_sender +
                                             "->" + else_receiver + ":" + component[7] +
                                             "\nend\n").encode('ascii'))
                       else:
                           if component[12] == 1:
                               # only the else-branch message is looped
                               os.write(fd, ("alt " + component[5] + "\n" + sender + "->" + receiver +
                                             ":" + component[6] + "\nelse else\nloop\n" + else_sender +
                                             "->" + else_receiver + ":" + component[7] +
                                             "\nend\nend\n").encode('ascii'))
                           else:
                               os.write(fd, ("alt " + component[5] + "\n" + sender + "->" + receiver +
                                             ":" + component[6] + "\nelse else\n" + else_sender +
                                             "->" + else_receiver + ":" + component[7] +
                                             "\nend\n").encode('ascii'))

                          # else:
                          #     if (component[3] !=1 or component[11]!=1):
                          #         if(component[12] == 1):
                          #          os.write(fd, ("alt " + component[5] + "\n" + str(component[0]).lower() + "->" + (
                          #             str(component[0]).lower()).replace(" ",
                          #                                                "") + ":" +
                          #                       component[6] + "\nelse else\nloop\n"+str(component[8].lower()).replace(" ", "") +"->"+str(component[9].lower()).replace(" ", "") +":"+component[7]+ "\nend\nend\n").encode('ascii'))
                          #         else:
                          #             os.write(fd, ("alt " + component[5] + "\n" + str(component[0]).lower() + "->" + (
                          #                 str(component[0]).lower()).replace(" ",
                          #                                                    "") + ":" +
                          #                           component[6] + "\nelse else\n" + str(component[8].lower()).replace(
                          #                         " ", "") + "->" + str(component[9].lower()).replace(" ", "") + ":" +
                          #                           component[7] + "\nend\n").encode('ascii'))
                          #
                          #     else:
                          #       if (component[12] == 1):
                          #         os.write(fd, ("alt" + component[5] + "\nloop\n" + str(component[0]).lower() + "->" + (
                          #             str(component[0]).lower()).replace(" ", "") + ":" + component[
                          #                           6] +"\nend\nelse else\nloop\n"+str(component[8].lower()).replace(" ", "") +"->"+str(component[9].lower()).replace(" ", "") +":"+component[7]+"\nend\nend\n").encode(
                          #             'ascii'))
                          #       else:
                          #           os.write(fd,
                          #                    ("alt" + component[5] + "\nloop\n" + str(component[0]).lower() + "->" + (
                          #                        str(component[0]).lower()).replace(" ", "") + ":" + component[
                          #                         6] + "\nend\nelse else\n" + str(component[8].lower()).replace(" ",
                          #                                                                                       "") + "->" + str(
                          #                        component[9].lower()).replace(" ", "") + ":" + component[
                          #                         7] + "\nend\n").encode(
                          #                        'ascii'))
        os.write(fd, b"@enduml")

        # Close opened file
        os.close(fd)
        # time.sleep(5)

        # # for ubuntu-----------------------------------------
        # os.system("python -m plantuml draft.txt")
        # print("file is  created successfully!!")
        # os.system("cp draft.png uml1app/static/images")
        # # -----------------------------------------------------

        # for windows-----------------------------------------
        subprocess.call("python -m plantuml draft.txt")
        print("file is  created successfully!!")
        os.system("copy draft.png uml1app\static\images")
        # -----------------------------------------------------

        obj = Seq_Items.objects.values()
        list1 = list(obj)
        loopMessages = []
        conditionMessages = []

        for items in list1:
            if items['loop'] == '1':
                loopMessages.append(items)
            if items['conditionBit'] == '1':
                conditionMessages.append(items)
        print(NullList)

        # ------------------------------------------------------------------

        # ------------------------------------------------------------------
    if request.method == 'POST':

        # Remove any previously generated model files
        if os.path.exists("uml1app/static/images/antsModel.docx"):
            try:
                os.remove("uml1app/static/images/antsModel.docx")
            except OSError:
                pass
        if os.path.exists("antsModel.docx"):
            try:
                os.remove("antsModel.docx")
            except OSError:
                pass

        fd = os.open("antsModel.docx", os.O_RDWR | os.O_CREAT)

        # Write one string

        os.write(fd, b"@antsuml\n")
        os.write(fd, b"by Ants UML Diagram designers\n")

        os.write(fd, b"Actors and usecases\n")
        os.write(fd, b"\n")

        for values in list1:
          if(values['conditionBit']!='1' and values['loop']!='1'):
            os.write(fd, ("" + values['sender'] + "--->("+values['Message']+"--->:"+values['MessageType']+")---> "+values['reciever']).encode('ascii'))
          elif(values['loop']=='1' and values['conditionBit']!='1' ):
            os.write(fd, ("[loop] : " + values['sender'] + "--->(" + values['Message'] + "--->:" + values[
                  'MessageType'] + ")---> " + values['reciever']).encode('ascii'))
          elif (values['loop'] != '1' and values['conditionBit'] == '1'):
            if(values['else_loop']!='1'):
              os.write(fd, b"if\n")
              os.write(fd, ("[opt] : " + values['sender'] + "--->(" + values['Message'] + "--->:" + values[
              'MessageType'] + ")---> " + values['reciever']).encode('ascii'))
            else:
                os.write(fd, b"if\n")
                os.write(fd, ("[alt] : " + values['sender'] + "--->(" + values['Message'] + "--->:" + values[
                    'MessageType'] + ")---> " + values['reciever']).encode('ascii'))
                os.write(fd, b"\n")
                os.write(fd, b"else\n")
                os.write(fd, ("[alt] : " + values['sender_else'] + "--->(" + values['elsemsg'] + "--->:" + values[
                    'MessageType'] + ")---> " + values['reciver_else']).encode('ascii'))
          elif (values['loop'] == '1' and values['conditionBit'] == '1'):
            if (values['else_loop'] != '1'):
                os.write(fd, b"if\n")
                os.write(fd, ("[opt] : [loop]:" + values['sender'] + "--->(" + values['Message'] + "--->:" + values[
                    'MessageType'] + ")---> " + values['reciever']).encode('ascii'))
            else:
                os.write(fd, b"if\n")
                os.write(fd, ("[alt] : [loop]:" + values['sender'] + "--->(" + values['Message'] + "--->:" + values[
                    'MessageType'] + ")---> " + values['reciever']).encode('ascii'))
                os.write(fd, b"\n")
                os.write(fd, b"else\n")
                os.write(fd, ("[alt] :[loop]: " + values['sender_else'] + "--->(" + values['elsemsg'] + "--->:" + values[
                    'MessageType'] + ")---> " + values['reciver_else']).encode('ascii'))
        # Close opened file
        os.close(fd)

        # time.sleep(5)

        # # # for ubuntu-----------------------------------------
        # os.system("cp antsModel.docx uml1app/static/images")
        # # -----------------------------------------------------

        # # for windows-----------------------------------------
        os.system("copy antsModel.docx uml1app\static\images")


        print(list1)
        print("get these data")
        print(usecase_list)
        print(NoOfNulls)
        print(NullList)
        print(NullKeys)
        context1 = {
            'participants': usecase_list,
            'NoOfNulls'   :NoOfNulls,
            'NullList'    :NullList,
            'NullKeys'    :NullKeys,
            'seq_items'   :list1,
            'loops'       :loopMessages,
            'conditions'  :conditionMessages,
            'NoOfNulls_else': NoOfNulls_else,
            'NullList_else': NullList_else,
            'NullKeys_else': NullKeys_else,
        }
        print(loopMessages)
        print("context 1 print")
        print(context1)
        template = loader.get_template("uml1app/sequence.html")
        return HttpResponse(template.render(context1, request))
 def test_access(self):
     f = os.open(test_support.TESTFN, os.O_CREAT | os.O_RDWR)
     os.close(f)
     self.assertTrue(os.access(test_support.TESTFN, os.W_OK))
Example #38
0
 def __init__(self, dir=None, suffix=''):
     fd, name = tempfile.mkstemp(dir=dir, suffix=suffix)
     os.close(fd)
     self.name = os.path.normpath(name)
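This mkstemp/os.close pairing recurs all over this page, so it is worth spelling out: tempfile.mkstemp returns a raw OS-level descriptor together with the path, and the descriptor has to be released explicitly when only the name is needed. A minimal sketch:

import os
import tempfile

# Reserve a unique file name; mkstemp returns (fd, path) with the
# descriptor already open at the OS level.
fd, path = tempfile.mkstemp(suffix=".tmp")
os.close(fd)  # release the descriptor; the file itself stays on disk

# ... hand `path` to code that reopens the file by name ...
os.remove(path)  # clean up once the path is no longer needed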
Example #39
0
 def mktemp(self, prefix=''):
     f, name = tempfile.mkstemp(prefix=prefix, dir=self.tempdir)
     os.close(f)
     return name
Example #40
0
    def test_ext_add_multi(self):
        """Test behaviour when 'add' options are used multiple times."""
        fd, cfgfile = tempfile.mkstemp()
        os.close(fd)

        f = open(cfgfile, 'w')
        f.write('[ext]\nadd-default=two')
        f.close()
        args = ['--configfiles=%s' % cfgfile]
        topt = TestOption1(go_args=args, envvar_prefix='TEST')
        self.assertEqual(topt.options.ext_add_default, 'nowtwo')

        # environment overrides config file
        os.environ['TEST_EXT_ADD_DEFAULT'] = 'three'
        topt = TestOption1(go_args=args, envvar_prefix='TEST')
        self.assertEqual(topt.options.ext_add_default, 'nowthree')

        # command line overrides environment + config file, last value wins
        args.extend(['--ext-add-default=four', '--ext-add-default=five'])
        topt = TestOption1(go_args=args, envvar_prefix='TEST')
        self.assertEqual(topt.options.ext_add_default, 'nowfive')
        del os.environ['TEST_EXT_ADD_DEFAULT']

        f = open(cfgfile, 'w')
        f.write('[ext]\nadd-list-default=two,three')
        f.close()
        args = ['--configfiles=%s' % cfgfile]
        topt = TestOption1(go_args=args, envvar_prefix='TEST')
        self.assertEqual(topt.options.ext_add_list_default, ['now', 'two', 'three'])

        os.environ['TEST_EXT_ADD_LIST_DEFAULT'] = 'four,five'
        topt = TestOption1(go_args=args, envvar_prefix='TEST')
        self.assertEqual(topt.options.ext_add_list_default, ['now', 'four', 'five'])

        args.extend([
            '--ext-add-list-default=six',
            '--ext-add-list-default=seven,eight',
        ])
        topt = TestOption1(go_args=args, envvar_prefix='TEST')
        self.assertEqual(topt.options.ext_add_list_default, ['now', 'seven', 'eight'])
        del os.environ['TEST_EXT_ADD_LIST_DEFAULT']

        f = open(cfgfile, 'w')
        f.write('[ext]\nadd-list-flex=two,,three')
        f.close()
        args = ['--configfiles=%s' % cfgfile]
        topt = TestOption1(go_args=args, envvar_prefix='TEST')
        self.assertEqual(topt.options.ext_add_list_flex, ['two', 'x', 'y', 'three'])

        os.environ['TEST_EXT_ADD_LIST_FLEX'] = 'four,,five'
        topt = TestOption1(go_args=args, envvar_prefix='TEST')
        self.assertEqual(topt.options.ext_add_list_flex, ['four', 'x', 'y', 'five'])

        args.extend([
            '--ext-add-list-flex=six,',
            '--ext-add-list-flex=seven,,eight',
            '--ext-add-list-flex=,last',
        ])
        topt = TestOption1(go_args=args, envvar_prefix='TEST')
        self.assertEqual(topt.options.ext_add_list_flex, ['x', 'y', 'last'])
        del os.environ['TEST_EXT_ADD_LIST_FLEX']

        f = open(cfgfile, 'w')
        f.write('[ext]\nadd-pathlist-flex=two::three')
        f.close()
        args = ['--configfiles=%s' % cfgfile]
        topt = TestOption1(go_args=args, envvar_prefix='TEST')
        self.assertEqual(topt.options.ext_add_pathlist_flex, ['two', 'p2', 'p3', 'three'])

        os.environ['TEST_EXT_ADD_PATHLIST_FLEX'] = 'four::five'
        args = ['--configfiles=%s' % cfgfile]
        topt = TestOption1(go_args=args, envvar_prefix='TEST')
        self.assertEqual(topt.options.ext_add_pathlist_flex, ['four', 'p2', 'p3', 'five'])

        args.extend([
            '--ext-add-pathlist-flex=six:',
            '--ext-add-pathlist-flex=:last',
            '--ext-add-pathlist-flex=seven::eight',
        ])
        topt = TestOption1(go_args=args, envvar_prefix='TEST')
        self.assertEqual(topt.options.ext_add_pathlist_flex, ['seven', 'p2', 'p3', 'eight'])
        del os.environ['TEST_EXT_ADD_PATHLIST_FLEX']
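The test above exercises a fixed precedence order: config-file values are overridden by environment variables, which are overridden by command-line flags, and the last flag wins. TestOption1 implements this internally; the stand-alone sketch below mirrors that resolution order (resolve_option and its arguments are illustrative, not part of the tested library):

import os

def resolve_option(name, config, env_prefix, cli_values):
    # start from the config-file value, if any
    value = config.get(name)
    # an environment variable overrides the config file
    env_var = env_prefix + name.upper()
    if env_var in os.environ:
        value = os.environ[env_var]
    # command-line occurrences override both; the last one wins
    if cli_values:
        value = cli_values[-1]
    return value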
Example #41
0
    def convert_quickbook_pages(self, refresh = False):
        try:
            subprocess.check_call(['quickbook', '--version'])
        except (OSError, subprocess.CalledProcessError):
            print("Problem running quickbook, will not convert quickbook articles.")
            return
        
        bb_parser = boost_site.boostbook_parser.BoostBookParser()
    
        for page in self.pages:
            page_data = self.pages[page]
            if page_data.page_state or refresh:
                xml_file = tempfile.mkstemp('', '.xml')
                os.close(xml_file[0])
                xml_filename = xml_file[1]
                try:
                    print("Converting " + page + ":")
                    subprocess.check_call(['quickbook', '--output-file', xml_filename, '-I', 'feed', page])
                    page_data.load(bb_parser.parse(xml_filename), refresh)
                finally:
                    os.unlink(xml_filename)

                template_vars = {
                    'history_style' : '',
                    'full_title_xml' : page_data.full_title_xml,
                    'title_xml' : page_data.title_xml,
                    'note_xml' : '',
                    'web_date' : page_data.web_date(),
                    'documentation_para' : '',
                    'download_table' : page_data.download_table(),
                    'description_xml' : page_data.description_xml
                }

                if page_data.type == 'release' and 'released' not in page_data.flags and 'beta' not in page_data.flags:
                    template_vars['note_xml'] = """
                        <div class="section-note"><p>Note: This release is
                        still under development. Please don't use this page as
                        a source of information, it's here for development
                        purposes only. Everything is subject to
                        change.</p></div>"""

                if page_data.documentation:
                    template_vars['documentation_para'] = '<p><a href="' + boost_site.util.htmlencode(page_data.documentation) + '">Documentation</a>'

                if page_data.location.startswith('users/history/'):
                    template_vars['history_style'] = """
  <style type="text/css">
/*<![CDATA[*/
  #content .news-description ul {
    list-style: none;
  }
  #content .news-description ul ul {
    list-style: circle;
  }
  /*]]>*/
  </style>
"""

                boost_site.util.write_template(page_data.location,
                    'site-tools/templates/entry-template.html',
                    template_vars)
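The conversion loop above shows the standard way to hand a scratch file to an external tool: reserve the name with mkstemp, close the descriptor so the tool can open the path itself, and unlink in a finally block so the file disappears even when conversion or parsing fails. Reduced to a sketch (some_tool is a placeholder command):

import os
import subprocess
import tempfile

fd, out_path = tempfile.mkstemp(suffix='.xml')
os.close(fd)  # the external tool will open the path itself
try:
    subprocess.check_call(['some_tool', '--output-file', out_path])
    # ... parse out_path here ...
finally:
    os.unlink(out_path)  # removed even if the tool or the parser fails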
Example #42
0
 def __del__(self):
     if self._fd is not None:
         os.close(self._fd)
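A fuller version of this destructor pattern might look like the sketch below (the class is hypothetical). The `is not None` guard matters because descriptor 0 is valid but tests falsy, and resetting the attribute keeps close() idempotent:

import os
import tempfile

class TempHandle:
    def __init__(self):
        self._fd, self.name = tempfile.mkstemp()

    def close(self):
        if self._fd is not None:  # fd 0 is a valid descriptor
            os.close(self._fd)
            self._fd = None  # make close() safe to call twice

    def __del__(self):
        self.close()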
Example #43
0
 def test_signature_verification(self):
     "Test that signing and verification works"
     logger.debug("test_signature_verification begins")
     key = self.generate_key("Andrew", "Able", "alpha.com")
     self.gpg.encoding = 'latin-1'
     if gnupg._py3k:
         data = 'Hello, André!'
     else:
         data = unicode('Hello, André', self.gpg.encoding)
     data = data.encode(self.gpg.encoding)
     sig = self.gpg.sign(data, keyid=key.fingerprint, passphrase='bbrown')
     self.assertFalse(sig, "Bad passphrase should fail")
     sig = self.gpg.sign(data, keyid=key.fingerprint, passphrase='aable')
     self.assertTrue(sig, "Good passphrase should succeed")
     self.assertTrue(sig.hash_algo)
     verified = self.gpg.verify(sig.data)
     if key.fingerprint != verified.fingerprint:  # pragma: no cover
         logger.debug("key: %r", key.fingerprint)
         logger.debug("ver: %r", verified.fingerprint)
     self.assertEqual(key.fingerprint, verified.fingerprint,
                      "Fingerprints must match")
     self.assertEqual(verified.trust_level, verified.TRUST_ULTIMATE)
     self.assertEqual(verified.trust_text, 'TRUST_ULTIMATE')
     data_file = open(self.test_fn, 'rb')
     sig = self.gpg.sign_file(data_file, keyid=key.fingerprint,
                              passphrase='aable')
     data_file.close()
     self.assertTrue(sig, "File signing should succeed")
     self.assertTrue(sig.hash_algo)
     try:
         file = gnupg._make_binary_stream(sig.data, self.gpg.encoding)
         verified = self.gpg.verify_file(file)
     except UnicodeDecodeError:  # pragma: no cover
         # sometimes happens in Python 2.6
         from io import BytesIO
         verified = self.gpg.verify_file(BytesIO(sig.data))
     if key.fingerprint != verified.fingerprint:  # pragma: no cover
         logger.debug("key: %r", key.fingerprint)
         logger.debug("ver: %r", verified.fingerprint)
     self.assertEqual(key.fingerprint, verified.fingerprint,
                      "Fingerprints must match")
     data_file = open(self.test_fn, 'rb')
     sig = self.gpg.sign_file(data_file, keyid=key.fingerprint,
                              passphrase='aable', detach=True)
     data_file.close()
     self.assertTrue(sig, "File signing should succeed")
     self.assertTrue(sig.hash_algo)
     try:
         file = gnupg._make_binary_stream(sig.data, self.gpg.encoding)
         verified = self.gpg.verify_file(file, self.test_fn)
     except UnicodeDecodeError:  # pragma: no cover
         # sometimes happens in Python 2.6
         from io import BytesIO
         verified = self.gpg.verify_file(BytesIO(sig.data))
     if key.fingerprint != verified.fingerprint:  # pragma: no cover
         logger.debug("key: %r", key.fingerprint)
         logger.debug("ver: %r", verified.fingerprint)
     self.assertEqual(key.fingerprint, verified.fingerprint,
                      "Fingerprints must match")
     # Test in-memory verification
     data_file = open(self.test_fn, 'rb')
     data = data_file.read()
     data_file.close()
     fd, fn = tempfile.mkstemp()
     os.write(fd, sig.data)
     os.close(fd)
     try:
         verified = self.gpg.verify_data(fn, data)
     finally:
         os.unlink(fn)
     if key.fingerprint != verified.fingerprint:  # pragma: no cover
         logger.debug("key: %r", key.fingerprint)
         logger.debug("ver: %r", verified.fingerprint)
     self.assertEqual(key.fingerprint, verified.fingerprint,
                      "Fingerprints must match")
     logger.debug("test_signature_verification ends")
Example #44
0
 def close(self):
     os.close(self.watchdog_pipe[0])
     os.close(self.watchdog_pipe[1])
     os.close(self.eintr_pipe[0])
     os.close(self.eintr_pipe[1])
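Each os.pipe() call yields a (read_fd, write_fd) pair, so a clean shutdown closes both ends of every pipe, exactly as the method above does. A minimal POSIX sketch:

import os

watchdog_pipe = os.pipe()  # (read_fd, write_fd)
eintr_pipe = os.pipe()
try:
    pass  # ... use the pipes ...
finally:
    for fd in (*watchdog_pipe, *eintr_pipe):
        os.close(fd)  # leak-free: every descriptor is released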
Example #45
0
 def resulttempfile(self):
     fd, path = tempfile.mkstemp(prefix="pyfai_", suffix=".out")
     os.close(fd)
     os.remove(path)
     yield path
     os.remove(path)
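resulttempfile above is a generator, presumably wrapped with contextlib.contextmanager at its definition site: it reserves a unique name, deletes the placeholder file, and lets the consumer create the real output. A stand-alone equivalent (names here are illustrative):

import contextlib
import os
import tempfile

@contextlib.contextmanager
def result_temp_file():
    fd, path = tempfile.mkstemp(prefix="pyfai_", suffix=".out")
    os.close(fd)
    os.remove(path)  # keep only the unique name, not the file
    yield path
    os.remove(path)  # assumes the consumer created the file

with result_temp_file() as out:
    open(out, "w").close()  # stand-in for the real producer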
Example #46
0
def train(X, Y, val_X=None, val_Y=None, test_X=None, test_Y=None):
    '''
    train()
    Train a Conditional Random Field for sequence tagging.
    
    @param X.     List of sparse-matrix sequences. Each sequence is one sentence.
    @param Y.     List of sequence tags. Each sequence is the sentence's per-token tags.
    @param val_X. More X data, but a heldout dev set.
    @param val_Y. More Y data, but a heldout dev set.
    @return A tuple of encoded parameter weights and hyperparameters for predicting.
    '''

    # Sanity Check detection: features & label
    #with open('a','w') as f:
    #    for xline,yline in zip(X,Y):
    #        for x,y in zip(xline,yline):
    #            print >>f, y, '\t', x.nonzero()[1][0]
    #        print >>f

    # Format features for crfsuite
    feats = format_features(X, Y)

    # Create a Trainer object.
    trainer = pycrfsuite.Trainer(verbose=False)
    for xseq, yseq in pycrf_instances(feats, labeled=True):
        trainer.append(xseq, yseq)

    # Train the model
    os_handle, tmp_file = tempfile.mkstemp(dir=tmp_dir, suffix="crf_temp")
    trainer.train(tmp_file)

    # Read the trained model into a string (so it can be pickled)
    model = ''
    with open(tmp_file, 'rb') as f:
        model = f.read()
    os.close(os_handle)

    # Remove the temporary file
    os.remove(tmp_file)

    ######################################################################

    # information about fitting the model
    scores = {}

    # how well does the model fit the training data?
    train_pred = predict(model, X)
    train_stats = compute_performance_stats('train', train_pred, Y)
    scores['train'] = train_stats

    if val_X:
        val_pred = predict(model, val_X)
        val_stats = compute_performance_stats('dev', val_pred, val_Y)
        scores['dev'] = val_stats

    if test_X:
        test_pred = predict(model, test_X)
        test_stats = compute_performance_stats('test', test_pred, test_Y)
        scores['test'] = test_stats

    # keep track of which external modules were used for building this model!
    scores['hyperparams'] = {}
    enabled_mods = enabled_modules()
    for module, enabled in enabled_mods.items():
        e = bool(enabled)
        scores['hyperparams'][module] = e

    return model, scores
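train() reads the model back into a bytes object precisely so it can be pickled; the predict() it calls presumably reverses the trick, since pycrfsuite.Tagger only loads models from a file path. A sketch of that inverse (open_tagger is a hypothetical helper; on POSIX the scratch file can be unlinked as soon as the tagger has opened it):

import os
import tempfile

import pycrfsuite

def open_tagger(model_bytes):
    fd, tmp_file = tempfile.mkstemp(suffix="crf_temp")
    os.write(fd, model_bytes)  # dump the in-memory model back to disk
    os.close(fd)
    tagger = pycrfsuite.Tagger()
    tagger.open(tmp_file)  # Tagger loads models only from a file path
    os.remove(tmp_file)
    return tagger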
Example #47
0
def install_cert(domain, ssl_cert, ssl_chain, env, raw=False):
    # Write the combined cert+chain to a temporary path and validate that it is OK.
    # The certificate always goes above the chain.
    import tempfile
    fd, fn = tempfile.mkstemp('.pem')
    os.write(fd, (ssl_cert + '\n' + ssl_chain).encode("ascii"))
    os.close(fd)

    # Do validation on the certificate before installing it.
    ssl_private_key = os.path.join(
        os.path.join(env["STORAGE_ROOT"], 'ssl', 'ssl_private_key.pem'))
    cert_status, cert_status_details = check_certificate(
        domain, fn, ssl_private_key)
    if cert_status != "OK":
        if cert_status == "SELF-SIGNED":
            cert_status = "This is a self-signed certificate. I can't install that."
        os.unlink(fn)
        if cert_status_details is not None:
            cert_status += " " + cert_status_details
        return cert_status

    # Where to put it?
    # Make a unique path for the certificate.
    from cryptography.hazmat.primitives import hashes
    from binascii import hexlify
    cert = load_pem(load_cert_chain(fn)[0])
    all_domains, cn = get_certificate_domains(cert)
    path = "%s-%s-%s.pem" % (
        safe_domain_name(
            cn
        ),  # common name, which should be filename safe because it is IDNA-encoded, but in case of a malformed cert make sure it's ok to use as a filename
        cert.not_valid_after.date().isoformat().replace("-",
                                                        ""),  # expiration date
        hexlify(cert.fingerprint(
            hashes.SHA256())).decode("ascii")[0:8],  # fingerprint prefix
    )
    ssl_certificate = os.path.join(
        os.path.join(env["STORAGE_ROOT"], 'ssl', path))

    # Install the certificate.
    os.makedirs(os.path.dirname(ssl_certificate), exist_ok=True)
    shutil.move(fn, ssl_certificate)

    ret = ["OK"]

    # When updating the cert for PRIMARY_HOSTNAME, symlink it from the system
    # certificate path, which is hard-coded for various purposes, and then
    # restart postfix and dovecot.
    if domain == env['PRIMARY_HOSTNAME']:
        # Update symlink.
        system_ssl_certificate = os.path.join(
            os.path.join(env["STORAGE_ROOT"], 'ssl', 'ssl_certificate.pem'))
        os.unlink(system_ssl_certificate)
        os.symlink(ssl_certificate, system_ssl_certificate)

        # Restart postfix and dovecot so they pick up the new file.
        shell('check_call', ["/usr/sbin/service", "postfix", "restart"])
        shell('check_call', ["/usr/sbin/service", "dovecot", "restart"])
        ret.append("mail services restarted")

        # The DANE TLSA record will remain valid so long as the private key
        # hasn't changed. We don't ever change the private key automatically.
        # If the user does it, they must manually update DNS.

    # Update the web configuration so nginx picks up the new certificate file.
    from web_update import do_web_update
    ret.append(do_web_update(env))
    if raw: return ret
    return "\n".join(ret)
Example #48
0
def workerScript(jobStore,
                 config,
                 jobName,
                 jobStoreID,
                 redirectOutputToLogFile=True):
    """
    Worker process script, runs a job. 
    
    :param str jobName: The "job name" (a user friendly name) of the job to be run
    :param str jobStoreLocator: Specifies the job store to use
    :param str jobStoreID: The job store ID of the job to be run
    
    :return int: 1 if a job failed, or 0 if all jobs succeeded
    """

    configureRootLogger()
    setLogLevel(config.logLevel)

    ##########################################
    #Create the worker killer, if requested
    ##########################################

    logFileByteReportLimit = config.maxLogFileSize

    if config.badWorker > 0 and random.random() < config.badWorker:
        # We need to kill the process we are currently in, to simulate worker
        # failure. We don't want to just send SIGKILL, because we can't tell
        # that from a legitimate OOM on our CI runner. We're going to send
        # SIGUSR1 so our terminations are distinctive, and then SIGKILL if that
        # didn't stick. We definitely don't want to do this from *within* the
        # process we are trying to kill, so we fork off. TODO: We can still
        # leave the killing code running after the main Toil flow is done, but
        # since it's now in a process instead of a thread, the main Python
        # process won't wait around for its timeout to expire. I think this is
        # better than the old thread-based way where all of Toil would wait
        # around to be killed.

        killTarget = os.getpid()
        sleepTime = config.badWorkerFailInterval * random.random()
        if os.fork() == 0:
            # We are the child
            # Let the parent run some amount of time
            time.sleep(sleepTime)
            # Kill it gently
            os.kill(killTarget, signal.SIGUSR1)
            # Wait for that to stick
            time.sleep(0.01)
            try:
                # Kill it harder. Hope the PID hasn't already been reused.
                # If we succeeded the first time, this will OSError
                os.kill(killTarget, signal.SIGKILL)
            except OSError:
                pass
            # Exit without doing any of Toil's cleanup
            os._exit(0)

        # We don't need to reap the child. Either it kills us, or we finish
        # before it does. Either way, init will have to clean it up for us.

    ##########################################
    #Load the environment for the jobGraph
    ##########################################

    #First load the environment for the jobGraph.
    with jobStore.readSharedFileStream("environment.pickle") as fileHandle:
        environment = safeUnpickleFromStream(fileHandle)
    env_blacklist = {
        "TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE", "HOME", "LOGNAME", "USER",
        "DISPLAY", "JAVA_HOME"
    }
    for i in environment:
        if i == "PATH":
            # Handle path specially. Sometimes e.g. leader may not include
            # /bin, but the Toil appliance needs it.
            if i in os.environ and os.environ[i] != '':
                # Use the provided PATH and then the local system's PATH
                os.environ[i] = environment[i] + ':' + os.environ[i]
            else:
                # Use the provided PATH only
                os.environ[i] = environment[i]
        elif i not in env_blacklist:
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)

    toilWorkflowDir = Toil.getLocalWorkflowDir(config.workflowID,
                                               config.workDir)

    ##########################################
    #Setup the temporary directories.
    ##########################################

    # Dir to put all this worker's temp files in.
    localWorkerTempDir = tempfile.mkdtemp(dir=toilWorkflowDir)
    os.chmod(localWorkerTempDir, 0o755)

    ##########################################
    #Setup the logging
    ##########################################

    #This is mildly tricky because we don't just want to
    #redirect stdout and stderr for this Python process; we want to redirect it
    #for this process and all children. Consequently, we can't just replace
    #sys.stdout and sys.stderr; we need to mess with the underlying OS-level
    #file descriptors. See <http://stackoverflow.com/a/11632982/402891>

    #When we start, standard input is file descriptor 0, standard output is
    #file descriptor 1, and standard error is file descriptor 2.

    # Do we even want to redirect output? Let the config make us not do it.
    redirectOutputToLogFile = redirectOutputToLogFile and not config.disableWorkerOutputCapture

    #What file do we want to point FDs 1 and 2 to?
    tempWorkerLogPath = os.path.join(localWorkerTempDir, "worker_log.txt")

    if redirectOutputToLogFile:
        # Announce that we are redirecting logging, and where it will now go.
        # This is important if we are trying to manually trace a faulty worker invocation.
        logger.info("Redirecting logging to %s", tempWorkerLogPath)
        sys.stdout.flush()
        sys.stderr.flush()

        # Save the original stdout and stderr (by opening new file descriptors
        # to the same files)
        origStdOut = os.dup(1)
        origStdErr = os.dup(2)

        # Open the file to send stdout/stderr to.
        logFh = os.open(tempWorkerLogPath,
                        os.O_WRONLY | os.O_CREAT | os.O_APPEND)

        # Replace standard output with a descriptor for the log file
        os.dup2(logFh, 1)

        # Replace standard error with a descriptor for the log file
        os.dup2(logFh, 2)

        # Since we only opened the file once, all the descriptors duped from
        # the original will share offset information, and won't clobber each
        # others' writes. See <http://stackoverflow.com/a/5284108/402891>. This
        # shouldn't matter, since O_APPEND seeks to the end of the file before
        # every write, but maybe there's something odd going on...

        # Close the descriptor we used to open the file
        os.close(logFh)

    debugging = logging.getLogger().isEnabledFor(logging.DEBUG)
    ##########################################
    #Worker log file trapped from here on in
    ##########################################

    jobAttemptFailed = False
    statsDict = MagicExpando()
    statsDict.jobs = []
    statsDict.workers.logsToMaster = []
    blockFn = lambda: True
    listOfJobs = [jobName]
    job = None
    try:

        #Put a message at the top of the log, just to make sure it's working.
        logger.info("---TOIL WORKER OUTPUT LOG---")
        sys.stdout.flush()

        logProcessContext(config)

        ##########################################
        #Connect to the deferred function system
        ##########################################
        deferredFunctionManager = DeferredFunctionManager(toilWorkflowDir)

        ##########################################
        #Load the jobGraph
        ##########################################

        jobGraph = jobStore.load(jobStoreID)
        listOfJobs[0] = str(jobGraph)
        logger.debug("Parsed job wrapper")

        ##########################################
        #Cleanup from any earlier invocation of the jobGraph
        ##########################################

        if jobGraph.command is None:
            logger.debug("Wrapper has no user job to run.")
            # Cleanup jobs already finished
            f = lambda jobs: [
                z for z in [[y for y in x if jobStore.exists(y.jobStoreID)]
                            for x in jobs] if len(z) > 0
            ]
            jobGraph.stack = f(jobGraph.stack)
            jobGraph.services = f(jobGraph.services)
            logger.debug(
                "Cleaned up any references to completed successor jobs")

        #This cleans the old log file which may
        #have been left if the job is being retried after a job failure.
        oldLogFile = jobGraph.logJobStoreFileID
        if oldLogFile is not None:
            jobGraph.logJobStoreFileID = None
            jobStore.update(jobGraph)  #Update first, before deleting any files
            jobStore.deleteFile(oldLogFile)

        ##########################################
        # If a checkpoint exists, restart from the checkpoint
        ##########################################

        # The job is a checkpoint, and is being restarted after previously completing
        if jobGraph.checkpoint is not None:
            logger.debug("Job is a checkpoint")
            # If the checkpoint still has extant jobs in its
            # (flattened) stack and services, its subtree didn't
            # complete properly. We handle the restart of the
            # checkpoint here, removing its previous subtree.
            if len([i for l in jobGraph.stack
                    for i in l]) > 0 or len(jobGraph.services) > 0:
                logger.debug("Checkpoint has failed.")
                # Reduce the retry count
                assert jobGraph.remainingRetryCount >= 0
                jobGraph.remainingRetryCount = max(
                    0, jobGraph.remainingRetryCount - 1)
                jobGraph.restartCheckpoint(jobStore)
            # Otherwise, the job and successors are done, and we can cleanup stuff we couldn't clean
            # because of the job being a checkpoint
            else:
                logger.debug(
                    "The checkpoint jobs seems to have completed okay, removing any checkpoint files to delete."
                )
                #Delete any remnant files
                list(
                    map(
                        jobStore.deleteFile,
                        list(
                            filter(jobStore.fileExists,
                                   jobGraph.checkpointFilesToDelete))))

        ##########################################
        #Setup the stats, if requested
        ##########################################

        if config.stats:
            startClock = getTotalCpuTime()

        startTime = time.time()
        while True:
            ##########################################
            #Run the jobGraph, if there is one
            ##########################################

            if jobGraph.command is not None:
                assert jobGraph.command.startswith("_toil ")
                logger.debug("Got a command to run: %s" % jobGraph.command)
                #Load the job
                job = Job._loadJob(jobGraph.command, jobStore)
                # If it is a checkpoint job, save the command
                if job.checkpoint:
                    jobGraph.checkpoint = jobGraph.command

                # Create a fileStore object for the job
                fileStore = AbstractFileStore.createFileStore(
                    jobStore,
                    jobGraph,
                    localWorkerTempDir,
                    blockFn,
                    caching=not config.disableCaching)
                with job._executor(jobGraph=jobGraph,
                                   stats=statsDict if config.stats else None,
                                   fileStore=fileStore):
                    with deferredFunctionManager.open() as defer:
                        with fileStore.open(job):
                            # Get the next block function to wait on committing this job
                            blockFn = fileStore.waitForCommit

                            job._runner(jobGraph=jobGraph,
                                        jobStore=jobStore,
                                        fileStore=fileStore,
                                        defer=defer)

                            # When the job succeeds, start committing files immediately.
                            fileStore.startCommit(jobState=False)

                # Accumulate messages from this job & any subsequent chained jobs
                statsDict.workers.logsToMaster += fileStore.loggingMessages

            else:
                #The command may be none, in which case
                #the jobGraph is either a shell ready to be deleted or has
                #been scheduled after a failure to cleanup
                logger.debug("No user job to run, so finishing")
                break

            if AbstractFileStore._terminateEvent.isSet():
                raise RuntimeError("The termination flag is set")

            ##########################################
            #Establish if we can run another jobGraph within the worker
            ##########################################
            successorJobGraph = nextChainableJobGraph(jobGraph, jobStore)
            if successorJobGraph is None or config.disableChaining:
                # Can't chain any more jobs.
                # TODO: why don't we commit the last job's file store? Might
                # its async uploads never finish otherwise?
                # If we do call startCommit here it messes with the job
                # itself and Toil thinks the job needs to run again.
                break

            ##########################################
            #We have a single successor job that is not a checkpoint job.
            #We transplant the successor jobGraph command and stack
            #into the current jobGraph object so that it can be run
            #as if it were a command that were part of the current jobGraph.
            #We can then delete the successor jobGraph in the jobStore, as it is
            #wholly incorporated into the current jobGraph.
            ##########################################

            # add the successor to the list of jobs run
            listOfJobs.append(str(successorJobGraph))

            #Clone the jobGraph and its stack
            jobGraph = copy.deepcopy(jobGraph)

            #Remove the successor jobGraph
            jobGraph.stack.pop()

            #Transplant the command and stack to the current jobGraph
            jobGraph.command = successorJobGraph.command
            jobGraph.stack += successorJobGraph.stack
            # include some attributes for better identification of chained jobs in
            # logging output
            jobGraph.unitName = successorJobGraph.unitName
            jobGraph.jobName = successorJobGraph.jobName
            assert jobGraph.memory >= successorJobGraph.memory
            assert jobGraph.cores >= successorJobGraph.cores

            #Build a fileStore to update the job
            fileStore = AbstractFileStore.createFileStore(
                jobStore,
                jobGraph,
                localWorkerTempDir,
                blockFn,
                caching=not config.disableCaching)

            #Update blockFn
            blockFn = fileStore.waitForCommit

            #Add successorJobGraph to those to be deleted
            fileStore.jobsToDelete.add(successorJobGraph.jobStoreID)

            #This will update the job once the previous job is done
            fileStore.startCommit(jobState=True)

            #Clone the jobGraph and its stack again, so that updates to it do
            #not interfere with this update
            jobGraph = copy.deepcopy(jobGraph)

            logger.debug("Starting the next job")

        ##########################################
        #Finish up the stats
        ##########################################
        if config.stats:
            totalCPUTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
            statsDict.workers.time = str(time.time() - startTime)
            statsDict.workers.clock = str(totalCPUTime - startClock)
            statsDict.workers.memory = str(totalMemoryUsage)

        # log the worker log path here so that if the file is truncated the path can still be found
        if redirectOutputToLogFile:
            logger.info(
                "Worker log can be found at %s. Set --cleanWorkDir to retain this log",
                localWorkerTempDir)

        logger.info(
            "Finished running the chain of jobs on this node, we ran for a total of %f seconds",
            time.time() - startTime)

    ##########################################
    #Trapping where worker goes wrong
    ##########################################
    except:  #Case that something goes wrong in worker
        traceback.print_exc()
        logger.error("Exiting the worker because of a failed job on host %s",
                     socket.gethostname())
        AbstractFileStore._terminateEvent.set()

    ##########################################
    #Wait for the asynchronous chain of writes/updates to finish
    ##########################################

    blockFn()

    ##########################################
    #All the asynchronous worker/update threads must be finished now,
    #so safe to test if they completed okay
    ##########################################

    if AbstractFileStore._terminateEvent.isSet():
        jobGraph = jobStore.load(jobStoreID)
        jobAttemptFailed = True

    ##########################################
    #Cleanup
    ##########################################

    # Close the worker logging
    # Flush at the Python level
    sys.stdout.flush()
    sys.stderr.flush()
    if redirectOutputToLogFile:
        # Flush at the OS level
        os.fsync(1)
        os.fsync(2)

        # Close redirected stdout and replace with the original standard output.
        os.dup2(origStdOut, 1)

        # Close redirected stderr and replace with the original standard error.
        os.dup2(origStdErr, 2)

        # sys.stdout and sys.stderr don't need to be modified at all. We don't
        # need to call redirectLoggerStreamHandlers since they still log to
        # sys.stderr

        # Close our extra handles to the original standard output and standard
        # error streams, so we don't leak file handles.
        os.close(origStdOut)
        os.close(origStdErr)

    # Now our file handles are in exactly the state they were in before.

    # Copy back the log file to the global dir, if needed.
    # Note that we work with bytes instead of characters so we can seek
    # relative to the end (since Python won't decode Unicode backward, or even
    # interpret seek offsets in characters for us). TODO: We may get invalid or
    # just different Unicode by breaking up a character at the boundary!
    if jobAttemptFailed and redirectOutputToLogFile:
        jobGraph.logJobStoreFileID = jobStore.getEmptyFileStoreID(
            jobGraph.jobStoreID, cleanup=True)
        jobGraph.chainedJobs = listOfJobs
        with jobStore.updateFileStream(jobGraph.logJobStoreFileID) as w:
            with open(tempWorkerLogPath, 'rb') as f:
                if os.path.getsize(
                        tempWorkerLogPath) > logFileByteReportLimit != 0:
                    if logFileByteReportLimit > 0:
                        f.seek(-logFileByteReportLimit,
                               2)  # seek to last tooBig bytes of file
                    elif logFileByteReportLimit < 0:
                        f.seek(logFileByteReportLimit,
                               0)  # seek to first tooBig bytes of file
                # Dump the possibly-invalid-Unicode bytes into the log file
                w.write(f.read())  # TODO load file using a buffer
        jobStore.update(jobGraph)

    elif ((debugging or (config.writeLogsFromAllJobs
                         and not jobName.startswith(CWL_INTERNAL_JOBS)))
          and redirectOutputToLogFile):  # write log messages
        with open(tempWorkerLogPath, 'rb') as logFile:
            if os.path.getsize(
                    tempWorkerLogPath) > logFileByteReportLimit != 0:
                if logFileByteReportLimit > 0:
                    logFile.seek(-logFileByteReportLimit,
                                 2)  # seek to last tooBig bytes of file
                elif logFileByteReportLimit < 0:
                    logFile.seek(logFileByteReportLimit,
                                 0)  # seek to first tooBig bytes of file
            # Make sure lines are Unicode so they can be JSON serialized as part of the dict.
            # We may have damaged the Unicode text by cutting it at an arbitrary byte so we drop bad characters.
            logMessages = [
                line.decode('utf-8', 'ignore')
                for line in logFile.read().splitlines()
            ]
        statsDict.logs.names = listOfJobs
        statsDict.logs.messages = logMessages

    if (debugging or config.stats or statsDict.workers.logsToMaster
        ) and not jobAttemptFailed:  # We have stats/logging to report back
        if USING_PYTHON2:
            jobStore.writeStatsAndLogging(
                json.dumps(statsDict, ensure_ascii=True))
        else:
            jobStore.writeStatsAndLogging(
                json.dumps(statsDict, ensure_ascii=True).encode())

    #Remove the temp dir
    cleanUp = config.cleanWorkDir
    if cleanUp == 'always' or (cleanUp == 'onSuccess' and
                               not jobAttemptFailed) or (cleanUp == 'onError'
                                                         and jobAttemptFailed):
        shutil.rmtree(localWorkerTempDir)

    #This must happen after the log file is done with, else there is no place to put the log
    if (not jobAttemptFailed) and jobGraph.command is None and len(
            jobGraph.stack) == 0 and len(jobGraph.services) == 0:
        # We can now safely get rid of the jobGraph
        jobStore.delete(jobGraph.jobStoreID)

    if jobAttemptFailed:
        return 1
    else:
        return 0
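The descriptor juggling workerScript performs around its log file is a general technique: duplicate the original stdout/stderr descriptors with os.dup, point fds 1 and 2 at the log with os.dup2 so child processes are captured too, and restore everything afterwards. Boiled down to a sketch:

import os
import sys

def redirect_to_log(path):
    saved = (os.dup(1), os.dup(2))  # keep the real stdout/stderr
    log_fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
    os.dup2(log_fd, 1)  # fd 1 now writes to the log file
    os.dup2(log_fd, 2)  # fd 2 too
    os.close(log_fd)    # fds 1 and 2 keep the file open
    return saved

def restore_output(saved):
    sys.stdout.flush()
    sys.stderr.flush()
    os.dup2(saved[0], 1)
    os.dup2(saved[1], 2)
    os.close(saved[0])  # drop the extra handles so nothing leaks
    os.close(saved[1])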
Example #49
0
    def _clone_local(self, meter, size_bytes):
        if self._input_path == "/dev/null":  # pragma: no cover
            # Not really sure why this check is here,
            # but keeping for compat
            log.debug("Source dev was /dev/null. Skipping")
            return
        if self._input_path == self._output_path:
            log.debug("Source and destination are the same. Skipping.")
            return

        # If a destination file exists and sparse flag is True,
        # this priority takes an existing file.
        if (not os.path.exists(self._output_path) and self._sparse):
            clone_block_size = 4096
            sparse = True
            fd = None
            try:
                fd = os.open(self._output_path, os.O_WRONLY | os.O_CREAT,
                             0o640)
                os.ftruncate(fd, size_bytes)
            finally:
                if fd:
                    os.close(fd)
        else:
            clone_block_size = 1024 * 1024 * 10
            sparse = False

        log.debug("Local Cloning %s to %s, sparse=%s, block_size=%s",
                      self._input_path, self._output_path,
                      sparse, clone_block_size)

        zeros = b'\0' * 4096

        src_fd, dst_fd = None, None
        try:
            try:
                src_fd = os.open(self._input_path, os.O_RDONLY)
                dst_fd = os.open(self._output_path,
                                os.O_WRONLY | os.O_CREAT, 0o640)
                if hasattr(os, "copy_file_range"):
                    #size = int(self.get_size() * 1024 * 1024 * 1024 * 10)
                    size = _get_size(self._path)
                    log.debug("Using os.copy_file_range, size=%s", size)
                    while size > 0:
                        ret = os.copy_file_range(src_fd, dst_fd, size)
                        size -= ret
                else:
                    i = 0
                    while 1:
                        l = os.read(src_fd, clone_block_size)
                        s = len(l)
                        if s == 0:
                            meter.end(size_bytes)
                            break
                        # check sequence of zeros
                        if sparse and zeros == l:
                            os.lseek(dst_fd, s, 1)
                        else:
                            b = os.write(dst_fd, l)
                            if s != b:  # pragma: no cover
                                meter.end(i)
                                break
                        i += s
                        if i < size_bytes:
                            meter.update(i)
            except OSError as e:  # pragma: no cover
                log.debug("Error while cloning", exc_info=True)
                msg = (_("Error cloning diskimage "
                         "%(inputpath)s to %(outputpath)s: %(error)s") %
                         {"inputpath": self._input_path,
                          "outputpath": self._output_path,
                          "error": str(e)})
                raise RuntimeError(msg) from None
        finally:
            if src_fd is not None:
                os.close(src_fd)
            if dst_fd is not None:
                os.close(dst_fd)
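The sparse branch above combines three descriptor-level calls: os.ftruncate pre-sizes the destination, os.read/os.write move real data, and os.lseek skips all-zero blocks so they become holes. A self-contained sketch of the same idea:

import os

ZERO_BLOCK = b"\0" * 4096

def clone_sparse(src_path, dst_path):
    src = os.open(src_path, os.O_RDONLY)
    dst = os.open(dst_path, os.O_WRONLY | os.O_CREAT, 0o640)
    try:
        os.ftruncate(dst, os.path.getsize(src_path))  # pre-size the target
        while True:
            block = os.read(src, len(ZERO_BLOCK))
            if not block:
                break
            if block == ZERO_BLOCK:
                os.lseek(dst, len(block), os.SEEK_CUR)  # leave a hole
            else:
                os.write(dst, block)
    finally:
        os.close(src)
        os.close(dst)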
Example #50
0
	def _instance_cleanup(self, instance):
		if instance._first_instance:
			if self._has_pm_qos:
				os.close(self._cpu_latency_fd)
			if instance._load_monitor is not None:
				self._monitors_repository.delete(instance._load_monitor)
Example #51
0
 def postForkChild(self):
     "post fork handling in child"
     os.close(self.rfd)
     self.rfd = None
Example #52
0
 def close(self):
     """
     Close the write end of the pipe.
     """
     os.close(self.fd_write)
Example #53
0
 def close_source_fd(self):
     # FIXME: Only close source fds that come from pipes instead of this hack...
     if isinstance(self.source, int) and not self.borrowed:
         log.log("CLOSE SOURCE %s" % (self.source,), "fd")
         os.close(self.source)
Example #54
0
 def __exit__(self, exc_type, exc_value, traceback):
     self._new_stdout.flush()
     os.dup2(self.oldstdout_fno, 1)
     os.close(self.oldstdout_fno)
Example #55
0
 def tearDown(self):
     dbo.reset()
     os.close(self.version_fd)
     os.remove(self.version_file)
Example #56
0
 def postForkParent(self):
     "post fork handling in parent"
     os.close(self.wfd)
     self.wfd = None
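Example #51 and Example #56 are the two halves of one idiom: after a fork, parent and child each close the pipe end they do not use, so EOF propagates correctly when the writer exits. A complete POSIX-only sketch:

import os

rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
    os.close(rfd)  # the child only writes, so drop the read end
    os.write(wfd, b"ready\n")
    os.close(wfd)
    os._exit(0)
else:
    os.close(wfd)  # the parent only reads, so drop the write end
    print(os.read(rfd, 1024))
    os.close(rfd)
    os.waitpid(pid, 0)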
Example #57
0
def _communicate(ioproc_ref, proc, readPipe, writePipe):
    real_ioproc = ioproc_ref()
    if real_ioproc is None:
        return

    # Keeps the name for logging in this thread.
    ioproc_name = real_ioproc.name

    real_ioproc._started.set()

    dataSender = None
    pendingRequests = {}
    responseReader = ResponseReader(readPipe)

    err = proc.stderr.fileno()

    poller = poll()

    # When closing the ioprocess there might be race for closing this fd
    # using a copy solves this
    try:
        try:
            evtReciever = os.dup(real_ioproc._eventFdReciever)
        except OSError:
            evtReciever = -1
            return

        poller.register(err, INPUT_READY_FLAGS)
        poller.register(evtReciever, INPUT_READY_FLAGS)
        poller.register(readPipe, INPUT_READY_FLAGS)
        poller.register(writePipe, ERROR_FLAGS)

        while True:
            real_ioproc = None

            pollres = NoIntrPoll(poller.poll, 5)

            real_ioproc = ioproc_ref()
            if real_ioproc is None:
                break

            if not real_ioproc._isRunning:
                _log.info("(%s) Shutdown requested", ioproc_name)
                break

            for fd, event in pollres:
                if event & ERROR_FLAGS:
                    raise PollError(fd, event & ERROR_FLAGS)

                if fd == err:
                    real_ioproc._processLogs(os.read(fd, 1024))
                    continue

                if fd == readPipe:
                    if not responseReader.process():
                        continue

                    res = responseReader.pop()
                    reqId = res['id']
                    pendingReq = pendingRequests.pop(reqId, None)
                    if pendingReq is not None:
                        pendingReq.result = res
                        pendingReq.event.set()
                    else:
                        _log.warning("(%s) Unknown request id %d",
                                     ioproc_name, reqId)
                    continue

                if fd == evtReciever:
                    os.read(fd, 1)
                    if dataSender:
                        continue

                    try:
                        cmd, resObj = real_ioproc._commandQueue.get_nowait()
                    except Empty:
                        continue

                    reqId = real_ioproc._getRequestId()
                    pendingRequests[reqId] = resObj
                    reqString = real_ioproc._requestToBytes(cmd, reqId)
                    dataSender = DataSender(writePipe, reqString)
                    poller.modify(writePipe, OUTPUT_READY_FLAGS)
                    continue

                if fd == writePipe:
                    if dataSender.process():
                        dataSender = None
                        poller.modify(writePipe, ERROR_FLAGS)
                        real_ioproc._pingPoller()
    except PollError as e:
        # Normal during shutdown - don't log an error.
        _log.info("(%s) %s", ioproc_name, e)
        _cleanup(pendingRequests)
    except:
        # Unexpected error.
        _log.exception("(%s) Communication thread failed", ioproc_name)
        _cleanup(pendingRequests)
    finally:
        os.close(readPipe)
        os.close(writePipe)
        if (evtReciever >= 0):
            os.close(evtReciever)

        rc = proc.poll()

        if rc is None:
            _log.info("(%s) Killing ioprocess", ioproc_name)
            if IOProcess._DEBUG_VALGRIND:
                os.kill(proc.pid, signal.SIGTERM)
            else:
                proc.kill()
            rc = proc.wait()

        if rc < 0:
            _log.info("(%s) ioprocess was terminated by signal %s",
                      ioproc_name, -rc)
        else:
            _log.info("(%s) ioprocess terminated with code %s",
                      ioproc_name, rc)

        real_ioproc = ioproc_ref()
        if real_ioproc is not None:
            with real_ioproc._lock:
                if real_ioproc._isRunning:
                    real_ioproc._run()
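_communicate is built on the classic poll loop: register every descriptor of interest, block with a timeout, and dispatch on the returned (fd, event) pairs; a plain pipe serves as the wakeup channel, much like the duplicated event fd above. A minimal POSIX sketch:

import os
import select

wakeup_r, wakeup_w = os.pipe()
poller = select.poll()
poller.register(wakeup_r, select.POLLIN)

os.write(wakeup_w, b"x")  # another thread would signal the loop this way
for fd, event in poller.poll(5000):  # timeout in milliseconds
    if fd == wakeup_r:
        os.read(fd, 1)  # drain one wakeup token

os.close(wakeup_r)
os.close(wakeup_w)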
Example #58
0
def run(config=None, plugin_providers=None, product_name='ajenti', dev_mode=False,
        debug_mode=False, autologin=False):
    """
    A global entry point for Ajenti.

    :param config: config file implementation instance to use
    :type  config: :class:`aj.config.BaseConfig`
    :param plugin_providers: list of plugin providers to load plugins from
    :type  plugin_providers: list(:class:`aj.plugins.PluginProvider`)
    :param str product_name: a product name to use
    :param bool dev_mode: enables dev mode (automatic resource recompilation)
    :param bool debug_mode: enables debug mode (verbose and extra logging)
    :param bool autologin: disables authentication and logs everyone in as the user running the panel. This is EXTREMELY INSECURE.
    """
    if config is None:
        raise TypeError('`config` can\'t be None')

    reload_module(sys)
    if hasattr(sys, 'setdefaultencoding'):
        sys.setdefaultencoding('utf8')

    aj.product = product_name
    aj.debug = debug_mode
    aj.dev = dev_mode
    aj.dev_autologin = autologin

    aj.init()
    aj.log.set_log_params(tag='master', master_pid=os.getpid())
    aj.context = Context()
    aj.config = config
    aj.plugin_providers = plugin_providers or []
    logging.info('Loading config from %s', aj.config)
    aj.config.load()
    aj.config.ensure_structure()

    if aj.debug:
        logging.warning('Debug mode')
    if aj.dev:
        logging.warning('Dev mode')

    try:
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error:
        logging.warning('Couldn\'t set default locale')

    # install a passthrough gettext replacement since all localization is handled in frontend
    # and _() is here only for string extraction
    __builtins__['_'] = lambda x: x

    logging.info('Ajenti Core %s', aj.version)
    logging.info('Detected platform: %s / %s', aj.platform, aj.platform_string)

    # Load plugins
    PluginManager.get(aj.context).load_all_from(aj.plugin_providers)
    if len(PluginManager.get(aj.context)) == 0:
        logging.warning('No plugins were loaded!')

    if aj.config.data['bind']['mode'] == 'unix':
        path = aj.config.data['bind']['socket']
        if os.path.exists(path):
            os.unlink(path)
        listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            listener.bind(path)
        except OSError:
            logging.error('Could not bind to %s', path)
            sys.exit(1)

    if aj.config.data['bind']['mode'] == 'tcp':
        host = aj.config.data['bind']['host']
        port = aj.config.data['bind']['port']
        listener = socket.socket(
            socket.AF_INET6 if ':' in host else socket.AF_INET, socket.SOCK_STREAM
        )
        if aj.platform not in ['freebsd', 'osx']:
            try:
                listener.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 1)
            except socket.error:
                logging.warning('Could not set TCP_CORK')
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        logging.info('Binding to [%s]:%s', host, port)
        try:
            listener.bind((host, port))
        except socket.error as e:
            logging.error('Could not bind: %s', str(e))
            sys.exit(1)

    # Fix stupid socketio bug (it tries to do *args[0][0])
    socket.socket.__getitem__ = lambda x, y: None

    listener.listen(10)

    gateway = GateMiddleware.get(aj.context)
    application = HttpRoot(HttpMiddlewareAggregator([gateway])).dispatch

    aj.server = SocketIOServer(
        listener,
        log=open(os.devnull, 'w'),
        application=application,
        handler_class=RequestHandler,
        policy_server=False,
        transports=[
            str('websocket'),
            str('flashsocket'),
            str('xhr-polling'),
            str('jsonp-polling'),
        ],
    )

    if aj.config.data['ssl']['enable'] and aj.config.data['bind']['mode'] == 'tcp':
        aj.server.ssl_args = {'server_side': True}
        cert_path = aj.config.data['ssl']['certificate']
        if aj.config.data['ssl']['fqdn_certificate']:
            fqdn_cert_path = aj.config.data['ssl']['fqdn_certificate']
        else:
            fqdn_cert_path = cert_path

        context = gevent.ssl.SSLContext(ssl.PROTOCOL_TLS)
        context.load_cert_chain(certfile=fqdn_cert_path, keyfile=fqdn_cert_path)
        context.options |= ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
        context.set_ciphers('ALL:!ADH:!EXP:!LOW:!RC2:!3DES:!SEED:!RC4:+HIGH:+MEDIUM')

        if aj.config.data['ssl']['client_auth']['enable']:

            logging.info('Enabling SSL client authentication')
            context.load_verify_locations(cafile=cert_path)
            if aj.config.data['ssl']['client_auth']['force']:
                context.verify_mode = ssl.CERT_REQUIRED
            else:
                context.verify_mode = ssl.CERT_OPTIONAL

            # Test callback: client_certificate_callback must return None for the connection to proceed
            # context.set_servername_callback(AuthenticationService.get(aj.context).client_certificate_callback)

        aj.server.wrap_socket = lambda sock, **kwargs: context.wrap_socket(sock=sock, server_side=True)
        logging.info('SSL enabled')

    # auth.log
    try:
        syslog.openlog(
            ident=str(aj.product),
            facility=syslog.LOG_AUTH,
        )
    except:
        syslog.openlog(aj.product)

    def cleanup():
        if hasattr(cleanup, 'started'):
            return
        cleanup.started = True
        logging.info('Process %s exiting normally', os.getpid())
        gevent.signal(signal.SIGINT, lambda: None)
        gevent.signal(signal.SIGTERM, lambda: None)
        if aj.master:
            gateway.destroy()

        p = psutil.Process(os.getpid())
        for c in p.children(recursive=True):
            try:
                os.killpg(c.pid, signal.SIGTERM)
                os.killpg(c.pid, signal.SIGKILL)
            except OSError:
                pass

    def signal_handler():
        cleanup()
        sys.exit(0)

    gevent.signal(signal.SIGINT, signal_handler)
    gevent.signal(signal.SIGTERM, signal_handler)

    aj.server.serve_forever()

    if not aj.master:
        # child process, server is stopped, wait until killed
        gevent.wait()

    if hasattr(aj.server, 'restart_marker'):
        logging.warning('Restarting by request')
        cleanup()

        fd = 20  # Close leftover descriptors 3..20 before exec; crude, but it works
        while fd > 2:
            try:
                os.close(fd)
                logging.debug('Closed descriptor #%i', fd)
            except OSError:
                pass
            fd -= 1

        logging.warn('Will restart the process now')
        if '-d' in sys.argv:
            sys.argv.remove('-d')
        os.execv(sys.argv[0], sys.argv)
    else:
        if aj.master:
            logging.debug('Server stopped')
            cleanup()
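# A hypothetical launcher sketch for the entry point above. `AjentiConfig`
# and `DirectoryPluginProvider` are assumed names used purely for
# illustration; any `aj.config.BaseConfig` implementation and list of
# `aj.plugins.PluginProvider` instances will do.
from aj.config import AjentiConfig                    # assumed class name
from aj.plugins import DirectoryPluginProvider        # assumed class name

run(
    config=AjentiConfig('/etc/ajenti/config.yml'),    # path is illustrative
    plugin_providers=[DirectoryPluginProvider('/var/lib/ajenti/plugins')],
    debug_mode=True,
)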
Example #59
def preprocess(path):
    with open(path, "rb") as fh:
        lines = []
        it = iter(fh)

        for line in it:
            # zstd.h includes <stddef.h>, which is also included by cffi's
            # boilerplate. This can lead to duplicate declarations. So we strip
            # this include from the preprocessor invocation.
            #
            # The same thing happens for including zstd.h, so give it the same
            # treatment.
            #
            # We define ZSTD_STATIC_LINKING_ONLY, which is redundant with the inline
            # #define in zstdmt_compress.h and results in a compiler warning. So drop
            # the inline #define.
            if line.startswith((
                    b"#include <stddef.h>",
                    b'#include "zstd.h"',
                    b"#define ZSTD_STATIC_LINKING_ONLY",
            )):
                continue

            # The preprocessor environment on Windows doesn't define include
            # paths, so the #include of limits.h fails. We work around this
            # by removing that include and defining INT_MAX ourselves. This is
            # a bit hacky. But it gets the job done.
            # TODO make limits.h work on Windows so we ensure INT_MAX is
            # correct.
            if line.startswith(b"#include <limits.h>"):
                line = b"#define INT_MAX 2147483647\n"

            # ZSTDLIB_API may not be defined if we dropped zstd.h. It isn't
            # important so just filter it out.
            if line.startswith(b"ZSTDLIB_API"):
                line = line[len(b"ZSTDLIB_API "):]

            lines.append(line)

    fd, input_file = tempfile.mkstemp(suffix=".h")
    os.write(fd, b"".join(lines))
    os.close(fd)

    try:
        env = dict(os.environ)
        # cffi attempts to decode source as ascii. And the preprocessor
        # may insert non-ascii for some annotations. So try to force
        # ascii output via LC_ALL.
        env["LC_ALL"] = "C"

        if getattr(compiler, "_paths", None):
            env["PATH"] = compiler._paths
        process = subprocess.Popen(args + [input_file],
                                   stdout=subprocess.PIPE,
                                   env=env)
        output = process.communicate()[0]
        ret = process.poll()
        if ret:
            raise Exception("preprocessor exited with error")

        return output
    finally:
        os.unlink(input_file)
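# The function above relies on module-level `compiler` and `args` globals.
# A hedged sketch of how they might be prepared with distutils; the exact
# flags and lookups here are assumptions, not taken from the original module:
import distutils.ccompiler

compiler = distutils.ccompiler.new_compiler()
if compiler.compiler_type == "unix":
    # Run the C compiler in preprocess-only mode (-E).
    args = list(compiler.executables["compiler"]) + [
        "-E", "-DZSTD_STATIC_LINKING_ONLY"]
elif compiler.compiler_type == "msvc":
    compiler.initialize()
    # /EP preprocesses to stdout without emitting #line markers.
    args = [compiler.cc, "/EP", "/DZSTD_STATIC_LINKING_ONLY"]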
Example #60
    elif thisarg == 'write_flash':
        write_addr = sys.argv.pop(0)
        binary = sys.argv.pop(0)
    elif thisarg:
        cmdline = cmdline + [thisarg]

cmdline = cmdline + ['write_flash']
if write_option:
    cmdline = cmdline + [write_option]
cmdline = cmdline + ['--flash_size', 'detect']
cmdline = cmdline + [write_addr, binary]

erase_file = ''
if erase_addr:
    # Generate temporary empty (0xff) file
    eraser = tempfile.mkstemp()
    erase_file = eraser[1]
    os.write(eraser[0], bytearray([0xff] * int(erase_len, 0)))
    os.close(eraser[0])
    cmdline = cmdline + [erase_addr, erase_file]

try:
    esptool.main(cmdline)
except esptool.FatalError as e:
    sys.stderr.write('\nA fatal esptool.py error occurred: %s\n' % e)
finally:
    if erase_file:
        os.remove(erase_file)
    if any(sys.exc_info()):
        sys.exit(2)
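# The erase trick above generalizes to a small helper: flashing a file of
# 0xFF bytes returns a flash region to its erased state. A self-contained
# sketch (the helper name is illustrative):
import os
import tempfile

def make_blank_image(length):
    """Create a temporary file of `length` 0xFF bytes; caller removes it."""
    fd, path = tempfile.mkstemp()
    try:
        os.write(fd, b'\xff' * length)
    finally:
        os.close(fd)
    return path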