Example #1
    def _connect_action(self):
        @timedRetryOnExcept(max_retries=1,
                            timeout=self._locktimeout,
                            ex=CacheLockError)
        def trylock():
            return self.lock('elog')

        try:
            trylock()
        except CacheLockError as err:
            self.log.info('another elog is already active: %s',
                          sessionInfo(err.locked_by))
            sys.exit(-1)
        else:
            self._islocked = True

        # request current directory for the handler to start up correctly
        self._socket.sendall(
            to_utf8('logbook/directory%s\n###%s\n' % (OP_ASK, OP_ASK)))

        # read response
        data, n = b'', 0
        while not data.endswith(b'###!\n') and n < 1000:
            data += self._socket.recv(8192)
            n += 1

        self.storeSysInfo('elog')

        # send request for all relevant updates
        self._socket.sendall(to_utf8('@logbook/%s\n' % OP_SUBSCRIBE))

        self._process_data(data)
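Here timedRetryOnExcept is a NICOS utility decorator; a minimal sketch in the same spirit (not the NICOS implementation; timeout is read here as the delay between attempts):

import functools
import time

def timed_retry_on_except(max_retries=1, timeout=1.0, ex=Exception):
    """Retry the wrapped call on `ex`, pausing `timeout` seconds between tries."""
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_retries + 1):
                try:
                    return func(*args, **kwargs)
                except ex:
                    if attempt == max_retries:
                        raise
                    time.sleep(timeout)
        return wrapper
    return deco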
Example #2
    def _connect_action(self):
        self._keys_expired = False
        BaseCacheClient._connect_action(self)
        if self.showwatchdog:
            # also ask for and subscribe to all watchdog events
            self._socket.sendall(to_utf8('@watchdog/%s\n' % OP_WILDCARD))
            self._socket.sendall(to_utf8('@watchdog/%s\n' % OP_SUBSCRIBE))

        # use appname to distinguish between different instances
        self.storeSysInfo(session.appname)
Example #3
 def emit(self, html, suffix=u''):
     html = to_utf8(html)
     suffix = to_utf8(suffix)
     if self.fd:
         self.fd.write(html)
         # write suffix now, but place file pointer so that it's overwritten
         # on subsequent writes in the same state -- this way we can
         # guarantee that tags don't stay open
         if suffix:
             self.fd.write(suffix)
             self.fd.flush()
             self.fd.seek(-len(suffix), 2)
         else:
             self.fd.flush()
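The seek-back trick keeps the file well-formed after every flush: the suffix (e.g. closing HTML tags) is written and flushed, then the file position is rewound so the next write overwrites it. A standalone sketch with hypothetical file name and tags:

# Sketch of the overwrite-suffix trick; names are illustrative.
suffix = b'</body></html>'
with open('status.html', 'wb') as fd:
    for fragment in [b'<p>step 1</p>', b'<p>step 2</p>']:
        fd.write(fragment)
        fd.write(suffix)          # file is valid HTML right now
        fd.flush()
        fd.seek(-len(suffix), 2)  # whence=2: rewind over the suffix
    fd.write(suffix)              # final write leaves the suffix in place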
Example #4
 def _write_instrument(self, valuelist):
     # self.log.info('Instrument: %r', valuelist)
     instrument = self._data['instrument']
     for device, key, value in valuelist:
         if device not in ['demo', 'DEMO']:
             if key in ['facility', 'website']:
                 instrument[key] = to_utf8(value)
             elif key == 'instrument':
                 instrument['name'] = value
             elif key == 'operators':
                 instrument[key] = []
                 for operator in value:
                     instrument[key].append(to_utf8(operator))
             elif key == 'doi':
                 instrument['references'] = []
                 instrument['references'].append(to_utf8(value))
Example #5
def encode(key, entry):
    # Start with a buffer which can automatically grow
    builder = flatbuffers.Builder(136)

    # Create the strings - this has to be done before starting to build
    value_fb_str = None
    if entry.value is not None:
        value_fb_str = builder.CreateString(entry.value)
    key_fb_str = builder.CreateString(key)

    # Start building the buffer.
    # Flatbuffer must be constructed in the reverse order of the schema.
    # This might be a bug in flatbuffers.
    CacheEntryFB.CacheEntryStart(builder)
    if value_fb_str:
        CacheEntryFB.CacheEntryAddValue(builder, value_fb_str)
    CacheEntryFB.CacheEntryAddExpired(builder, entry.expired)
    # Do not write ttl if it is None
    if entry.ttl is not None:
        CacheEntryFB.CacheEntryAddTtl(builder, entry.ttl)
    CacheEntryFB.CacheEntryAddTime(builder, entry.time)
    CacheEntryFB.CacheEntryAddKey(builder, key_fb_str)
    fb_entry = CacheEntryFB.CacheEntryEnd(builder)
    builder.Finish(fb_entry, file_identifier=to_utf8(file_identifier))

    # Generate the output buffer
    fb_array = builder.Output()

    return bytes(fb_array)
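A hypothetical round-trip check of encode() (the CacheEntry constructor signature is assumed from the attribute accesses above; in a finished flatbuffer the four-byte file identifier sits at bytes 4..8):

# Hypothetical usage; CacheEntry is assumed to carry time/ttl/value/expired.
entry = CacheEntry(time=1630000000.0, ttl=None, value='1.234')
entry.expired = False
buf = encode('nicos/t/value', entry)
assert buf[4:8] == to_utf8(file_identifier)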
Example #6
    def add_legacy(self, pv_details):
        """ Configure given PVs to start forwarding. Specific topics and
        schemas can be provided. If not, default instrument topic and
        schemas will be used.
        :param pv_details: PVs and the tuple of (topic, schema)
        :type pv_details: dict(pvname, (kafka-topic, schema))
        """
        streams = []
        for pv in pv_details:
            topic, schema = pv_details[pv]
            if not topic:
                topic = self.instpvtopic
            if not schema:
                schema = self.instpvschema

            self._issued[pv] = (topic, schema)

            for broker in self.brokers:
                converter = {
                    "topic": f"{broker}/{topic}",
                    "schema": schema,
                }
                stream = {
                    "converter": converter,
                    "channel_provider_type": "ca",
                    "channel": pv,
                }
                streams.append(stream)

        cmd = {"cmd": "add", "streams": streams}

        self.send(self.cmdtopic, to_utf8(json.dumps(cmd)))
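A hypothetical call, mixing an explicit topic/schema with the instrument defaults (PV and topic names are illustrative):

forwarder.add_legacy({
    'SQ:AMOR:mota.RBV': ('amor_motion', 'f142'),  # explicit topic and schema
    'SQ:AMOR:motb.RBV': (None, None),  # falls back to instpvtopic/instpvschema
})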
Example #7
 def _hash(self, password):
     password = to_utf8(from_maybe_utf8(password))
     if self.hashing == 'sha1':
         password = hashlib.sha1(password).hexdigest()
     elif self.hashing == 'md5':
         password = hashlib.md5(password).hexdigest()
     return password
Example #8
 def _sender_thread(self):
     while not self.stoprequest:
         data = self.send_queue.get()
         # self.log.debug('sending: %r', data)
         if self.sock is None:  # connection already closed
             return
         while True:
             try:
                 self.sock.sendall(to_utf8(data))
             except socket.timeout:
                 self.log.warning('send timed out, shutting down')
                 self.closedown()
             except socket.error as err:
                 if err.args[0] == EAGAIN:
                     sleep(CYCLETIME)
                     continue
                 self.log.warning('other end closed, shutting down',
                                  exc=err)
                 self.closedown()
             except Exception:
                 # if we can't write (or it would be blocking), there is some
                 # serious problem: forget writing and close down
                 self.log.warning('other end closed, shutting down')
                 self.closedown()
             break
Example #9
 def _process_data(self,
                   data,
                   sync_str=to_utf8(SYNC_MARKER + OP_TELLOLD),
                   lmatch=line_pattern.match,
                   mmatch=msg_pattern.match):
     # n = 0
     i = 0  # avoid making a string copy for every line
     match = lmatch(data, i)
     while match:
         line = match.group(1)
         i = match.end()
         if sync_str in line:
             self.log.debug('process data: received sync: %r', line)
             self._synced = True
         else:
             msgmatch = mmatch(from_utf8(line))
             # ignore invalid lines
             if msgmatch:
                 # n += 1
                 try:
                     self._handle_msg(**msgmatch.groupdict())
                 except Exception:
                     self.log.exception('error handling message %r',
                                        msgmatch.group())
         # continue loop
         match = lmatch(data, i)
     # self.log.debug('processed %d items', n)
     return data[i:]
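The running index with pattern.match(data, i) scans the buffer line by line without ever slicing it. A self-contained sketch of the same technique (the pattern definition here is illustrative, not the module's):

import re

line_pattern = re.compile(br'([^\n]*)\n')
data = b'a=1\nb=2\npartial'
i, lines = 0, []
match = line_pattern.match(data, i)  # anchored at position i, no copy made
while match:
    lines.append(match.group(1))
    i = match.end()
    match = line_pattern.match(data, i)
assert lines == [b'a=1', b'b=2'] and data[i:] == b'partial'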
Example #10
 def _receiver_thread(self):
     # we will never read any more data: just process what we got and send
     # any needed responses synchronously
     try:
         self._process_data(self.data,
                            lambda reply: self._sendall(to_utf8(reply)))
     except Exception as err:
         self.log.warning('error handling UDP data %r', self.data, exc=err)
     self.closedown()
Example #11
def ListAuth(request):
    passwds = []
    for (user, pw, level) in request.function.passwd:
        # note: we currently allow empty password to match any password!
        # pylint: disable=compare-to-empty-string
        hashed = hashlib.sha1(to_utf8(pw)).hexdigest() if pw != '' else pw
        passwds.append((user, hashed, level))

    Auth = ListAuthenticator('authenticator', hashing='sha1', passwd=passwds)
    yield Auth
Example #12
 def storeSysInfo(self, service):
     """Store info about the service in the cache."""
     if not self._socket:
         return
     try:
         key, res = getSysInfo(service)
         msg = '%s@%s%s%s\n' % (currenttime(), key, OP_TELL,
                                cache_dump(res))
         self._socket.sendall(to_utf8(msg))
     except Exception:
         self.log.exception('storing sysinfo failed')
Example #13
 def _writeLogs(self):
     if not self._logfile:
         return
     loglines = []
     loglines.append('%-15s\tmean\tstdev\tmin\tmax' % '# dev')
     for dev in self.dataset.valuestats:
         loglines.append('%-15s\t%.3f\t%.3f\t%.3f\t%.3f' %
                         ((dev, ) + self.dataset.valuestats[dev]))
     self._logfile.seek(0)
     for line in loglines:
         self._logfile.write(to_utf8('%s\n' % line))
     self._logfile.flush()
Example #14
 def on_saveBtn_clicked(self):
     initialdir = self.client.eval('session.experiment.proposalpath', '')
     fn = QFileDialog.getSaveFileName(self, 'Save protocol', initialdir,
                                      'Text files (*.txt)')[0]
     if not fn:
         return
     try:
         text = self.outText.toPlainText()
         with open(fn, 'wb') as fp:
             fp.write(to_utf8(text))
     except Exception as err:
         self.showError('Could not save: %s' % err)
Example #15
 def _write_exptype(self, l):
     data = 'EXPTYPE'
     self._defcmd(data)
     self._string(data)
     buf = b'\x80'
     sbuf = b''
     for item in l:
         item = to_utf8(item)
         buf += pack('BBBB', 0x80, CHARTYPE, 0x81, len(item))
         sbuf += item
     buf += b'\x81\x01'
     self._file_write(buf + sbuf)
Example #16
 def mainLoop(self):
     while not self._stoprequest:
         try:
             if self._content:
                 content = u''.join(ct.getHTML() for ct in self._content)
                 safeWriteFile(self.filename, to_utf8(content), mode='wb',
                               maxbackups=0)
         except Exception:
             self.log.error('could not write status to %r', self.filename,
                            exc=1)
         else:
             self.log.debug('wrote status to %r', self.filename)
         sleep(self.interval)
Example #17
 def doFinish(self):
     """Automatic protocol generation before finishing a user experiment."""
     if self.proptype != 'user':
         return
     proto_path = path.join(self.proposalpath, 'protocol.txt')
     try:
         text = self._generate_protocol(with_ts=True)
         with open(proto_path, 'wb') as fp:
             fp.write(to_utf8(text))
     except Exception:
         self.log.warning('Error during protocol generation', exc=1)
     else:
         self.log.info('Protocol generated at %s', proto_path)
Example #18
    def _connect_action(self):
        # send request for all keys and updates....
        # (send a single request for a nonexisting key afterwards to
        # determine the end of data)
        msg = '@%s%s\n%s%s\n' % (self._prefix, OP_WILDCARD, END_MARKER, OP_ASK)
        self._socket.sendall(to_utf8(msg))

        # read response
        data, n = b'', 0
        sentinel = to_utf8(END_MARKER + OP_TELLOLD + '\n')
        while not data.endswith(sentinel) and n < 1000:
            data += self._socket.recv(BUFSIZE)
            n += 1

        # send request for all updates
        msg = '@%s%s\n' % (self._prefix, OP_SUBSCRIBE)
        self._socket.sendall(to_utf8(msg))
        for prefix in self._prefixcallbacks:
            msg = '@%s%s\n' % (prefix, OP_SUBSCRIBE)
            self._socket.sendall(to_utf8(msg))

        self._process_data(data)
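The bounded read-until-marker loop recurs in several _connect_action variants (Examples #1, #18 and #19); a minimal helper factoring it out (a sketch; BUFSIZE is the module constant used above):

def recv_until(sock, sentinel, bufsize=BUFSIZE, maxblocks=1000):
    """Read from sock until the data ends with sentinel, or give up."""
    data, n = b'', 0
    while not data.endswith(sentinel) and n < maxblocks:
        data += sock.recv(bufsize)
        n += 1
    return data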
Example #19
    def _connect_action(self):
        # like for BaseCacheClient, but without request for updates
        msg = '@%s%s\n###%s\n' % (self._prefix, OP_WILDCARD, OP_ASK)
        self._socket.sendall(to_utf8(msg))

        # read response
        data, n = b'', 0
        while not data.endswith(b'###!\n') and n < 1000:
            data += self._socket.recv(BUFSIZE)
            n += 1

        self._process_data(data)

        # stop immediately after reading data
        self._stoprequest = True
Example #20
 def authenticate(self, username, password):
     try:
         pam.authenticate(username, password, resetcred=0)
         entry = pwd.getpwnam(username)
         idx = access_re.search(to_utf8(entry.pw_gecos))
         if idx:
             access = int(idx.group('level'))
             if access in (GUEST, USER, ADMIN):
                 return User(username, access)
         return User(username, self.defaultlevel)
     except pam.PAMError as err:
         raise AuthenticationError('PAM authentication failed: %s' % err)
     except Exception as err:
         raise AuthenticationError(
             'exception during PAM authentication: %s' % err)
Example #21
    def writeData(self, fp, image):
        _metainfo = self.dataset.metainfo
        detectors = self.sink.detectors
        detector = detectors[0] if detectors else 'adet'
        _resosteps = _metainfo[detector, 'resosteps'][0]
        _range = _metainfo[detector, 'range'][0]
        _stepsize = _range / _resosteps
        _startpos = _metainfo[detector, '_startpos'][0]
        _start = _startpos - (_resosteps - 1) * _stepsize
        fp.write(b'position\tcounts\n')
        for i, v in enumerate(image):
            _pos = _start + i * _stepsize
            fp.write(to_utf8('%.2f\t%d\n' % (_pos, v.sum())))

        fp.flush()
Example #22
    def _update_topic(self, key, entry):
        # This method is responsible for communicating with and updating
        # all the topics that should be updated. Subclasses can
        # (re)implement it if messages are to be produced to other topics.
        self.log.debug('writing: %s -> %s', key, entry.value)

        # For the log-compacted topic, key deletion happens when None is
        # passed as the value for the key.
        value = None
        if entry.value is not None:
            # Only when the key deletion is not required
            value = self._attached_serializer.encode(key, entry)

        self._producer.send(topic=self.currenttopic,
                            value=value,
                            key=to_utf8(key),
                            timestamp_ms=int(entry.time * 1000))

        # clear all local buffers and produce pending messages
        self._producer.flush()
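Producing None as the value is the standard tombstone mechanism of log-compacted Kafka topics: after compaction the key disappears entirely. A minimal sketch with kafka-python (broker and topic names illustrative):

from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers='localhost:9092')
# a None value is a tombstone: compaction eventually drops the key
producer.send('nicos-cache', key=b'nicos/t/value', value=None)
producer.flush()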
Example #23
def txtplot(x, y, xlab, ylab, xterm_mode=False):
    """Plot data with gnuplot's dumb ASCII terminal."""
    if not x.size:
        raise ValueError('Empty plot')
    if len(x) != len(y):
        raise ValueError('Unequal lengths of X and Y values')

    try:
        gnuplot = createSubprocess(['gnuplot', '--persist'], shell=False,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)

        if xterm_mode:
            cmd = ['set term xterm']
        else:
            cmd = ['set term dumb']
        cmd.append('set xlabel "' + xlab + '"')
        cmd.append('set ylabel "' + ylab + '"')
        cmd.append('plot "-" with points notitle')
        for xy in zip(x, y):
            cmd.append('%s %s' % xy)
        cmd.append('e\n')

        cmd = '\n'.join(cmd)
        out = gnuplot.communicate(to_utf8(cmd))[0]
        lines = [line for line in out.splitlines() if line]
        if xterm_mode:
            lines += ['Plotting in xterm Tektronix window.',
                      '\x1b_If you can only see a lot of incomprehensible '
                      'text, use xterm instead of your current terminal '
                      'emulator.\x1b\\']
        return lines

    except OSError:
        raise RuntimeError('Could not execute gnuplot for text plot')
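A hypothetical call (requires gnuplot on the PATH; the returned lines are bytes, since communicate() was fed bytes):

import numpy as np

for line in txtplot(np.arange(10), np.arange(10) ** 2, 'x', 'x squared'):
    print(from_utf8(line))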
Example #24
def ftpUpload(filename, logger=None):
    """Uploads the given file to an user-accessible location

    returns a http download link for download purposes.
    """
    # we like to obscure the data at least a little bit.
    subdir = md5(to_utf8(filename + str(time.time()))).hexdigest()
    basename = path.basename(filename)

    try:
        with open(filename, 'rb') as fp:
            ftp = FTP()

            ftp.connect(FTP_SERVER, FTP_PORT)
            ftp.login(FTP_USER, FTP_P)

            try:
                # may raise if the directory already exists; should be rare
                ftp.mkd(subdir)
            except Exception:
                pass
            ftp.cwd(subdir)

            ftp.storbinary('STOR %s' % basename, fp)

            ftp.quit()
            ftp.close()
    except Exception:
        if logger:
            logger.error(
                'Uploading ftp-file failed! Please check config and '
                'log files',
                exc=1)
        raise

    return 'http://ftp.frm2.tum.de/outgoing/mdata/%s/%s' % (subdir, basename)
Example #25
    def _single_request(self, tosend, sentinel=b'\n', retry=2, sync=False):
        """Communicate over the secondary socket."""
        if not self._socket:
            self._disconnect('single request: no socket')
            if not self._socket:
                raise CacheError('cache not connected')
        if sync:
            # sync has to be false for lock requests, as these occur during startup
            self._queue.join()
        with self._sec_lock:
            if not self._secsocket:
                try:
                    self._secsocket = tcpSocket(self.cache, DEFAULT_CACHE_PORT)
                except Exception as err:
                    self.log.warning(
                        'unable to connect secondary socket '
                        'to %s: %s', self.cache, err)
                    self._secsocket = None
                    self._disconnect('secondary socket: could not connect')
                    raise CacheError('secondary socket could not be created')

            try:
                # write request
                # self.log.debug("get_explicit: sending %r", tosend)
                self._secsocket.sendall(to_utf8(tosend))

                # give 10 seconds time to get the whole reply
                timeout = currenttime() + 10
                # read response
                data = b''
                while not data.endswith(sentinel):
                    newdata = self._secsocket.recv(BUFSIZE)  # blocking read
                    if not newdata:
                        raise socket.error('cache closed connection')
                    if currenttime() > timeout:
                        # do not just break, we need to reopen the socket
                        raise socket.error('getting response took too long')
                    data += newdata
            except socket.error:
                self.log.warning('error during cache query', exc=1)
                closeSocket(self._secsocket)
                self._secsocket = None
                if retry:
                    for m in self._single_request(tosend, sentinel, retry - 1):
                        yield m
                    return
                raise

        lmatch = line_pattern.match
        mmatch = msg_pattern.match
        i = 0
        # self.log.debug("get_explicit: data =%r", data)
        match = lmatch(data, i)
        while match:
            line = match.group(1)
            i = match.end()
            # advance to the next line before filtering, otherwise an
            # invalid line would make the loop spin forever on the same
            # match
            match = lmatch(data, i)
            msgmatch = mmatch(from_utf8(line))
            if not msgmatch:
                # ignore invalid lines
                continue
            # self.log.debug('line processed: %r', line)
            yield msgmatch
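A hypothetical use of the generator, asking for a single key ('?' as the ask operator is an assumption based on the module's OP_ASK constant):

for msgmatch in client._single_request('@nicos/t/value?\n'):
    print(msgmatch.groupdict())
    break  # only the first parsed message is of interest here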
Example #26
    def _worker_inner(self):
        data = b''
        process = self._process_data

        while not self._stoprequest:
            if self._should_connect:
                if not self._socket:
                    self._connect()
                    if not self._socket:
                        self._wait_retry()
                        continue
            else:
                if self._socket:
                    self._disconnect()
                self._wait_retry()
                continue

            # process data so far
            data = process(data)

            # wait for a whole line of data to arrive
            while b'\n' not in data and self._socket and self._should_connect \
                  and not self._stoprequest:

                # optionally do some action while waiting
                self._wait_data()

                if self._queue.empty():
                    # NOTE: the queue.empty() check is not 100% reliable, but
                    # that is not important here: all we care is about not
                    # having the select always return immediately for writing
                    writelist = []
                else:
                    writelist = [self._socket]

                # read or write some data
                while 1:
                    try:
                        res = select.select([self._socket], writelist, [],
                                            self._selecttimeout)
                    except EnvironmentError as e:
                        if e.errno == errno.EINTR:
                            continue
                        raise
                    except TypeError:
                        # socket was None, let the outer loop handle that
                        res = ([], [], [])
                    break

                if res[1]:
                    # determine if something needs to be sent
                    tosend = ''
                    itemcount = 0
                    try:
                        # bunch a few messages together, but not unlimited
                        for _ in range(10):
                            tosend += self._queue.get(False)
                            itemcount += 1
                    except queue.Empty:
                        pass
                    # write data
                    try:
                        self._socket.sendall(to_utf8(tosend))
                    except Exception:
                        self._disconnect('disconnect: send failed')
                        # report data as processed, but then re-queue it to send
                        # after reconnect
                        for _ in range(itemcount):
                            self._queue.task_done()
                        data = b''
                        self._queue.put(tosend)
                        break
                    for _ in range(itemcount):
                        self._queue.task_done()
                if res[0]:
                    # got some data
                    try:
                        newdata = self._socket.recv(BUFSIZE)
                    except Exception:
                        newdata = b''
                    if not newdata:
                        # no new data from blocking read -> abort
                        self._disconnect('disconnect: recv failed')
                        data = b''
                        break
                    data += newdata

        if self._socket:
            # send rest of data
            tosend = ''
            itemcount = 0
            try:
                while 1:
                    tosend += self._queue.get(False)
                    itemcount += 1
            except queue.Empty:
                pass
            try:
                self._socket.sendall(to_utf8(tosend))
            except Exception:
                self.log.debug('exception while sending last batch of updates',
                               exc=1)
                # no reraise, we'll disconnect below anyways
            for _ in range(itemcount):
                self._queue.task_done()

        # end of while loop
        self._disconnect()
Example #27
    def writeHeader(self, fp, metainfo, image):
        shape = image.shape

        try:
            SD = '%.4f' % ((session.getDevice('det1_z').read() -
                            session.getDevice('st1_x').read()) / 1000)
        except Exception:
            self.log.warning(
                "can't determine SD (detector distance), "
                "using 0 instead",
                exc=1)
            SD = 0

        finished = currenttime()
        # totalTime = finished - self.dataset.started
        Sum = image.sum()
        Moni1 = 0
        Moni2 = 0
        Time = 0
        try:
            Moni1 = float(session.getDevice('det1_mon1').read()[0])
            Moni2 = float(session.getDevice('det1_mon2').read()[0])
            Time = float(session.getDevice('det1_timer').read()[0])
        except Exception:
            self.log.warning(
                "can't determine all monitors, "
                "using 0.0 instead", exc=1)

        try:
            # Setupfile = session.getDevice('det1_image').histogramfile
            Histfile = metainfo['det1_image', 'histogramfile'][1]
        except Exception:
            Histfile = ''

        try:
            # Listfile = session.getDevice('det1_image').listmodefile.split('\'')[1]
            Listfile = metainfo['det1_image', 'listmodefile'][1].split('\'')[1]
        except Exception:
            Listfile = ''

        try:
            # Setupfile = session.getDevice('det1_image').configfile
            Setupfile = metainfo['det1_image', 'configfile'][1]
        except Exception:
            Setupfile = 'setup'

        try:
            # LookUpTable = session.getDevice('det1_image').calibrationfile
            LookUpTable = metainfo['det1_image', 'calibrationfile'][1]
        except Exception:
            LookUpTable = 'lookup'

        time_format = '%I:%M:%S %p'
        date_format = '%m/%d/%Y'

        metadata = DeviceValueDict(
            fileName=os.path.basename(self._file.filepath),
            fileDate=strftime(date_format, localtime(self.dataset.started)),
            fileTime=strftime(time_format, localtime(self.dataset.started)),
            FromDate=strftime(date_format, localtime(self.dataset.started)),
            FromTime=strftime(time_format, localtime(self.dataset.started)),
            ToDate=strftime(date_format, localtime(finished)),
            ToTime=strftime(time_format, localtime(finished)),
            DataSize=shape[0] * shape[1],
            DataSizeX=shape[1],
            DataSizeY=shape[0],
            Environment='_'.join(session.explicit_setups),
            SD=SD,
            Sum='%d' % Sum,
            Moni1='%d' % Moni1,
            Moni2='%d' % Moni2,
            Sum_Time='%.6f' % (Sum / Time) if Time else 'Inf',
            Sum_Moni1='%.6f' % (Sum / Moni1) if Moni1 else 'Inf',
            Sum_Moni2='%.6f' % (Sum / Moni2) if Moni2 else 'Inf',
            Histfile=Histfile,
            Listfile=Listfile,
            Setupfile=Setupfile,
            LookUpTable=LookUpTable,
            Command=self.dataset.info,
        )

        nicosheader = []

        # no way to map nicos-categories to BerSANS sections :(
        # also ignore some keys :(
        ignore = ('det1_lastlistfile', 'det1_lasthistfile')
        for (dev, param), (value, strvalue, _unit, _category) in \
                iteritems(self.dataset.metainfo):
            devname_key = '%s_%s' % (dev, param)
            if devname_key in ignore:
                continue
            metadata[devname_key] = value
            nicosheader.append('%s=%s' % (devname_key, strvalue))

        nicosheader = b'\n'.join(sorted(map(to_ascii_escaped, nicosheader)))
        self.log.debug('nicosheader starts with: %40s', nicosheader)

        # write Header
        header = BERSANSHEADER
        if 'tisane' in session.explicit_setups:
            header += TISANEHEADER
        for line in header.split('\n'):
            self.log.debug('testing header line: %r', line)
            self.log.debug(line % metadata)
            fp.write(to_utf8(line % metadata))
            fp.write(b'\n')

        # also append nicos header
        fp.write(nicosheader.replace(b'\\n', b'\n'))  # why needed?
        fp.write(b'\n\n%Counts\n')
        fp.flush()
Example #28
    def connect(self, conndata, eventmask=None):
        """Connect to a NICOS daemon.

        *conndata* is a ConnectionData object.

        *eventmask* is a tuple of event names that should not be sent to this
        client.
        """
        self.disconnecting = False
        if self.isconnected:
            raise RuntimeError('client already connected')

        try:
            self.transport.connect(conndata)
        except socket.error as err:
            msg = err.args[1] if len(err.args) >= 2 else str(err)
            self.signal('failed', 'Server connection failed: %s.' % msg, err)
            return
        except Exception as err:
            self.signal('failed', 'Server connection failed: %s.' % err, err)
            return

        # read banner
        try:
            success, banner = self.transport.recv_reply()
            if not success:
                raise ProtocolError('invalid response format')
            if 'daemon_version' not in banner:
                raise ProtocolError('daemon version missing from response')
            daemon_proto = banner.get('protocol_version', 0)
            if daemon_proto != PROTO_VERSION:
                if daemon_proto in COMPATIBLE_PROTO_VERSIONS:
                    self.compat_proto = daemon_proto
                else:
                    raise ProtocolError('daemon uses protocol %d, but this '
                                        'client requires protocol %d, do you '
                                        'need to update NICOS?' %
                                        (daemon_proto, PROTO_VERSION))
        except Exception as err:
            self.signal(
                'failed', 'Server (%s:%d) handshake failed: %s.' %
                (conndata.host, conndata.port, err), err)
            return

        # log-in sequence
        self.isconnected = True
        password = conndata.password
        pw_hashing = banner.get('pw_hashing', 'sha1')

        if pw_hashing[0:4] == 'rsa,':
            if rsa is not None:
                encodedkey = banner.get('rsakey', None)
                if encodedkey is None:
                    raise ProtocolError(
                        'rsa requested, but rsakey missing in banner')
                if not PY2 and not isinstance(encodedkey, bytes):
                    encodedkey = bytes(encodedkey, 'utf-8')
                pubkey = rsa.PublicKey.load_pkcs1(b64decode(encodedkey))
                password = rsa.encrypt(to_utf8(password), pubkey)
                password = 'RSA:' + b64encode(password).decode()
            else:
                pw_hashing = pw_hashing[4:]
        if pw_hashing == 'sha1':
            password = hashlib.sha1(to_utf8(password)).hexdigest()
        elif pw_hashing == 'md5':
            password = hashlib.md5(to_utf8(password)).hexdigest()

        credentials = {
            'login': conndata.user,
            'passwd': password,
            'display': '',
        }

        response = self.ask('authenticate', credentials)
        if not response:
            self._close()
            return
        self.user_level = response['user_level']

        if eventmask:
            self.tell('eventmask', eventmask)

        self.transport.connect_events(conndata)

        # start event handler
        self.event_thread = createThread('event handler', self.event_handler)

        self.host, self.port = conndata.host, conndata.port
        self.login = conndata.user
        self.viewonly = conndata.viewonly

        self.daemon_info = banner
        self.signal('connected')
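The RSA leg in isolation, together with the daemon-side decryption it implies (a sketch using the rsa package; the key size is illustrative, the 'RSA:' prefix follows the convention above):

import rsa
from base64 import b64decode, b64encode

pubkey, privkey = rsa.newkeys(512)  # the daemon generates the key pair
wire = 'RSA:' + b64encode(rsa.encrypt(b'secret', pubkey)).decode()
# the daemon strips the prefix and decrypts:
assert rsa.decrypt(b64decode(wire[4:]), privkey) == b'secret'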
Example #29
 def send_command(self, cmdname, args):
     data = self.serializer.serialize_cmd(cmdname, args)
     self.sock.send_multipart([to_utf8(cmdname), b'', data])
Example #30
 def _string(self, value):
     value = to_utf8(value)
     buf = pack('B', CHARTYPE) + self._len(len(value)) + value.upper()
     self._file_write(buf)