def __init__(self, init=None, *args, **kwargs):
    """
        Initialization of the NIST Object.
        
        :param init: Initialization data. Can be a NIST object, a file link, or a raw string.
        :type init: NIST or str
        
        All biometric information is stored in the 'self.data' recursive
        default dictionary. The information is laid out as follows:
        
            self.data[ ntype ][ idc ][ tagid ]
        
        To get and set data, use the :func:`~NIST.traditional.NIST.get_field`
        and :func:`~NIST.traditional.NIST.set_field` functions.
    """
    debug.info("Initialization of the NIST object")
    
    self.stdver = ""
    
    self.fileuri = None
    self.filename = None
    self.data = defDict()
    
    self.date = datetime.datetime.now().strftime("%Y%m%d")
    self.timestamp = int(time.time())
    
    if init is not None:
        self.load_auto(init)
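# Illustrative sketch of the self.data layout described in __init__ above.
# The field numbers and values are assumptions taken from the Type-01 example
# shown in dump() below; they are not guaranteed for any particular file.
#
#   >>> n = NIST( "./sample/pass-type-9-13-m1.an2" )
#   >>> n.get_field( "1.002" )          # standard version, e.g. '0300'
#   >>> n.data[ 1 ][ 0 ][ 2 ]           # the same value through the raw dictionary
#   >>> n.set_field( "1.004", "USA" )   # set the type of transaction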
def read(self, infile):
    """
        Open the 'infile' file and transmit the data to the 'load' function.
        
        :param infile: URI of the NIST file to read and load.
        :type infile: str
        
        Usage:
        
            >>> from NIST import NIST
            >>> n = NIST()
            >>> n.read( "./sample/pass-type-9-13-m1.an2" )
            >>> n
            NIST object, Type-01, Type-02, Type-09, Type-13
    """
    debug.info("Reading from file : %s" % infile)
    
    self.fileuri = infile
    self.filename = os.path.splitext(os.path.basename(infile))[0]
    
    with open(infile, "rb") as fp:
        data = fp.read()
    
    # A file starting with '{' and ending with '}' is treated as a JSON
    # serialization of a NIST object; anything else is parsed as a
    # traditional NIST transaction.
    if data[0] == "{" and data[-1] == "}":
        self.from_json(data)
    else:
        self.load(data)
def dumpbin( self ):
    """
        Return a binary dump of the NIST object, suitable for writing to a
        file opened in binary ( "wb" ) mode.
        
        :return: Binary representation of the NIST object.
        :rtype: str
    """
    debug.info( "Dumping NIST in binary" )
    
    self.clean()
    self.patch_to_standard()
    
    outnist = []
    
    for ntype in self.get_ntype():
        for idc in self.get_idc( ntype ):
            if ntype == 4:
                # Type-04 records are binary: fields are written as fixed-size
                # integers; the FGP field is padded to 6 bytes with 0xFF for
                # unused finger positions.
                outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 1 ] ), 4 * 8 ) )
                outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 2 ] ), 1 * 8 ) )
                outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 3 ] ), 1 * 8 ) )
                outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 4 ] ), 1 * 8 ) )
                outnist.append( chr( 0xFF ) * 5 )
                outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 5 ] ), 1 * 8 ) )
                outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 6 ] ), 2 * 8 ) )
                outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 7 ] ), 2 * 8 ) )
                outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 8 ] ), 1 * 8 ) )
                outnist.append( self.data[ ntype ][ idc ][ 999 ] )
            else:
                # All other record types are ASCII tagged fields, separated by
                # GS and terminated by FS.
                od = OrderedDict( sorted( self.data[ ntype ][ idc ].items() ) )
                outnist.append( join( GS, [ tagger( ntype, tagid ) + value for tagid, value in od.iteritems() ] ) + FS )
    
    return "".join( outnist )
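# Hedged usage sketch for dumpbin(): the returned value is the raw byte string
# of the transaction, so it can be written to disk directly (write() below
# wraps exactly this pattern). The output path is illustrative.
#
#   >>> with open( "/tmp/output.an2", "wb" ) as fp:
#   ...     fp.write( n.dumpbin() )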
def clean(self):
    """
        Clean all unused fields in the self.data variable.
        
        This function checks the content of the NIST file only for the fields
        described in the NIST standard. For particular implementations and
        implementation-specific fields, overload this function in a subclass.
        
        Checks done in this function:
        
            * Delete all empty records, IDCs and fields
            * Recalculate the content of the field 1.003
            * Check the IDC field for every ntype (fields x.002)
            * Reset all lengths (fields x.001)
    """
    debug.info("Cleaning the NIST object")
    
    # Delete all empty data.
    for ntype in self.get_ntype():
        for idc in self.data[ntype].keys():
            # Fields
            for tagid in self.data[ntype][idc].keys():
                value = self.get_field("%d.%03d" % (ntype, tagid), idc)
                if value == "" or value is None:
                    debug.debug("Field %02d.%03d IDC %d deleted" % (ntype, tagid, idc), 1)
                    del(self.data[ntype][idc][tagid])
            
            # IDC
            if len(self.data[ntype][idc]) == 0:
                debug.debug("%02d IDC %d deleted" % (ntype, idc), 1)
                del(self.data[ntype][idc])
        
        # ntype
        if len(self.data[ntype]) == 0:
            debug.debug("%02d deleted" % (ntype), 1)
            del(self.data[ntype])
    
    # Recheck the content of the NIST object and update the 1.003 field
    content = []
    for ntype in self.get_ntype()[1:]:
        for idc in self.get_idc(ntype):
            debug.debug("Type-%02d, IDC %d present" % (ntype, idc), 1)
            content.append("%s%s%s" % (ntype, US, idc))
    
    content.insert(0, "%s%s%s" % (1, US, len(content)))
    self.set_field("1.003", join(RS, content))
    
    # Check the IDC values for all records
    for ntype in self.get_ntype()[1:]:
        for idc in self.get_idc(ntype):
            debug.debug("Type-%02d, IDC %d: update the IDC field (%02d.%03d)" % (ntype, idc, ntype, 2), 1)
            self.set_field((ntype, 2), idc, idc)
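# Hedged sketch of the clean-up behaviour described above: blanking a field and
# calling clean() removes it from self.data, and the content field 1.003 is
# recomputed from the remaining records. The field number 2.004 is illustrative.
#
#   >>> n.set_field( "2.004", "" )
#   >>> n.clean()                       # the empty 2.004 field is deleted, 1.003 is rebuilt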
def write(self, outfile):
    """
        Write the NIST object to a specific file.
        
        :param outfile: URI of the file to write to.
        :type outfile: str
    """
    debug.info("Write the NIST object to '%s'" % outfile)
    
    # Create the output directory if it does not exist yet.
    if not os.path.isdir(os.path.dirname(os.path.realpath(outfile))):
        os.makedirs(os.path.dirname(os.path.realpath(outfile)))
    
    with open(outfile, "wb+") as fp:
        fp.write(self.dumpbin())
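# Hedged usage sketch for write(): missing parent directories are created
# before the binary dump is written. The output path is illustrative.
#
#   >>> n.write( "./output/sample.an2" )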
def load(self, data):
    """
        Load the data passed as parameter and populate all the internal
        dictionaries. This function is the main function doing the decoding
        of the NIST file.
        
        :param data: Raw data read from file.
        :type data: str
    """
    debug.info("Loading object")
    
    records = data.split(FS)
    
    # NIST Type-01
    debug.debug("Type-01 parsing", 1)
    
    t01 = records[0].split(GS)
    record01 = {}
    
    ntypeInOrder = []
    
    for field in t01:
        tag, ntype, tagid, value = fieldSplitter(field)
        
        if tagid == 1:
            LEN = int(value)
        
        if tagid == 3:
            ntypeInOrder = self.process_fileContent(value)
        
        debug.debug("%d.%03d:\t%s" % (ntype, tagid, value), 2)
        record01[tagid] = value
    
    # Store in IDC = 0 even if the standard implies no IDC for Type-01
    self.data[1][0] = record01
    
    data = data[LEN:]
    
    # NIST Type-02 and after
    debug.debug("Expected Types : %s" % ", ".join(map(str, ntypeInOrder)), 1)
    
    for ntype in ntypeInOrder:
        debug.debug("Type-%02d parsing" % ntype, 1)
        LEN = 0
        
        if ntype in [2, 9, 10, 13, 14, 15, 16, 17, 18, 19, 20, 21, 98, 99]:
            # ASCII tagged-field record: fields are separated by GS, the
            # record is terminated by FS.
            current_type = data.split(FS)
            
            tx = current_type[0].split(GS)
            
            recordx = {}
            offset = 0
            idc = -1
            
            for t in tx:
                try:
                    tag, ntype, tagid, value = fieldSplitter(t)
                except:
                    # The x.999 (binary data) field may not split as a
                    # regular tagged field.
                    tagid = 999
                
                tag = "%s.%s" % (ntype, tagid)
                
                if tagid == 1:
                    LEN = int(value)
                elif tagid == 2:
                    idc = int(value)
                elif tagid == 999:
                    # Binary data field: take everything up to the end of
                    # the current record.
                    if ntype == 9:
                        end = LEN
                    else:
                        end = LEN - 1
                    
                    offset += len(tag) + 1
                    
                    value = data[offset:end]
                    debug.debug("%d.%03d:\t%s" % (ntype, tagid, bindump(value)), 2)
                    recordx[tagid] = value
                    break
                
                debug.debug("%d.%03d:\t%s" % (ntype, tagid, value), 2)
                recordx[tagid] = value
                offset += len(t) + 1
            
            self.data[ntype][idc] = recordx
        
        elif ntype == 4:
            # Type-04 record: binary, fixed-layout fields.
            iter = stringIterator(data)
            
            LEN = binstring_to_int(iter.take(4))
            IDC = binstring_to_int(iter.take(1))
            IMP = binstring_to_int(iter.take(1))
            FGP = binstring_to_int(iter.take(1))
            iter.take(5)
            ISR = binstring_to_int(iter.take(1))
            HLL = binstring_to_int(iter.take(2))
            VLL = binstring_to_int(iter.take(2))
            GCA = binstring_to_int(iter.take(1))
            DAT = iter.take(LEN - 18)
            
            LEN = str(LEN)
            IDC = str(IDC)
            IMP = str(IMP)
            FGP = str(FGP)
            ISR = str(ISR)
            HLL = str(HLL)
            VLL = str(VLL)
            GCA = str(GCA)
            
            debug.debug("Parsing Type-04 IDC %s" % IDC, 2)
            debug.debug("LEN: %s" % LEN, 3)
            debug.debug("IDC: %s" % IDC, 3)
            debug.debug("IMP: %s" % IMP, 3)
            debug.debug("FGP: %s" % FGP, 3)
            debug.debug("ISR: %s" % ISR, 3)
            debug.debug("HLL: %s" % HLL, 3)
            debug.debug("VLL: %s" % VLL, 3)
            debug.debug("GCA: %s (%s)" % (GCA, decode_gca(GCA)), 3)
            debug.debug("DAT: %s" % bindump(DAT), 3)
            
            nist04 = {
                1: LEN,
                2: IDC,
                3: IMP,
                4: FGP,
                5: ISR,
                6: HLL,
                7: VLL,
                8: GCA,
                999: DAT
            }
            
            IDC = int(IDC)
            self.data[ntype][IDC] = nist04
            
            LEN = int(LEN)
        
        else:
            debug.critical(
                boxer(
                    "Unknown Type-%02d" % ntype,
                    "The Type-%02d is not supported. It will be skipped in the parsing process. Contact the developer for more information." % ntype
                )
            )
            
            # Determine the record length so the unsupported record can be
            # skipped: tagged records start with the ntype in ASCII, binary
            # records start with a 4-byte length.
            if data.startswith(str(ntype)):
                _, _, _, LEN = fieldSplitter(data[0:data.find(GS)])
                LEN = int(LEN)
            else:
                LEN = binstring_to_int(data[0:4])
        
        data = data[LEN:]
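# Hedged sketch: load() consumes the raw content of a transaction, exactly as
# read() does after opening a file. This is useful when the data comes from a
# source other than the filesystem (database blob, network stream, ...). The
# sample path is an assumption taken from the read() doctest.
#
#   >>> with open( "./sample/pass-type-9-13-m1.an2", "rb" ) as fp:
#   ...     raw = fp.read()
#   >>> n = NIST()
#   >>> n.load( raw )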
def dump(self, fullname=False, maxwidth=None):
    """
        Return a readable version of the NIST object, printable on screen.
        
        :param fullname: Get the full name of the fields.
        :type fullname: boolean
        
        :param maxwidth: Maximum width of the printed field values.
        :type maxwidth: int
        
        :return: Printable representation of the NIST object.
        :rtype: str
        
        Usage:
        
            >>> dump = n.dump()
            >>> print( dump ) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
            Informations about the NIST object:
                Obj ID: Doctester NIST object
                Records: Type-01, Type-02
                Class: NISTf
            <BLANKLINE>
            NIST Type-01
                01.001 LEN: 00000136
                01.002 VER: 0300
                01.003 CNT: 1<US>1<RS>2<US>0
                01.004 TOT: USA
                01.005 DAT: ...
                01.006 PRY: 1
                01.007 DAI: FILE
                01.008 ORI: UNIL
                01.009 TCN: ...
                01.011 NSR: 00.00
                01.012 NTR: 00.00
            NIST Type-02 (IDC 0)
                02.001 LEN: 00000038
                02.002 IDC: 0
                02.004 : ...
    """
    debug.info("Dumping NIST")
    
    self.clean()
    
    ret = [
        "Informations about the NIST object:",
    ]
    
    if self.fileuri is not None:
        ret.append(leveler("File: " + self.fileuri, 1))
    
    if self.get_identifier() is not None:
        ret.append(leveler("Obj ID: " + self.get_identifier(), 1))
    
    ret.extend([
        leveler("Records: " + ", ".join(["Type-%02d" % x for x in self.get_ntype()]), 1),
        leveler("Class: " + self.__class__.__name__, 1),
        ""
    ])
    
    for ntype in self.get_ntype():
        debug.debug("NIST Type-%02d" % ntype, 1)
        
        for idc in self.get_idc(ntype):
            ret.append(self.dump_record(ntype, idc, fullname, maxwidth))
    
    return join("\n", ret)