def test_extend(self):
    trace_count = 100
    sample_count = 1000

    with trsfile.open(self.tmp_path, 'w', padding_mode=TracePadding.AUTO) as trs_traces:
        # Extend with an empty list
        trs_traces.extend([])
        self.assertEqual(len(trs_traces), 0)

        # Extend with a list containing a single trace
        trs_traces.extend([
            Trace(
                SampleCoding.FLOAT,
                [0] * sample_count,
                data=b'\x00' * 8
            )
        ])
        self.assertEqual(len(trs_traces), 1)

        # Extend with a list of multiple traces
        trs_traces.extend([
            Trace(
                SampleCoding.FLOAT,
                [0] * sample_count,
                data=i.to_bytes(8, byteorder='big')
            )
            for i in range(0, trace_count)
        ])
        self.assertEqual(len(trs_traces), trace_count + 1)
def test_extend(self):
    trace_count = 100
    sample_count = 1000

    with trsfile.open(self.tmp_path, 'w', padding_mode=TracePadding.AUTO) as trs_traces:
        # Extend with an empty list
        trs_traces.extend([])
        self.assertEqual(len(trs_traces), 0)

        # Extend with a list containing a single trace
        trs_traces.extend([
            Trace(
                SampleCoding.FLOAT,
                [0] * sample_count,
                TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(bytes(8))})
            )
        ])
        self.assertEqual(len(trs_traces), 1)

        # Extend with a list of multiple traces
        trs_traces.extend([
            Trace(
                SampleCoding.FLOAT,
                [0] * sample_count,
                TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(i.to_bytes(8, byteorder='big'))})
            )
            for i in range(0, trace_count)
        ])
        self.assertEqual(len(trs_traces), trace_count + 1)
def test_padding(self):
    trace_count = 100
    sample_count = 1000
    fmt = SampleCoding.FLOAT

    with trsfile.open(self.tmp_path, 'w', padding_mode=TracePadding.AUTO) as trs_traces:
        # This is the length everything should be padded/clipped to
        trs_traces.extend(
            Trace(
                fmt,
                b'\xDE' * (sample_count * fmt.size),
                TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(bytes(8))})
            )
        )

        # Padding mode
        trs_traces.extend([
            Trace(
                fmt,
                b'\xDE' * (sample_count + i) * fmt.size,
                TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(abs(i).to_bytes(8, byteorder='big'))})
            )
            for i in range(0, -trace_count, -1)
        ])

        # Clipping mode
        trs_traces.extend([
            Trace(
                fmt,
                b'\xDE' * (sample_count + i) * fmt.size,
                TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(i.to_bytes(8, byteorder='big'))})
            )
            for i in range(0, trace_count)
        ])

    with trsfile.open(self.tmp_path, 'r') as trs_traces:
        self.assertEqual(len(trs_traces), trace_count * 2 + 1)

        # Check that all traces are of the same size
        for trs_trace in trs_traces:
            self.assertEqual(len(trs_trace), sample_count)

        # Check that all padding is zero
        for i, trs_trace in enumerate(trs_traces[1:101]):
            # Difficult case :)
            if i == 0:
                continue

            for si, sample in enumerate(trs_trace[-i:]):
                self.assertEqual(sample, 0.0 if fmt == SampleCoding.FLOAT else 0, str(i))

            # Test that this is indeed not zero
            self.assertNotEqual(trs_trace[-i - 1], 0)
def test_padding_none(self):
    sample_count = 1000

    with trsfile.open(
        self.tmp_path,
        'w',
        padding_mode=TracePadding.NONE,
        headers={
            Header.NUMBER_SAMPLES: sample_count,
            Header.LENGTH_DATA: 8,
            Header.SAMPLE_CODING: SampleCoding.FLOAT
        }
    ) as trs_traces:
        # This is the length of the trace
        trs_traces.extend(
            Trace(
                SampleCoding.FLOAT,
                [0] * sample_count,
                TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(bytes(8))})
            )
        )

        # Length is smaller
        with self.assertRaises(ValueError):
            trs_traces.extend(
                Trace(
                    SampleCoding.FLOAT,
                    [0] * (sample_count - 1),
                    TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(b'\x10' * 8)})
                )
            )
        self.assertEqual(len(trs_traces), 1)

        # Length is bigger
        with self.assertRaises(ValueError):
            trs_traces.extend(
                Trace(
                    SampleCoding.FLOAT,
                    [0] * (sample_count + 1),
                    TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(b'\x01' * 8)})
                )
            )
        self.assertEqual(len(trs_traces), 1)

        # Length is equal
        trs_traces.extend(
            Trace(
                SampleCoding.FLOAT,
                [0] * sample_count,
                TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(bytes(8))})
            )
        )
        self.assertEqual(len(trs_traces), 2)
def test_read(self):
    trace_count = 100
    sample_count = 1000
    original_traces = [
        Trace(
            SampleCoding.FLOAT,
            [get_sample(i) for i in range(0, sample_count)],
            TraceParameterMap()
        )
        for i in range(0, trace_count)
    ]

    # Create a trace set
    with trsfile.open(
        self.tmp_path,
        'w',
        headers={
            Header.LABEL_X: 'Testing X',
            Header.LABEL_Y: 'Testing Y',
            Header.DESCRIPTION: 'Testing trace creation',
        },
        padding_mode=TracePadding.AUTO
    ) as trs_traces:
        trs_traces.extend(original_traces)

        # Make sure the lengths are equal
        self.assertEqual(len(original_traces), len(trs_traces))

    # Read the trace set back and check that everything matches
    with trsfile.open(self.tmp_path, 'r') as trs_traces:
        # Check if lengths are still good :)
        self.assertEqual(len(original_traces), len(trs_traces))

        # Check if every trace is saved correctly
        for original_trace, trs_trace in zip(original_traces, trs_traces):
            self.assertEqual(original_trace, trs_trace)
def test_append(self):
    trace_count = 100
    sample_count = 1000

    # Append to a non-existing file; behaves the same as a normal "write"
    with trsfile.open(self.tmp_path, 'a', padding_mode=TracePadding.AUTO) as trs_traces:
        self.assertEqual(len(trs_traces), 0)

        # Extend the trace file with 100 traces of 1000 samples each
        trs_traces.extend([
            Trace(
                SampleCoding.FLOAT,
                [0] * sample_count,
                TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(i.to_bytes(8, byteorder='big'))})
            )
            for i in range(0, trace_count)
        ])
        self.assertEqual(len(trs_traces), trace_count)

    # Now open and close the file a few times while appending some traces each time
    expected_length = trace_count
    for t in range(0, 10):
        trace_count = (t + 1) * 10

        with trsfile.open(self.tmp_path, 'a', padding_mode=TracePadding.AUTO) as trs_traces:
            self.assertEqual(len(trs_traces), expected_length)

            # Extend the trace file with another batch of traces
            trs_traces.extend([
                Trace(
                    SampleCoding.FLOAT,
                    [0] * sample_count,
                    TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(i.to_bytes(8, byteorder='big'))})
                )
                for i in range(0, trace_count)
            ])

            expected_length += trace_count
            self.assertEqual(len(trs_traces), expected_length)
def sha256(word, trs_file):
    assert len(word) == 16, "16 words are needed for a full cycle of sha-256"
    input_word = list(word)  # keep a copy of the original message words

    h0 = 0x6a09e667
    h1 = 0xbb67ae85
    h2 = 0x3c6ef372
    h3 = 0xa54ff53a
    h4 = 0x510e527f
    h5 = 0x9b05688c
    h6 = 0x1f83d9ab
    h7 = 0x5be0cd19
    a, b, c, d, e, f, g, h = h0, h1, h2, h3, h4, h5, h6, h7

    # extend the 16 message words into the 64-round schedule
    for i in range(16, 64):
        s0 = (RR(word[i - 15], 7) ^ RR(word[i - 15], 18) ^ (word[i - 15] >> 3)) & mask32bit
        s1 = (RR(word[i - 2], 17) ^ RR(word[i - 2], 19) ^ (word[i - 2] >> 10)) & mask32bit
        word.append((word[i - 16] + s0 + word[i - 7] + s1) & mask32bit)

    # main cycle: record the Hamming distance of every other round as one sample
    one_trace = []
    for i in range(64):
        a, b, c, d, e, f, g, h, hamming_distance = do_round(
            a, b, c, d, e, f, g, h, K[i], word[i], bool(i % 2))
        if i % 2 == 0:
            one_trace.append(hamming_distance)

    h0 = (h0 + a) & mask32bit
    h1 = (h1 + b) & mask32bit
    h2 = (h2 + c) & mask32bit
    h3 = (h3 + d) & mask32bit
    h4 = (h4 + e) & mask32bit
    h5 = (h5 + f) & mask32bit
    h6 = (h6 + g) & mask32bit
    h7 = (h7 + h) & mask32bit

    # store the simulated leakage, with the input and output words as trace data
    trs_file.append(
        Trace(
            SampleCoding.INT,
            one_trace,
            bytes(word2bytes(input_word) + word2bytes([h0, h1, h2, h3, h4, h5, h6, h7]))))

    return h0, h1, h2, h3, h4, h5, h6, h7
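# A minimal sketch of how the sha256() helper above might be driven. The output
# file name, the trace count and the random message generation are assumptions
# made for illustration; the helpers RR, do_round, K, mask32bit and word2bytes
# from this script are assumed to be in scope.
import random

import trsfile
from trsfile import TracePadding

with trsfile.open('sha256_sim.trs', 'w', padding_mode=TracePadding.AUTO) as trs_file:
    for _ in range(100):
        # one 512-bit message block = sixteen random 32-bit words
        message = [random.getrandbits(32) for _ in range(16)]
        digest = sha256(message, trs_file)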
def test_write_closed(self):
    trace_count = 100
    sample_count = 1000

    with trsfile.open(self.tmp_path, 'w', padding_mode=TracePadding.AUTO) as trs_traces:
        trs_traces.extend([
            Trace(
                SampleCoding.FLOAT,
                [0] * sample_count,
                TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(i.to_bytes(8, byteorder='big'))})
            )
            for i in range(0, trace_count)
        ])

    # Should raise a "ValueError: I/O operation on closed trace set"
    with self.assertRaises(ValueError):
        print(trs_traces)
def test_write(self):
    trace_count = 100
    sample_count = 1000

    try:
        with trsfile.open(
            self.tmp_path,
            'w',
            headers={
                Header.LABEL_X: 'Testing X',
                Header.LABEL_Y: 'Testing Y',
                Header.DESCRIPTION: 'Testing trace creation',
            },
            padding_mode=TracePadding.AUTO
        ) as trs_traces:
            trs_traces.extend([
                Trace(
                    SampleCoding.FLOAT,
                    [0] * sample_count,
                    TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(i.to_bytes(8, byteorder='big'))})
                )
                for i in range(0, trace_count)
            ])
    except Exception as e:
        self.fail('Exception occurred: ' + str(e))
def test_write(self):
    trace_count = 100
    sample_count = 1000

    try:
        with trsfile.open(
            self.tmp_path,
            'w',
            headers={
                Header.LABEL_X: 'Testing X',
                Header.LABEL_Y: 'Testing Y',
                Header.DESCRIPTION: 'Testing trace creation',
            },
            padding_mode=TracePadding.AUTO
        ) as trs_traces:
            trs_traces.extend([
                Trace(
                    SampleCoding.FLOAT,
                    [0] * sample_count,
                    data=i.to_bytes(8, byteorder='big')
                )
                for i in range(0, trace_count)
            ])
    except Exception as e:
        self.fail('Exception occurred: ' + str(e))
def test_exclusive(self):
    trace_count = 100
    sample_count = 1000

    # Write to the file exclusively
    with trsfile.open(self.tmp_path, 'x', padding_mode=TracePadding.AUTO) as trs_traces:
        self.assertEqual(len(trs_traces), 0)

        # Extend the trace file with 100 traces of 1000 samples each
        trs_traces.extend([
            Trace(
                SampleCoding.FLOAT,
                [0] * sample_count,
                TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(i.to_bytes(8, byteorder='big'))})
            )
            for i in range(0, trace_count)
        ])
        self.assertEqual(len(trs_traces), trace_count)

    # Now try again; opening the same file exclusively should throw an exception
    with self.assertRaises(FileExistsError):
        with trsfile.open(self.tmp_path, 'x') as trs_traces:
            self.assertEqual(len(trs_traces), trace_count)
# print('processing ' + filename)

# Reading MATLAB files in Python, using scipy:
# https://docs.scipy.org/doc/scipy/reference/tutorial/io.html
matfile = loadmat(INCOMING_FOLDER + '/' + filename)

# This returns a NumPy ndarray:
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
trace = matfile['trace']

# for debug
# print('Trace: {0:d} samples, type {1}, name {2}'.format(matfile['trace'].size, filename, matfile['trace'].dtype))
# print(' initial 10 samples: {0}'.format(trace[0:10]))
# pdb.set_trace()  # for debug, continue with 'c'

# Save the data as a trace in the TRS file
trs_file.append(
    Trace(
        SampleCoding.FLOAT,
        trace[0].tolist(),  # trsfile expects a list; loadmat returns a numpy ndarray
        title=filename))

# Display a progress message every 100 traces processed
ntraces += 1
if ntraces % 100 == 0 or ntraces < 10:
    print(' ' + str(ntraces) + ' traces processed !')

print('done!')
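# A minimal sketch of the scaffolding the conversion fragment above assumes.
# The folder name, the output file name and the directory listing are
# illustrative guesses, not taken from the original script.
import os

import trsfile
from scipy.io import loadmat
from trsfile import Trace, SampleCoding, TracePadding

INCOMING_FOLDER = 'incoming'  # assumed input folder holding the .mat files
ntraces = 0

with trsfile.open('converted.trs', 'w', padding_mode=TracePadding.AUTO) as trs_file:
    for filename in sorted(os.listdir(INCOMING_FOLDER)):
        if not filename.endswith('.mat'):
            continue
        matfile = loadmat(os.path.join(INCOMING_FOLDER, filename))
        trs_file.append(
            Trace(
                SampleCoding.FLOAT,
                matfile['trace'][0].tolist(),
                title=filename))
        ntraces += 1
    print(str(ntraces) + ' traces converted')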
# Adding one Trace
# trs_file.append(
#     Trace(
#         SampleCoding.FLOAT,
#         [random.uniform(-255, 255) for _ in range(0, 1000)],
#         data = os.urandom(16)
#     )
# )

# Adding one Trace
# trs_file.append(
trs_file.extend(
    Trace(
        SampleCoding.INT,
        [1, 10],
        # data = os.urandom(16),
        # data = b"\x91+\x98'Q\xfaw\xe4\xbcM;!\x0e\xb5\xaf\xca",
        title='INTEIRO'))

# We cannot delete traces with the TrsEngine, other engines do support this feature
# del trs_file[40:50]

# We can only change headers with a value that has the same length as the previous value
# with the TrsEngine; other engines can support dynamically adding, deleting or changing
# headers.
# trs_file.update_header(Header.LABEL_X, 'Time')
# trs_file.update_header(Header.LABEL_Y, 'Voltage')
# trs_file.update_header(Header.DESCRIPTION, 'Traces created for some purpose!')

print('Total length of new trace set: {0:d}'.format(len(trs_file)))