def test_inc_zeropage(self):
    tokens = [
        self._token('INC', lexer.TOK_OPCODE),
        self._token('$00', lexer.TOK_OPER_ZP)
    ]
    codes = opcodes.Opcodes(tokens)
    self.assertEqual(b'\xe6\x00', codes.as_bytes())

def test_ascii(self):
    tokens = [
        self._token('LDA', lexer.TOK_OPCODE),
        self._token("'A'", lexer.TOK_ASCII)
    ]
    codes = opcodes.Opcodes(tokens)
    assembly = codes.as_bytes()
    self.assertEqual(b'\xA9\x41', assembly)

def test_brk(self):
    # arrange
    tokens = [self._token('BRK', lexer.TOK_OPCODE)]
    # act
    codes = opcodes.Opcodes(tokens)
    assembly = codes.as_bytes()
    # assert
    self.assertEqual(b'\x00', assembly)

def test_nop(self):
    # arrange
    tokens = [self._token('NOP', lexer.TOK_OPCODE)]
    # act
    codes = opcodes.Opcodes(tokens)
    assembly = codes.as_bytes()
    # assert
    self.assertEqual(b'\xea', assembly)

def test_jmp_label(self):
    # arrange
    tokens = [
        self._token('JMP', lexer.TOK_OPCODE),
        self._token(':loop1', lexer.TOK_LABEL)
    ]
    # act
    codes = opcodes.Opcodes(tokens, {":loop1": "$8004"})
    # assert
    self.assertEqual(b'\x4c\x04\x80', codes.as_bytes())

def test_jmp_absolute(self):
    # arrange
    tokens = [
        self._token('JMP', lexer.TOK_OPCODE),
        self._token('$8004', lexer.TOK_OPER_ABSOLUTE)
    ]
    # act
    codes = opcodes.Opcodes(tokens)
    # assert
    self.assertEqual(b'\x4c\x04\x80', codes.as_bytes())

def test_length(self):
    # arrange
    tokens = [
        self._token('JMP', lexer.TOK_OPCODE),
        self._token('$8004', lexer.TOK_OPER_ABSOLUTE)
    ]
    # act
    codes = opcodes.Opcodes(tokens)
    # assert
    self.assertEqual(3, codes.length())

def test_sta_zeropage(self):
    # arrange
    tokens = [
        self._token('STA', lexer.TOK_OPCODE),
        self._token('$00', lexer.TOK_OPER_ZP)
    ]
    # act
    codes = opcodes.Opcodes(tokens)
    assembly = codes.as_bytes()
    # assert
    self.assertEqual(b'\x85\x00', assembly)

def test_lda_direct(self):
    # arrange
    tokens = [
        self._token('LDA', lexer.TOK_OPCODE),
        self._token('#$1', lexer.TOK_OPER_IMMEDIATE)
    ]
    # act
    codes = opcodes.Opcodes(tokens)
    assembly = codes.as_bytes()
    # assert
    self.assertEqual(b'\xA9\x01', assembly)

def test_sta_absolute(self):
    # arrange
    tokens = [
        self._token('STA', lexer.TOK_OPCODE),
        self._token('$7ffa', lexer.TOK_OPER_ABSOLUTE)
    ]
    # act
    codes = opcodes.Opcodes(tokens)
    assembly = codes.as_bytes()
    # assert
    self.assertEqual(b'\x8d\xfa\x7f', assembly)

with open(args.outputfile, 'wb') as ofile:
    mylexer = lexer.AsmLexer()
    lineno = 1
    address = starting_address
    line = ''
    labels = {}
    linenumber = 1
    try:
        first_opcode_address = None
        # first process labels and string names
        for line in lines:
            result = list(mylexer.tokenize(line))
            codes = opcodes.Opcodes(result)
            if len(result) > 0 and (result[0].type == lexer.TOK_LABEL or result[0].type == lexer.TOK_STRINGNAME):
                labels[result[0].value] = f"{address:04x}"
            if len(result) > 0 and result[0].type == lexer.TOK_OPCODE and first_opcode_address is None:
                first_opcode_address = address
            address += codes.length()
            linenumber += 1
        if first_opcode_address is None:
            raise SyntaxError('No opcodes found. This ASM file is useless.')
        # then preprocess the lines, replacing all labels and string names with their hex operand

def test_to_bytes_two_byte(self):
    codes = opcodes.Opcodes(None)
    b = codes._to_bytes('$7ffa')
    self.assertEqual(b'\xfa\x7f', b)

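# Note on byte order: the 6502 is little-endian, so a two-byte operand such as
# $7ffa is emitted low byte first (b'\xfa\x7f'). The absolute-mode tests above
# (test_jmp_absolute, test_sta_absolute) expect the same ordering.
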
def test_to_bytes_one_byte(self):
    codes = opcodes.Opcodes(None)
    b = codes._to_bytes('#$1')
    self.assertEqual(b'\x01', b)

def test_empty(self):
    tokens = []
    codes = opcodes.Opcodes(tokens)
    self.assertEqual(b'', codes.as_bytes())

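# Illustrative sketch (commented out so it is not collected with the tests):
# driving Opcodes the way the assembler loop does, using only calls that appear
# above. The source line and resulting token values are assumptions; the real
# lexer may emit different token types for a given line.
#
#   mylexer = lexer.AsmLexer()
#   tokens = list(mylexer.tokenize('LDA #$1'))
#   codes = opcodes.Opcodes(tokens)
#   codes.length()     # byte count for this line, used to advance the address counter
#   codes.as_bytes()   # b'\xa9\x01' if the line tokenizes as in test_lda_direct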