def mnemonic_int(operands, errors=None):
    opcode = None
    opcode_operands = bytearray()
    if validate_operands_count(operands, 1, errors):
        operand = operands[0].lower()
        if data.is_valid_str(operand) or data.is_valid_chr(operand):
            if errors is not None:
                errors.append({'name': 'UNSUPPORTED_OPERAND', 'info': [operand]})
        elif validate_operand_data_size(operand, 8, errors):
            # call interrupt is supported with max. 8-bit data with a value up to 63 (for 64 interrupts)
            opcode = 0b11011111
            data_value = data.get_value(operand)
            if data_value > 63:
                if errors is not None:
                    errors.append({'name': 'INVALID_INT', 'info': [data_value]})
            else:
                opcode_operands.append(data_value)
    if errors:
        return None
    else:
        machine_code = bytearray()
        machine_code.append(opcode)
        machine_code.extend(opcode_operands)
        return {'machine_code': machine_code, 'relocation_table': []}
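# Hedged usage sketch, not part of the original module: how mnemonic_int might be
# driven by the assembler. It assumes data.get_value() parses the plain decimal
# operand '5' and that the validators above accept it.
def _example_mnemonic_int():
    errors = []
    encoded = mnemonic_int(['5'], errors)
    # expected on success: the 0b11011111 opcode followed by the interrupt number
    return errors if errors else encoded['machine_code']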
def get_addr_value(addr):
    if is_valid_name(addr):
        return None  # this will then add an item to the relocation table
    elif is_valid_addr(addr):
        return data.get_value(addr)
    else:
        return None
def read_flags_file(file_name, errors=None):
    global current_file_name, current_line_num, current_line_str
    if os.path.isfile(file_name):
        flags = {}
        flags_bits = 0
        with open(file_name, 'r') as file:
            current_file_name = file_name
            line_num = 0
            current_line_num = line_num
            for line_str in file.readlines():
                current_line_str = line_str
                line_num += 1
                current_line_num = line_num
                line_str = line_str.strip()
                if line_str:
                    line_splits = line_str.split(';')
                    flag_name = line_splits[0].strip()
                    if is_valid_name(flag_name):
                        if len(line_splits) > 1:
                            flag_value = line_splits[1].strip()
                            if data.is_valid(flag_value) and not data.is_valid_str(flag_value):
                                flag_value = data.get_value(flag_value)
                            else:
                                if errors is not None:
                                    errors.append({'name': 'INVALID_FLAG_VALUE', 'info': [flag_value]})
                                return None, None
                        else:
                            # flags without an explicit value get the next free power of two
                            flag_value = int(math.pow(2, flags_bits))
                        flags[flag_name] = flag_value
                        flags_bits = max(flags_bits, flag_value.bit_length())
                    else:
                        if errors is not None:
                            errors.append({'name': 'INVALID_FLAG_NAME', 'info': [flag_name]})
                        return None, None
        return flags, flags_bits
    else:
        if errors is not None:
            errors.append({'name': 'FILE_NOT_FOUND', 'info': [file_name]})
        return None, None
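# Hedged illustration, not part of the original source: the flags-file layout that
# read_flags_file() parses. Each non-empty line holds a flag name, optionally
# followed by ';' and an explicit value; names without a value receive the next
# free power of two. The file name and flag names below are made up, and
# data.get_value() is assumed to parse the plain value '8'.
#
#     carry            -> 1
#     zero             -> 2
#     interrupt ; 8    -> 8
def _example_read_flags_file():
    errors = []
    flags, flags_bits = read_flags_file('example.flags', errors)
    return errors if errors else (flags, flags_bits)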
def directive_base(operands, errors=None):
    global link_base
    if link_base is not None:
        if errors is not None:
            errors.append({'name': 'DUPLICATE_DIRECTIVE', 'info': ['base']})
    elif validate_operands_count(operands, 1, errors):
        operand = operands[0]
        if is_valid_name(operand):
            if errors is not None:
                errors.append({'name': 'UNSUPPORTED_OPERAND', 'info': [operand]})
        elif validate_operand_addr_size(operand, 16, errors):
            link_base = data.get_value(operand)
def mnemonics_add_sub_cmp_adc_sbb_and_or_xor(mnemonic, operands, errors=None):
    opcode = None
    opcode_operands = bytearray()
    if validate_operands_count(operands, 1, errors):
        # add (with carry), subtract (with borrow), compare, and, or and xor are supported with M, any 8-bit register
        # or max. 8-bit data or a single character but no string
        operand = operands[0].lower()
        if 'm' == operand:
            opcode = 0b110
        elif is_valid_register(operand):
            if validate_operand_register_size(operand, 8, errors):
                register_opcode = get_register_opcode(operand)
                opcode = register_opcode
        elif data.is_valid_str(operand):
            if errors is not None:
                errors.append({'name': 'INCOMPATIBLE_DATA_TYPE', 'info': []})
        elif validate_operand_data_size(operand, 8, errors):
            opcode = 0b111
            data_value = data.get_value(operand)
            opcode_operands.append(data_value)
        if opcode is not None:
            if 'add' == mnemonic:
                opcode = 0b01100000 | (opcode << 1)
            elif 'sub' == mnemonic:
                opcode = 0b01100001 | (opcode << 1)
            elif 'cmp' == mnemonic:
                opcode = 0b01110000 | (opcode << 1)
            elif 'adc' == mnemonic:
                opcode = 0b01010000 | opcode
            elif 'sbb' == mnemonic:
                opcode = 0b01011000 | opcode
            elif 'and' == mnemonic:
                opcode = 0b00110000 | opcode
            elif 'or' == mnemonic:
                opcode = 0b00111000 | opcode
            elif 'xor' == mnemonic:
                opcode = 0b01000000 | opcode
    if errors:
        return None
    else:
        machine_code = bytearray()
        machine_code.append(opcode)
        machine_code.extend(opcode_operands)
        return {'machine_code': machine_code, 'relocation_table': []}
from __future__ import print_function, division, absolute_import

from data import get_value

import tensorflow as tf
import edward as ed
from edward.models import Beta, Bernoulli

theta = Beta(a=1.0, b=1.0)
# 12-dimensional Bernoulli sharing the same theta
x = Bernoulli(p=tf.ones(12) * theta)

# ====== sampling from each marginal variable ====== #
theta_sample = theta.sample()
x_sample = x.sample()
print("Marginal theta samples:", get_value(theta_sample))
print("Marginal X samples:", get_value(x_sample))

# ====== sampling from the joint distribution ====== #
samples = get_value([x.value(), theta.value()])
print("From joint distribution:")
print("- X:", samples[0])
print("- theta:", samples[1])
def mnemonics_db_dw(mnemonic, operands, errors=None):
    opcode_operands = bytearray()
    _relocation_table = []
    if operands:
        for operand in operands:
            operand_splits = operand.rsplit(None, 3)
            if len(operand_splits) == 4 and '(' == operand_splits[-3] and ')' == operand_splits[-1]:
                operand = operand_splits[0]
                multiplier = operand_splits[-2]
                if data.is_valid_str(multiplier) or data.is_valid_chr(multiplier):
                    if errors is not None:
                        errors.append({'name': 'UNSUPPORTED_MULTIPLIER', 'info': [multiplier]})
                    opcode_operands.clear()
                    break
                elif data.get_size(multiplier) is None or data.get_value(multiplier) < 1:
                    if errors is not None:
                        errors.append({'name': 'INVALID_MULTIPLIER', 'info': [multiplier]})
                    opcode_operands.clear()
                    break
                elif data.get_size(multiplier) > 16:
                    if errors is not None:
                        errors.append({'name': 'UNSUPPORTED_MULTIPLIER_SIZE', 'info': [data.get_size(multiplier), 16]})
                    opcode_operands.clear()
                    break
                multiplier_value = data.get_value(multiplier)
            else:
                multiplier_value = 1
            if 'db' == mnemonic:
                # bytes support max. 8-bit data, a single character or a string
                if validate_operand_data_size(operand, 8, errors):
                    if data.is_valid_str(operand):
                        data_values = data.get_value(operand) * multiplier_value
                        opcode_operands.extend(data_values)
                    else:
                        data_value = data.get_value(operand)
                        opcode_operands.extend([data_value] * multiplier_value)
                else:
                    opcode_operands.clear()
                    break
            elif 'dw' == mnemonic:
                # words support a symbol name (using relocation), max. 16-bit data, a single character or a string,
                # both including unicode
                if is_valid_name(operand):
                    operand = expand_local_symbol_name(operand)
                    opcode_operands.extend([0, 0])
                    _relocation_table.append({
                        'machine_code_offset': len(opcode_operands) - 2,
                        'symbol_index': symbol_table.get_index(operand)
                    })
                elif validate_operand_data_size(operand, 16, errors):
                    if data.is_valid_str(operand):
                        data_values = data.get_value(operand) * multiplier_value
                        for data_value in data_values:
                            opcode_operands.extend(binutils.word_to_le(data_value))
                    else:
                        data_value = data.get_value(operand)
                        opcode_operands.extend(binutils.word_to_le(data_value) * multiplier_value)
                else:
                    opcode_operands.clear()
                    break
    else:
        if errors is not None:
            errors.append({'name': 'NO_DATA', 'info': []})
    if errors:
        return None
    else:
        machine_code = bytearray()
        machine_code.extend(opcode_operands)
        return {'machine_code': machine_code, 'relocation_table': _relocation_table}
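# Hedged usage sketch, not part of the original module: the whitespace-separated
# '( n )' repeat suffix that mnemonics_db_dw() recognizes. Assumes data.get_value()
# understands the 0x prefix and plain decimals.
def _example_db_repeat():
    errors = []
    encoded = mnemonics_db_dw('db', ['0xff ( 4 )'], errors)
    # expected on success: machine_code == bytearray([0xff, 0xff, 0xff, 0xff])
    return errors if errors else encoded['machine_code']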
def mnemonic_mov(operands, errors=None):
    opcode = None
    opcode_operands = bytearray()
    _relocation_table = []
    if validate_operands_count(operands, 2, errors):
        operand1 = operands[0].lower()
        operand2 = operands[1].lower()
        if 'm' == operand1:
            # move into M is only supported from an 8-bit register
            register1_opcode = 0b110
            if validate_operand_register_size(operand2, 8, errors):
                register2_opcode = get_register_opcode(operand2)
                opcode = 0b10000000 | (register1_opcode << 4) | (register2_opcode << 1)
        elif validate_operand_register(operand1, errors):
            register1_size = get_register_size(operand1)
            register1_opcode = get_register_opcode(operand1)
            if 8 == register1_size:
                # move into an 8-bit register is supported from M, another 8-bit register or using max. 8-bit data or
                # a single character but no string
                register2_opcode = None
                if 'm' == operand2:
                    register2_opcode = 0b110
                elif is_valid_register(operand2):
                    if validate_operand_register_size(operand2, register1_size, errors):
                        register2_opcode = get_register_opcode(operand2)
                elif data.is_valid_str(operand2):
                    if errors is not None:
                        errors.append({'name': 'INCOMPATIBLE_DATA_TYPE', 'info': []})
                elif validate_operand_data_size(operand2, register1_size, errors):
                    register2_opcode = 0b111
                    data_value = data.get_value(operand2)
                    opcode_operands.append(data_value)
                if register2_opcode is not None:
                    opcode = 0b10000000 | (register1_opcode << 4) | (register2_opcode << 1)
            elif 16 == register1_size:
                # move into a 16-bit register is supported from a symbol name (using relocation), another 16-bit
                # register or using max. 16-bit data or a single character including unicode but no string
                register2_opcode = None
                if is_valid_name(operand2):
                    operand2 = expand_local_symbol_name(operand2)
                    register2_opcode = 0b111
                    opcode_operands.extend([0, 0])
                    _relocation_table.append({
                        'machine_code_offset': 1,
                        'symbol_index': symbol_table.get_index(operand2)
                    })
                elif is_valid_register(operand2):
                    if validate_operand_register_size(operand2, register1_size, errors):
                        register2_opcode = get_register_opcode(operand2)
                elif data.is_valid_str(operand2):
                    if errors is not None:
                        errors.append({'name': 'INCOMPATIBLE_DATA_TYPE', 'info': []})
                elif validate_operand_data_size(operand2, register1_size, errors):
                    register2_opcode = 0b111
                    data_value = data.get_value(operand2)
                    opcode_operands.extend(binutils.word_to_le(data_value))
                if register2_opcode is not None:
                    opcode = (register1_opcode << 4) | (register2_opcode << 1)
    if errors:
        return None
    else:
        machine_code = bytearray()
        machine_code.append(opcode)
        machine_code.extend(opcode_operands)
        return {'machine_code': machine_code, 'relocation_table': _relocation_table}
)  # shape=(M, 2)

# ====== ellipticity of the galaxies ====== #
# e ~ Normal(∑ 1 / distance * log(mass), sigma)
# I don't know what the value of sigma is, so why not give it
# a Uniform distribution; we give each galaxy a different sigma :D
sigma = Uniform(
    a=np.full(shape=(M, 2), fill_value=0.12, dtype='float32'),
    b=np.full(shape=(M, 2), fill_value=0.33, dtype='float32')
)
galaxies_elp = Normal(
    mu=mean,
    sigma=sigma,
)

# ====== happy sampling ====== #
galXY, halXY, halMAS, galE, sigma = get_value(
    [galaxies_pos.value(), halos_pos.value(), halos_mass.value(),
     galaxies_elp.value(), sigma.value()]
)
print("Galaxies position:", galXY.shape)
print("Galaxies ellipticity:", galE.shape)
print("Halos position:", halXY.shape)
print("Halos mass:", halMAS.shape)
print("Sigma:", sigma.shape)

# ====== visualize the generated sky ====== #
plt.figure(figsize=(8, 8), dpi=180)
draw_sky(galaxies=np.concatenate([galXY, galE], axis=-1),
         halos=[N] + halXY.ravel().tolist())
plt.show(block=True)
def parse_csv_line(line_str, errors=None):
    columns = line_str.split(';')
    addr_value = ''
    data_value = ''
    for column, bits in addr_config.items():
        if column <= len(columns):
            column_value = columns[column - 1].strip()
            if data.is_valid(column_value) and not data.is_valid_str(column_value):
                column_value = format(data.get_value(column_value), 'b')
                if bits is not None:
                    column_value = column_value.zfill(bits)
            elif is_valid_bits(column_value):
                column_value = get_bits_value(column_value)
                if bits is not None:
                    column_value = column_value.zfill(bits)
            else:
                if errors is not None:
                    errors.append({'name': 'INVALID_ADDR_VALUE', 'info': [column_value]})
                return None, None
            addr_value += column_value
        else:
            if errors is not None:
                errors.append({'name': 'ADDR_COLUMN_NOT_FOUND', 'info': [column]})
            return None, None
    if addr_config_bits is not None and len(addr_value) > addr_config_bits:
        if errors is not None:
            errors.append({'name': 'INCOMPATIBLE_ADDR_SIZE', 'info': [len(addr_value), addr_config_bits]})
        return None, None
    if data_config_column <= len(columns):
        column_value = columns[data_config_column - 1].strip()
        if data.is_valid(column_value) and not data.is_valid_str(column_value):
            column_value = format(data.get_value(column_value), 'b')
            if data_config_bits is not None:
                column_value = column_value.zfill(data_config_bits)
        elif data_config_flags:
            flag_names = column_value.split(',')
            column_value = 0
            for flag_name in flag_names:
                flag_name = flag_name.strip()
                if flag_exists(flag_name):
                    column_value |= get_flag_value(flag_name)
                else:
                    if errors is not None:
                        errors.append({'name': 'UNKNOWN_FLAG', 'info': [flag_name]})
                    return None, None
            column_value = format(column_value, 'b').zfill(data_config_bits)
        else:
            if errors is not None:
                errors.append({'name': 'INVALID_DATA_VALUE', 'info': [column_value]})
            return None, None
        data_value = column_value
    else:
        if errors is not None:
            errors.append({'name': 'DATA_COLUMN_NOT_FOUND', 'info': [data_config_column]})
        return None, None
    if data_config_bits is not None and len(data_value) > data_config_bits:
        if errors is not None:
            errors.append({'name': 'INCOMPATIBLE_DATA_SIZE', 'info': [len(data_value), data_config_bits]})
        return None, None
    return addr_value, data_value
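# Hedged worked example, not from the original source: with a hypothetical
# addr_config of {1: 4, 2: 4}, data_config_column = 3 and data_config_bits = 8
# (and data.get_value() parsing the 0x prefix), the line "0x3 ; 0xA ; 0x5F"
# would yield ('00111010', '01011111') - the address columns are converted to
# binary, zero-filled to their configured widths and concatenated, and the data
# column is zero-filled to its configured width.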
def parse_zero_fill(zero_fill_str):
    if data.is_valid(zero_fill_str) and not data.is_valid_str(zero_fill_str):
        return data.get_value(zero_fill_str)
    else:
        show_error({'name': 'INVALID_ZERO_FILL', 'info': [zero_fill_str]})
        return None