Example #1
    def make_tick(self):
        updates = OrderedDict()
        updates.update(self.update())
        self.theano_tick = theano.function([], [], updates=updates)

        # introduce 1-time-tick delay
        for o in self.origin.values():
            if o.func is not None and self.mode == 'direct': continue
            o.tick()
Example #2
    def update(self, dt):
        """Update the input and output of all the theano variables.

        """

        updates = OrderedDict()

        for input in self.input.values():
            updates.update(input.update(dt))

        for origin in self.origin.values():
            if origin.func is None:
                updates.update({origin.decoded_output: origin.method()})

        return updates
Example #3
    def update(self, dt):
        """Update the input and output of all the theano variables.

        """
        
        updates = OrderedDict()

        for input in self.input.values():
            updates.update(input.update(dt))

        for origin in self.origin.values():
            if origin.func is None: 
                updates.update(
                    {origin.decoded_output: origin.method()})

        return updates
Example #4
    def make_theano_tick(self):
        """Generate the theano function for running the network simulation.
        
        :returns: theano function
        """
        # dictionary for all variables
        # and the theano description of how to compute them 
        updates = OrderedDict()

        # for every node in the network
        for node in self.nodes.values():
            # if there is some variable to update
            if hasattr(node, 'update'):
                # add it to the list of variables to update every time step
                updates.update(node.update(self.dt))

        # create graph and return optimized update function
        return theano.function([], [], updates=updates.items())#, mode='ProfileMode')
Example #5
def schema2dict(xmlcode: str) -> Dict:
    elements_dict = OrderedDict()
    tree = etree.fromstring(xmlcode)
    ns_dict = {_globals.schemainfo.ns_prefix: _globals.schemainfo.ns}
    _globals.schemainfo.ns_dict = ns_dict
    # resource (entity)
    resource, resource_dict = parse_resource(
        tree=tree,
        resource_root_xpath='.//xs:element[@name="resource"]'
    )
    elements_dict.update(resource_dict)
    # pprint(elements_dict)

    # properties
    for prop in resource.findall('./xs:complexType/xs:all/xs:element',
                                 namespaces=_globals.schemainfo.ns_dict):
        prop_dict = parse_property(tree=tree, prop_el=prop)
        elements_dict.update(prop_dict)
    return elements_dict
Example #6
    def make_theano_tick(self):
        """Generate the theano function for running the network simulation.
        
        :returns: theano function
        """

        # dictionary for all variables
        # and the theano description of how to compute them
        updates = OrderedDict()

        # for every node in the network
        for node in self.nodes.values():
            # if there is some variable to update
            if hasattr(node, 'update'):
                # add it to the list of variables to update every time step
                updates.update(node.update(self.dt))

        # create graph and return optimized update function
        return theano.function([], [], updates=updates.items())
Example #7
def writer(cls, argv):
    conf_file_path = get_env_file_path(cls)
    if os.path.exists(conf_file_path):
        raise Exception(f"Found {conf_file_path} existing already")

    vals = OrderedDict()
    vals.update(_normalize_prefix(cls._default_prefix))
    for k in cls.__dict__.keys():
        if k in os.environ:
            vals[k] = os.environ[k]

    for arg in argv:
        if "=" in arg:
            k, val = arg.split("=", 1)
            vals[k] = val

    dump = os.environ.get("VARS_DUMP",
                          cls._dump)  # type: Union[bool, List[str]]
    if dump:
        for k in cls.__dict__.keys():
            if isinstance(dump, (list, tuple)) and k not in dump:
                continue
            if k not in vals and not k.startswith("_"):
                vals[k] = getattr(cls, k)

    if vals:
        ordered_vals = OrderedDict()
        for k in cls.__dict__.keys():
            if k in vals:
                ordered_vals[k] = vals.pop(k)

        ordered_vals.update(vals)
        _write(conf_file_path, ordered_vals)
    else:
        log.info("Dynamic-Conf: No variables available.")  # pragma: no cover

    return vals
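
A minimal, self-contained sketch of the ordering trick `writer` relies on: collected values are re-emitted in the declaration order of the class attributes, and anything not declared on the class is appended afterwards. The `Config` class and its values below are hypothetical; the real function additionally needs `get_env_file_path`, `_normalize_prefix`, and `_write` from its own module.

from collections import OrderedDict

class Config:                                        # hypothetical settings class
    HOST = "localhost"
    PORT = "8080"

vals = OrderedDict([("PORT", "9000"), ("EXTRA", "1")])   # values gathered from os.environ / argv

ordered_vals = OrderedDict()
for k in Config.__dict__.keys():                     # class attributes in declaration order
    if k in vals:
        ordered_vals[k] = vals.pop(k)
ordered_vals.update(vals)                            # append keys that are not class attributes

print(ordered_vals)                                  # OrderedDict([('PORT', '9000'), ('EXTRA', '1')])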
Example #8
    def update(self, dt):
        """Compute the set of theano updates needed for this ensemble.

        Returns a dictionary with new neuron state,
        termination, and origin values.

        :param float dt: the timestep of the update
        """
        
        ### find the total input current to this population of neurons

        # set up matrix to store accumulated decoded input
        X = None 
        # updates is an ordered dictionary of theano variables to update
        updates = OrderedDict()

        for ii, di in enumerate(self.decoded_input.values()):
            # add its values to the total decoded input
            if ii == 0: X = di.value
            else: X += di.value
            updates.update(di.update(dt))

        # if we're in spiking mode, then look at the input current and 
        # calculate new neuron activities for output
        if self.mode == 'spiking':

            # apply respective biases to neurons in the population 
            J = TT.as_tensor_variable(np.array(self.bias))

            for ei in self.encoded_input.values():
                # add its values directly to the input current
                J += (ei.value.T * self.alpha.T).T
                updates.update(ei.update(dt))

            # only do this if there is decoded_input
            if X is not None:
                # add to input current for each neuron as
                # represented input signal x preferred direction

                for i in range(self.array_size):
                    J = TT.basic.inc_subtensor(J[i], 
                        TT.dot(X[i], self.shared_encoders[i].T))

            # if noise has been specified for this neuron,
            if self.noise: 
                # generate random noise values, one for each input_current element, 
                # with standard deviation = sqrt(self.noise=std**2)
                # When simulating white noise, the noise process must be scaled by
                # sqrt(dt) instead of dt. Hence, we divide the std by sqrt(dt).
                if self.noise_type.lower() == 'gaussian':
                    J += self.srng.normal(
                        size=self.bias.shape, std=np.sqrt(self.noise/dt))
                elif self.noise_type.lower() == 'uniform':
                    J += self.srng.uniform(
                        size=self.bias.shape, 
                        low=-self.noise/np.sqrt(dt), 
                        high=self.noise/np.sqrt(dt))

            # pass that total into the neuron model to produce
            # the main theano computation
            updates.update(self.neurons.update(J, dt))
        
            for l in self.learned_terminations:
                # also update the weight matrices on learned terminations
                updates.update(l.update(dt))

            # and compute the decoded origin decoded_input from the neuron output
            for o in self.origin.values():
                updates.update(o.update(dt, updates[self.neurons.output]))

        if self.mode == 'direct': 

            # if we're in direct mode then just directly pass the decoded_input 
            # to the origins for decoded_output
            for o in self.origin.values(): 
                if o.func is None:
                    if len(self.decoded_input) > 0:
                        updates.update(OrderedDict({o.decoded_output: 
                            TT.flatten(X).astype('float32')}))
        return updates
Example #9
    def update(self):
        """Compute the set of theano updates needed for this ensemble.

        Returns a dictionary with new neuron state,
        termination, and origin values.
        """

        ### find the total input current to this population of neurons

        # set up matrix to store accumulated decoded input
        X = None
        # updates is an ordered dictionary of theano variables to update
        updates = OrderedDict()

        for ii, di in enumerate(self.decoded_input.values()):
            # add its values to the total decoded input
            if ii == 0:
                X = di.value
            else:
                X += di.value

            updates.update(di.update(self.dt))

        # if we're in spiking mode, then look at the input current and
        # calculate new neuron activities for output
        if self.mode == 'spiking':

            # apply respective biases to neurons in the population
            J = TT.as_tensor_variable(np.array(self.bias))

            for ei in self.encoded_input.values():
                # add its values directly to the input current
                J += (ei.value.T * self.alpha.T).T
                updates.update(ei.update(self.dt))

            # only do this if there is decoded_input
            if X is not None:
                # add to input current for each neuron as
                # represented input signal x preferred direction
                J = map_gemv(1.0, self.shared_encoders, X, 1.0, J)

            # if noise has been specified for this neuron,
            if self.noise:
                # generate random noise values, one for each input_current element,
                # with standard deviation = sqrt(self.noise=std**2)
                # When simulating white noise, the noise process must be scaled by
                # sqrt(dt) instead of dt. Hence, we divide the std by sqrt(dt).
                if self.noise_type.lower() == 'gaussian':
                    J += self.srng.normal(
                        size=self.bias.shape, std=np.sqrt(self.noise/self.dt))
                elif self.noise_type.lower() == 'uniform':
                    J += self.srng.uniform(
                        size=self.bias.shape,
                        low=-self.noise/np.sqrt(self.dt),
                        high=self.noise/np.sqrt(self.dt))

            # pass that total into the neuron model to produce
            # the main theano computation
            updates.update(self.neurons.update(J, self.dt))

            for l in self.learned_terminations:
                # also update the weight matrices on learned terminations
                updates.update(l.update(self.dt))

            # and compute the decoded origin decoded_input from the neuron output
            for o in self.origin.values():
                updates.update(o.update(self.dt, updates[self.neurons.output]))

        if self.mode == 'direct':
            # if we're in direct mode then just directly pass the decoded_input
            # to the origins for decoded_output
            for o in self.origin.values():
                if o.func is None:
                    if len(self.decoded_input) > 0:
                        updates.update(OrderedDict({o.decoded_output:
                            TT.flatten(X).astype(FLOAT_TYPE)}))
        return updates
Example #10
# A log file with IP addresses is given; analyze the last N addresses and
# save "IP address - request count" pairs to a new file.

from collections import OrderedDict, defaultdict, deque

N = 3000
with open('big_log.txt', 'r', encoding='utf-8') as f:
    log = deque(f, N)

print(log)

data = OrderedDict()
spam = defaultdict(int)

for item in log:
    ip = item.strip()

    if not ip.startswith('192.168'):
        spam[ip] += 1   # count requests per external address
        data[ip] = 1    # remember the first-seen order of the addresses

print(spam)
print(data)

data.update(spam)
print(data)

with open('data.txt', 'w', encoding='utf-8') as f:
    for key, value in data.items():
        f.write(f'{key} - {value}\n')
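
For comparison, the same tail-of-the-log counting can be written with collections.Counter; this is only an alternative sketch for the script above (same one-address-per-line 'big_log.txt' format assumed), and it orders the output by request count rather than by first appearance.

from collections import Counter, deque

N = 3000
with open('big_log.txt', 'r', encoding='utf-8') as f:
    tail = deque(f, N)                  # keep only the last N lines

counts = Counter(line.strip() for line in tail
                 if not line.strip().startswith('192.168'))

with open('data.txt', 'w', encoding='utf-8') as f:
    for ip, n in counts.most_common():
        f.write(f'{ip} - {n}\n')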
Example #11
class ParseDownlink:

    def __init__(self, hexstr='', dlim=''):
        from collections import OrderedDict

        self._errmsg = ''
        self.compileddata = OrderedDict()
        self._parse(hexstr, dlim)

    @classmethod
    def parse(cls, hexstr='', dlim=''):
        obj = cls(hexstr, dlim)
        return obj.compileddata

    @classmethod
    def parserecord(cls, hexstr='', logpath='[$HOME]/ss2logs/ss2beacon_parsed_[$TIMESTAMP].json', dlim=''):
        obj = cls(hexstr, dlim)
        if obj._errmsg == '':
            obj.record(logpath)
        return obj.compileddata

    def display(self):
        import json
        print(json.dumps(self.compileddata, indent=4))

    def get(self):
        return self.compileddata

    def record(self, logpath='[$HOME]/ss2logs/ss2beacon_parsed_[$TIMESTAMP].json'):
        import json
        import datetime
        import os

        # Replace the $HOME placeholder with the OS/user corrected home folder
        logpath = logpath.replace(os.path.normcase('[$HOME]'), os.path.expanduser('~'))

        # Add a timestamp to the file if the "[$TIMESTAMP]" placeholder exists
        logpath = logpath.replace(os.path.normcase('[$TIMESTAMP]'), datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))

        # Create path directory tree if it doesn't already exist
        try:
            os.makedirs(os.path.split(logpath)[0], exist_ok=True)
        except OSError:
            pass

        # Create file
        with open(logpath, 'at', encoding='utf-8') as lfile:
            json.dump(self.compileddata, lfile, indent=4)
            lfile.write('\n')

    def _parse(self, hexstr, dlim=''):
        from collections import OrderedDict
        import datetime

        # Expected packet lengths
        packetlens = {'eps': 116, 'battery': 15, 'vutrx': 28, 'ants': 4, 'stx': 22}

        # Get timestamp
        timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S ') + datetime.datetime.now().astimezone().tzname()

        # Clean input
        hexstr_cleaned, errmsg = ParseDownlink._cleaninput(hexstr, dlim)
        length = len(hexstr_cleaned)
        if length == 0:

            self._errmsg = errmsg

        # Check if the downlink contains the acknowledgement
        elif ''.join(['47', '61', '74', '6f', '72', '20', '4e', '61',
                      '74', '69', '6f', '6e', '20', '49', '73', '20',
                      '45', '76', '65', '72', '79', '77', '68', '65',
                      '72', '65', '21', '20', '46', '72', '6f', '6d',
                      '20', '53', '77', '61', '6d', '70', '53', '61',
                      '74', '20', '49', '49']) in ''.join(hexstr_cleaned):

            self.compileddata = OrderedDict()
            self.compileddata['timestamp'] = timestamp
            self.compileddata['msgtype'] = 0
            self.compileddata['messagenum'] = 1
            self.compileddata['messagetotal'] = 1
            self.compileddata['message'] = 'Gator Nation Is Everywhere! From SwampSat II'

        # Flight mode 1 second beacon
        elif length == 163:

            # Separate data packets
            epslist = hexstr_cleaned[:packetlens['eps']]

            batterylist = hexstr_cleaned[
                          packetlens['eps']:
                          packetlens['eps'] +
                          packetlens['battery']
                          ]

            vutrxlist = hexstr_cleaned[
                        packetlens['eps'] +
                        packetlens['battery']:
                        packetlens['eps'] +
                        packetlens['battery'] +
                        packetlens['vutrx']
                        ]

            antslist = hexstr_cleaned[packetlens['eps'] + packetlens['battery'] + packetlens['vutrx']:]

            epsdata = self._eps(epslist)
            batterydata = self._battery(batterylist)
            vutrxdata = self._vutrx(vutrxlist)
            antsdata = self._ants(antslist)

            self.compileddata = OrderedDict()
            self.compileddata['timestamp'] = timestamp
            self.compileddata['msgtype'] = 3
            self.compileddata['messagenum'] = 2
            self.compileddata['messagetotal'] = 2
            self.compileddata.update(epsdata)
            self.compileddata.update(batterydata)
            self.compileddata.update(vutrxdata)
            self.compileddata.update(antsdata)

        # Flight mode 2 second beacon
        elif length == 185:

            # Separate data packets
            epslist = hexstr_cleaned[:packetlens['eps']]

            batterylist = hexstr_cleaned[
                          packetlens['eps']:
                          packetlens['eps'] +
                          packetlens['battery']
                          ]

            vutrxlist = hexstr_cleaned[
                        packetlens['eps'] +
                        packetlens['battery']:
                        packetlens['eps'] +
                        packetlens['battery'] +
                        packetlens['vutrx']
                        ]

            antslist = hexstr_cleaned[
                       packetlens['eps'] +
                       packetlens['battery'] +
                       packetlens['vutrx']:
                       packetlens['eps'] +
                       packetlens['battery'] +
                       packetlens['vutrx'] +
                       packetlens['ants']
                       ]

            stxlist = hexstr_cleaned[
                      packetlens['eps'] +
                      packetlens['battery'] +
                      packetlens['vutrx'] +
                      packetlens['ants']:
                      ]

            epsdata = self._eps(epslist)
            batterydata = self._battery(batterylist)
            vutrxdata = self._vutrx(vutrxlist)
            antsdata = self._ants(antslist)
            stxdata = self._stx(stxlist)

            self.compileddata = OrderedDict()
            self.compileddata['timestamp'] = timestamp
            self.compileddata['msgtype'] = 4
            self.compileddata['messagenum'] = 2
            self.compileddata['messagetotal'] = 2
            self.compileddata.update(epsdata)
            self.compileddata.update(batterydata)
            self.compileddata.update(vutrxdata)
            self.compileddata.update(antsdata)
            self.compileddata.update(stxdata)

        else:

            self._errmsg = '\t\t  Not a valid SS2 beacon'

        if self._errmsg != '':

            print(self._errmsg)
            self.compileddata = OrderedDict()

        return self.compileddata

    @staticmethod
    def _cleaninput(hstr, dlim):

        # Clean input
        hstr_cleaned = hstr.lower().strip().replace(' ', '').replace(dlim, '').replace('\t', '').replace('\r', '').replace('\n', '')

        # Check length
        if len(hstr_cleaned) == 0:
            return [], '\t\t  String is empty'

        # Check if the string contains anything except for hex values and the delimiter
        if ParseDownlink._validatehex(hstr_cleaned, dlim):
            return [], '\t\t  Invalid character found in string'

        # Split hex string into list of bytes
        return [hstr_cleaned[i:i + 2] for i in range(0, len(hstr_cleaned), 2)], ''

    @staticmethod
    def _validatehex(hstr, dl=''):

        validhex = '0123456789abcdef' + dl.lower()
        return any(c not in validhex for c in hstr)  # Returns true if the str is not valid

    @staticmethod
    def _parsebinary(data, dtype, numbytes=1):

        # Bytes are read assuming little endian
        dtype = dtype.lower()

        # Fixed widths in bytes; 'uint', 'int' and 'bool' use the caller-supplied numbytes
        widths = {'uint8': 1, 'uint16': 2, 'uint32': 4,
                  'int8': 1, 'int16': 2, 'int32': 4,
                  'bool8': 1, 'bool16': 2, 'bool32': 4,
                  'single': 4, 'double': 8}
        nbytes = widths.get(dtype, numbytes)

        # Read and concatenate the hex bytes in reverse order, then remove the used elements
        hexnum = ''.join(data[i] for i in reversed(range(nbytes)))
        del data[:nbytes]

        if dtype == 'single':
            return ParseDownlink.hextofloat(hexnum)   # Convert hex to single-precision float
        if dtype == 'double':
            return ParseDownlink.hextodouble(hexnum)  # Convert hex to double-precision float

        num = int(hexnum, 16)  # Convert hex to numeric

        if dtype.startswith('bool'):
            # Pad with leading zeros to complete the list of bits,
            # then place the bit flag values in a list (least significant bit first)
            bits = bin(num)[2:].zfill(nbytes * 8)
            return [int(char) for char in bits[::-1]]

        if dtype.startswith('int'):
            # Check for and account for the two's-complement sign
            if num >= 2 ** (nbytes * 8 - 1):
                num -= 2 ** (nbytes * 8)
            return num

        # Unsigned integers ('uint', 'uint8', 'uint16', 'uint32')
        return num

    @staticmethod
    def _eps(hexarray):
        from collections import OrderedDict

        ordict = OrderedDict()
        ordict['eps_output_current_bcr'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 14.662757
        ordict['eps_output_voltage_bcr'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.008993157
        ordict['eps_output_current_12v'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.00207
        ordict['eps_output_voltage_12v'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.01349
        ordict['eps_output_current_bat'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.005237
        ordict['eps_output_voltage_bat'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.008978
        ordict['eps_output_current_5v'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.005237
        ordict['eps_output_voltage_5v'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.005865
        ordict['eps_output_current_3v3'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.005237
        ordict['eps_output_voltage_3v3'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.004311
        ordict['eps_temperature_motherboard'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.372434 - 273.15
        ordict['eps_temperature_daughterboard'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.372434 - 273.15
        ordict['eps_currentdraw_3v3'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.001327547
        ordict['eps_currentdraw_5v'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.001327547
        ordict['eps_switchbus_voltage_motor'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.01349
        ordict['eps_switchbus_current_motor'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.001328
        ordict['eps_switchbus_voltage_hstx'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.008993
        ordict['eps_switchbus_current_hstx'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.006239
        ordict['eps_switchbus_voltage_camera'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.005865
        ordict['eps_switchbus_current_camera'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.001328
        ordict['eps_switchbus_voltage_adac5'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.005865
        ordict['eps_switchbus_current_adac5'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.001328
        ordict['eps_switchbus_voltage_vlf'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.005865
        ordict['eps_switchbus_current_vlf'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.001328
        ordict['eps_switchbus_voltage_ants'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.004311
        ordict['eps_switchbus_current_ants'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.001328
        ordict['eps_switchbus_voltage_adac3'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.004311
        ordict['eps_switchbus_current_adac3'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.001328
        ordict['eps_switchbus_voltage_gps'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.004311
        ordict['eps_switchbus_current_gps'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.001328
        ordict['eps_bcr1_temperature_a'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.4963 - 273.15
        ordict['eps_bcr1_temperature_b'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.4963 - 273.15
        ordict['eps_bcr1_voltage'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0322581
        ordict['eps_bcr1_current'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0009775
        ordict['eps_bcr2_temperature_a'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.4963 - 273.15
        ordict['eps_bcr2_temperature_b'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.4963 - 273.15
        ordict['eps_bcr2_voltage'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0322581
        ordict['eps_bcr2_current_a'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0009775
        ordict['eps_bcr2_current_b'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0009775
        ordict['eps_bcr3_temperature_a'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.4963 - 273.15
        ordict['eps_bcr3_temperature_b'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.4963 - 273.15
        ordict['eps_bcr3_voltage'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0099706
        ordict['eps_bcr3_current_a'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0009775
        ordict['eps_bcr3_current_b'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0009775
        ordict['eps_bcr4_voltage'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0322581
        ordict['eps_bcr4_current_a'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0009775
        ordict['eps_bcr4_current_b'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0009775
        ordict['eps_bcr6_voltage'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0322581
        ordict['eps_bcr6_current_a'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0009775
        ordict['eps_bcr6_current_b'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.0009775

        # 2 Byte of bit flags
        bitflags_pdmstate = ParseDownlink._parsebinary(hexarray, 'bool', 2)
        ordict['eps_pdmstate_vlf_12v'] = bitflags_pdmstate[1]
        ordict['eps_pdmstate_stx_bat'] = bitflags_pdmstate[3]
        ordict['eps_pdmstate_camera'] = bitflags_pdmstate[5]
        ordict['eps_pdmstate_adac_5v'] = bitflags_pdmstate[6]
        ordict['eps_pdmstate_vlf_5v'] = bitflags_pdmstate[7]
        ordict['eps_pdmstate_ants'] = bitflags_pdmstate[8]
        ordict['eps_pdmstate_adac_3v3'] = bitflags_pdmstate[9]
        ordict['eps_pdmstate_gps_3v3'] = bitflags_pdmstate[10]

        ordict['eps_reset_brownout_motherboard'] = ParseDownlink._parsebinary(hexarray, 'uint16')
        ordict['eps_reset_brownout_daughterboard'] = ParseDownlink._parsebinary(hexarray, 'uint16')
        ordict['eps_reset_software_motherboard'] = ParseDownlink._parsebinary(hexarray, 'uint16')
        ordict['eps_reset_software_daughterboard'] = ParseDownlink._parsebinary(hexarray, 'uint16')
        ordict['eps_reset_manual_motherboard'] = ParseDownlink._parsebinary(hexarray, 'uint16')
        ordict['eps_reset_manual_daughterboard'] = ParseDownlink._parsebinary(hexarray, 'uint16')
        ordict['eps_reset_watchdog'] = ParseDownlink._parsebinary(hexarray, 'uint16')

        return ordict

    @staticmethod
    def _battery(hexarray):
        from collections import OrderedDict

        ordict = OrderedDict()
        ordict['battery_voltage'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.008993
        ordict['battery_current'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 14.662757 / 1000
        ordict['battery_temperature_motherboard'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.372434 - 273.15
        ordict['battery_temperature_daughterboard_1'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.3976 - 238.57
        ordict['battery_temperature_daughterboard_2'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.3976 - 238.57
        ordict['battery_temperature_daughterboard_3'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.3976 - 238.57
        ordict['battery_temperature_daughterboard_4'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 0.3976 - 238.57

        # 1 Byte of bit flags
        bitflags_heaterstatus = ParseDownlink._parsebinary(hexarray, 'bool', 1)
        ordict['battery_heaterstatus_1'] = bitflags_heaterstatus[0]
        ordict['battery_heaterstatus_2'] = bitflags_heaterstatus[1]
        ordict['battery_heaterstatus_3'] = bitflags_heaterstatus[2]
        ordict['battery_heaterstatus_4'] = bitflags_heaterstatus[3]

        return ordict

    @staticmethod
    def _vutrx(hexarray):
        from collections import OrderedDict

        def getkbits8(num, k, p):
            binary = bin(num)[2:]  # convert number into binary first
            leadingzeros = 8 - len(binary)  # Count the necessary leading zeros to fill byte
            binary = '0' * leadingzeros + binary  # Fill byte with leading zeros
            end = 8 - p - 1
            start = end - k + 1
            k_bit_sub_str = binary[start: end + 1]  # extract k  bit sub-string
            return int(k_bit_sub_str, 2)  # convert extracted sub-string into decimal again

        ordict = OrderedDict()
        ordict['vutrx_rx_failedpackage'] = ParseDownlink._parsebinary(hexarray, 'uint8')
        ordict['vutrx_rx_crcfailedpackage'] = ParseDownlink._parsebinary(hexarray, 'uint16')
        ordict['vutrx_rx_packagecounter'] = ParseDownlink._parsebinary(hexarray, 'uint16')
        frequentlock = ParseDownlink._parsebinary(hexarray, 'uint8')  # Get whole register
        ordict['vutrx_rx_frequentlock'] = getkbits8(frequentlock, 1, 0)  # Split register by bit position
        ordict['vutrx_tx_frequentlock'] = getkbits8(frequentlock, 1, 1)
        ordict['vutrx_rssi'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 3 / 4096
        ordict['vutrx_smps_temperature'] = ParseDownlink._parsebinary(hexarray, 'int8')
        ordict['vutrx_poweramplifier_temperature'] = ParseDownlink._parsebinary(hexarray, 'int8')
        ordict['vutrx_poweramplifier_power'] = ParseDownlink._parsebinary(hexarray, 'uint8')
        ordict['vutrx_frequencyoffset_tx'] = ParseDownlink._parsebinary(hexarray, 'uint16')
        ordict['vutrx_frequencyoffset_rx'] = ParseDownlink._parsebinary(hexarray, 'uint16')
        dtmf = ParseDownlink._parsebinary(hexarray, 'uint8')  # Get whole register
        ordict['vutrx_dtmf_tone'] = getkbits8(dtmf, 4, 0)  # Split register by bit position
        ordict['vutrx_dtmf_counter'] = getkbits8(dtmf, 4, 4)
        ordict['vutrx_current_3v3'] = ParseDownlink._parsebinary(hexarray, 'int16') * 3e-6
        ordict['vutrx_current_5v'] = ParseDownlink._parsebinary(hexarray, 'int16') * 62e-6
        ordict['vutrx_voltage_3v3'] = ParseDownlink._parsebinary(hexarray, 'int16') * 4e-3
        ordict['vutrx_voltage_5v'] = ParseDownlink._parsebinary(hexarray, 'int16') * 4e-3
        ordict['vutrx_poweramplifier_forwardpower'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 3 / 4096
        ordict['vutrx_poweramplifier_reversepower'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 3 / 4096

        return ordict

    @staticmethod
    def _stx(hexarray):
        from collections import OrderedDict

        def getkbits8(num, k, p):
            binary = bin(num)[2:]  # convert number into binary first
            leadingzeros = 8 - len(binary)  # Count the necessary leading zeros to fill byte
            binary = '0' * leadingzeros + binary  # Fill byte with leading zeros
            end = 8 - p - 1
            start = end - k + 1
            k_bit_sub_str = binary[start: end + 1]  # extract k  bit sub-string
            return int(k_bit_sub_str, 2)  # convert extracted sub-string into decimal again

        ordict = OrderedDict()
        ordict['stx_voltage_battery'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 4e-3
        ordict['stx_current_battery'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 40e-6
        ordict['stx_voltage_poweramplifier'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 4e-3
        ordict['stx_current_poweramplifier'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 40e-6
        ordict['stx_temperature_top'] = getkbits8(ParseDownlink._parsebinary(hexarray, 'int16'), 12, 4) * 0.0625
        ordict['stx_temperature_bottom'] = getkbits8(ParseDownlink._parsebinary(hexarray, 'int16'), 12, 4) * 0.0625
        ordict['stx_temperature_poweramplifier'] = \
            ((ParseDownlink._parsebinary(hexarray, 'uint8') * 3 / 4096) - 0.5) * 100
        ordict['stx_synth_offset'] = ParseDownlink._parsebinary(hexarray, 'uint8') * 0.5 + 2400
        ordict['stx_buffer_overrun'] = ParseDownlink._parsebinary(hexarray, 'uint16')
        ordict['stx_buffer_underrun'] = ParseDownlink._parsebinary(hexarray, 'uint16')

        # 1 Byte of bit flags
        bitflags_pastatus = ParseDownlink._parsebinary(hexarray, 'bool', 1)
        ordict['stx_poweramplifier_status_frequencylock'] = bitflags_pastatus[0]
        ordict['stx_poweramplifier_status_powergood'] = bitflags_pastatus[1]

        ordict['stx_rf_poweroutput'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 3 * 28 / 4096 / 18

        return ordict

    @staticmethod
    def _ants(hexarray):
        from collections import OrderedDict

        ordict = OrderedDict()
        ordict['ants_temperature'] = ParseDownlink._parsebinary(hexarray, 'uint16') * 3.3 / 1023

        # 2 Bytes of bit flags
        bitflags_antsstatus = ParseDownlink._parsebinary(hexarray, 'bool', 2)
        ordict['ants_status_armed'] = bitflags_antsstatus[0]
        ordict['ants_status_deploymentactive_4'] = bitflags_antsstatus[1]
        ordict['ants_status_stopcriteria_4'] = bitflags_antsstatus[2]
        ordict['ants_status_deploymentflag_4'] = bitflags_antsstatus[3]
        ordict['ants_status_independentburn'] = bitflags_antsstatus[4]
        ordict['ants_status_deploymentactive_3'] = bitflags_antsstatus[5]
        ordict['ants_status_stopcriteria_3'] = bitflags_antsstatus[6]
        ordict['ants_status_deploymentflag_3'] = bitflags_antsstatus[7]
        ordict['ants_status_ignoreswitches'] = bitflags_antsstatus[8]
        ordict['ants_status_deploymentactive_2'] = bitflags_antsstatus[9]
        ordict['ants_status_stopcriteria_2'] = bitflags_antsstatus[10]
        ordict['ants_status_deploymentflag_2'] = bitflags_antsstatus[11]
        ordict['ants_status_deploymentactive_1'] = bitflags_antsstatus[13]
        ordict['ants_status_stopcriteria_1'] = bitflags_antsstatus[14]
        ordict['ants_status_deploymentflag_1'] = bitflags_antsstatus[15]

        return ordict

    @staticmethod
    def hextofloat(h, swap=False):
        import string
        if not isinstance(h, str):
            raise TypeError
        if not h.startswith('0x'):
            h = '0x' + h
        if not all(c in string.hexdigits.lower() for c in h[2:]):
            raise ValueError
        if swap:
            h = '0x' + ''.join(reversed([h[2:][i:i + 2] for i in range(0, len(h[2:]), 2)]))

        i = int(h[2:], 16)  # Convert hex to int
        b = bin(i)  # Convert int to binary
        b = b[2:].zfill(32)  # Pad binary to fill all 32 bits

        sign = 1 if int(b[0], 2) == 0 else -1
        exponent = int(b[1:9], 2)
        mantissa = b[9:]
        mval = 1
        for i in range(len(mantissa)):
            if mantissa[i] == '1':
                mval += 2 ** (-(i + 1))

        return sign * 2 ** (exponent - 127) * mval

    @staticmethod
    def hextodouble(h, swap=False):
        import string
        if not isinstance(h, str):
            raise TypeError
        if not h.startswith('0x'):
            h = '0x' + h
        if not all(c in string.hexdigits.lower() for c in h[2:]):
            raise ValueError
        if swap:
            h = '0x' + ''.join(reversed([h[2:][i:i + 2] for i in range(0, len(h[2:]), 2)]))

        i = int(h[2:], 16)  # Convert hex to int
        b = bin(i)  # Convert int to binary
        b = b[2:].zfill(64)  # Pad binary to fill all 64 bits

        sign = 1 if int(b[0], 2) == 0 else -1
        exponent = int(b[1:12], 2)
        mantissa = b[12:]
        mval = 1
        for i in range(len(mantissa)):
            if mantissa[i] == '1':
                mval += 2 ** (-(i + 1))

        return sign * 2 ** (exponent - 1023) * mval
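
A small usage sketch, not part of the original module: it feeds `ParseDownlink.parse` the acknowledgement payload that `_parse` already recognizes (the ASCII-hex of "Gator Nation Is Everywhere! From SwampSat II"), so no real SS2 beacon capture is needed. `parserecord`/`record` would additionally require a writable log path.

ack_hex = ('47 61 74 6f 72 20 4e 61 74 69 6f 6e 20 49 73 20'
           '45 76 65 72 79 77 68 65 72 65 21 20 46 72 6f 6d'
           '20 53 77 61 6d 70 53 61 74 20 49 49')
parsed = ParseDownlink.parse(ack_hex)
print(parsed['message'])    # -> Gator Nation Is Everywhere! From SwampSat II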
Example #12
def result_summary(res_dict,
                   args_dict,
                   TNR_target=0.05,
                   skip_pattern=None,
                   include_pattern='.*',
                   pvalue_record=None):
    from utils.meters import simple_auc
    from collections import OrderedDict
    # if logging is not already configured, set it up for external callers
    if not logging.getLogger('').handlers:
        setup_logging()
    in_dist = args_dict['dataset']
    alphas = args_dict['alphas']
    logging.info(f'Report for {args_dict["model"]} - {in_dist}')
    logging.info(f'Tag: {args_dict["tag"]}')
    result_dict = OrderedDict(
        model=args_dict["model"],
        in_dist=args_dict['dataset'],
        LDA=args_dict.get('LDA'),
        joint=args_dict['measure_joint_distribution'],
        tag=args_dict['tag'],
        channles_sellect=args_dict.get('channel_selection_fn'))
    # read indist results to calibrate alpha value for target TNR
    rows = []
    accuracies = {'model': {}}
    for reduction_name, reduction_metrics in res_dict[in_dist].items():
        accuracies[reduction_name] = {}
        if reduction_name.endswith('_acc'):
            acc = reduction_metrics.mean.cpu().numpy()
            std = reduction_metrics.std.cpu().numpy()
            acc_name = reduction_name.replace('_acc', '')
            if acc_name == 'model':
                reduction_name = 'model'
            if acc_name.endswith('rescaled-smx'):
                reduction_name = acc_name[:-13]
                acc_name = 'model_rescaled_smx'
            elif acc_name.endswith('-pval'):
                reduction_name = acc_name[:-5]
                acc_name = 'pval'

            accuracies[reduction_name][f'{acc_name}_t1'] = acc[0]
            accuracies[reduction_name][f'{acc_name}_t5'] = acc[1]
            accuracies[reduction_name][f'{acc_name}_std_t1'] = std[0]

    for reduction_name, reduction_metrics in res_dict[in_dist].items():
        if skip_pattern and bool(re.match(
                skip_pattern, reduction_name)) or include_pattern and not bool(
                    re.match(include_pattern, reduction_name)):
            continue
        result_dict['reduction'] = reduction_name
        result_dict.update(**accuracies['model'])
        result_dict.update(**accuracies[reduction_name])
        logging.info(reduction_name)
        if type(reduction_metrics) != dict:
            # report simple metric
            logging.info(
                f'\t{reduction_metrics.mean}\t({reduction_metrics.std})')
            continue
        # report reduction specific metrics
        for metric_name, meter_object in reduction_metrics.items():
            metric_stats = MeterDict()
            if not metric_name.endswith('_roc'):
                logging.info(
                    f'\t{metric_name}: {meter_object.mean.numpy():0.3}')
                continue
            FPR = meter_object.mean.numpy()
            calibrated_alpha_id = min((FPR < TNR_target).sum() - 1, len(FPR))

            if calibrated_alpha_id == -1:
                # all pvalues are larger than alpha
                fpr_under_target_alpha = meter_object.mean[0]
                interp_alpha = FPR[0]
                calibrated_alpha_id = 0
            else:
                fpr_under_target_alpha = FPR[calibrated_alpha_id]
                # actual rejection threshold to use for TNR 95%
                interp_alpha = np.interp(0.05, FPR.squeeze(), alphas)

            result_dict.update(
                dict(metric_name=metric_name,
                     FPR_strict=fpr_under_target_alpha,
                     FPR_over=FPR[calibrated_alpha_id + 1],
                     chosen_alpha=interp_alpha))
            logging.info(
                f'\t{metric_name} - in-dist rejected: '
                # f'alpha-{indist_pvalues_roc[alphas.index(TNR_target)]:0.3f} ({TNR_target:0.3f}), '
                f'under-{fpr_under_target_alpha:0.3f} ({alphas[calibrated_alpha_id]:0.3f}), '
                f'interp-{TNR_target:0.3f} ({interp_alpha:0.3f}), '
                f'over-{FPR[calibrated_alpha_id + 1]:0.3f} ({alphas[calibrated_alpha_id + 1]})'
            )

            if pvalue_record and reduction_name in pvalue_record[in_dist]:
                if metric_name.startswith(
                        'class_cond'
                ) and 'predicted_id' in pvalue_record[in_dist]:
                    predicted_ids = pvalue_record[in_dist]['predicted_id']
                    in_cc_pval_pred = pvalue_record[in_dist][reduction_name][
                        th.arange(predicted_ids.shape[0]), predicted_ids]
                else:
                    in_cc_pval_pred = pvalue_record[in_dist][
                        reduction_name].max(1)[0]

            for target_dataset_name, reduction_metrics in res_dict.items():
                if target_dataset_name != in_dist and metric_name in reduction_metrics[
                        reduction_name]:
                    interp_rejected = np.interp(
                        interp_alpha, alphas, reduction_metrics[reduction_name]
                        [metric_name].mean.numpy())
                    TPR = reduction_metrics[reduction_name][
                        metric_name].mean.numpy()
                    raw_rejected = TPR[alphas.index(TNR_target)]
                    auroc = simple_auc(TPR, FPR)
                    logging.info(
                        f'\t\t{target_dataset_name}:\traw-{raw_rejected:0.3f}\tinterp-{interp_rejected:0.3f}\tAUROC:{auroc:0.3f}'
                    )
                    if pvalue_record and reduction_name in pvalue_record[
                            target_dataset_name]:
                        if metric_name.startswith(
                                'class_cond'
                        ) and 'predicted_id' in pvalue_record[
                                target_dataset_name]:
                            predicted_ids = pvalue_record[target_dataset_name][
                                'predicted_id']
                            out_cc_pval_pred = pvalue_record[
                                target_dataset_name][reduction_name][
                                    th.arange(predicted_ids.shape[0]),
                                    predicted_ids]
                        else:
                            out_cc_pval_pred = pvalue_record[
                                target_dataset_name][reduction_name].max(1)[0]

                        m = metric(in_cc_pval_pred.numpy(),
                                   out_cc_pval_pred.numpy())
                        logging.info(f'\t\t\tbenchmark metrics: {m}')
                        result_dict.update(**m)

                    result_dict.update(
                        dict(out_dist=target_dataset_name,
                             TPR95_raw=raw_rejected,
                             TPR95_interp=interp_rejected,
                             AUROC=auroc))
                    rows.append(result_dict.copy())

                    if in_dist.startswith(
                            'cifar') and target_dataset_name.startswith(
                                'cifar'):
                        continue
                    metric_stats.update(
                        dict(TPR95_raw=th.tensor([raw_rejected]),
                             TPR95_interp=th.tensor([interp_rejected]),
                             AUROC=th.tensor([auroc])))

            if target_dataset_name != in_dist and metric_name in reduction_metrics[
                    reduction_name]:
                result_dict['out_dist'] = 'avg'
                logging.info(
                    f'\tmetric avg stats: {[k + " " + str(float(v)) for k, v in metric_stats.get_mean_dict().items()]}'
                )
                result_dict.update(**metric_stats.get_mean_dict())
                rows.append(result_dict.copy())
    return rows