Example #1
 def _calibrate_adj_bat(batln_x, batln_y, factor, reverse_factor):
     """Employs substitution and substitution choice rules to calibrate between two adjacent battalions."""
     units_to_batln_y = _utils.round_up(factor *
                                        batln_x.get_deficient_units())
     batln_y.add_to_required_units(units_to_batln_y)
     batln_x.update_required_units(batln_x.base_units)
     if batln_y.is_deficient():
         units_to_batln_x = _utils.round_up(batln_y.get_deficient_units() *
                                            reverse_factor)
         batln_x.add_to_required_units(units_to_batln_x)
         batln_y.remove_from_required_units(batln_y.get_deficient_units())
Example #2
File: elastic.py Project: epfl-labos/DSS
    def elastic_possible(self, node, task):
        # Check that the task is not an MRAM container:
        if task.type is YarnContainerType.MRAM:
            return False

        # Check if the task has an associated penalty function.
        if task.penalty is None:
            LOG.error("Task " + str(task) + " has no associated penalty function. ELASTIC disabled.")
            return False

        # Check if elastic behavior has been disabled as part of symbex or PEEK.
        if self.disable_elastic:
            return False

        # Check that if node pools are set, and the current node is part
        # of a non-elastic pool, we don't attempt to be elastic.
        if self.elastic_pool is not None and node.node_id > (self.node_count * 1.0 * self.elastic_pool / 100):
            return False

        # Check that sufficient cores exist on this node.
        if node.available.vcores < task.resource.vcores:
            return False

        # Next check that the minimum memory required exists
        if node.available.memory_mb < round_up(task.resource.memory_mb * ELASTIC_MEM_MIN_RATIO / 100, MEM_INCREMENT):
            return False

        return True
Example #3
    def elastic_possible(self, node, task):
        # Check that the task is not an MRAM container:
        if task.type is YarnContainerType.MRAM:
            return False

        # Check if the task has an associated penalty function.
        if task.penalty is None:
            LOG.error("Task " + str(task) +
                      " has no associated penalty function. ELASTIC disabled.")
            return False

        # Check if elastic behavior has been disabled as part of symbex or PEEK.
        if self.disable_elastic:
            return False

        # Check that if node pools are set, and the current node is part
        # of a non-elastic pool, we don't attempt to be elastic.
        if self.elastic_pool is not None and node.node_id > (
                self.node_count * 1.0 * self.elastic_pool / 100):
            return False

        # Check that sufficient cores exist on this node.
        if node.available.vcores < task.resource.vcores:
            return False

        # Next check that the minimum memory required exists
        if node.available.memory_mb < round_up(
                task.resource.memory_mb * ELASTIC_MEM_MIN_RATIO / 100,
                MEM_INCREMENT):
            return False

        return True
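Note: Examples #2, #3, #7, and #8 call round_up(value, MEM_INCREMENT) to align a memory amount to the scheduler's allocation granularity before comparing it with what a node has available. The DSS helper itself is not reproduced in this listing; a minimal sketch of a two-argument version with that behaviour, assuming non-negative integer inputs, is:

    def round_up(value, increment):
        # Round value up to the nearest multiple of increment,
        # e.g. round_up(1100, 512) == 1536.
        return ((value + increment - 1) // increment) * increment

The same multiple-of-N rounding is what Examples #11 and #16 rely on when aligning the code-signature offset to 16 bytes and the __LINKEDIT vmsize to 4096 bytes.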
Example #4
File: elastic.py Project: epfl-labos/DSS
    def create_container_from_task(self, node, allocated, job, task):
        # XXX: This code duplicates the code from YarnScheduler. Needs refactoring.
        err_adjusted_duration = duration = task.duration
        if task.penalty is not None:
            mem_error_adjustment = self.mem_errors[job.job_id] if job.job_id in self.mem_errors else 0
            ib_error_adjustment = self.ib_errors[job.job_id] if job.job_id in self.ib_errors else 0
            if LOG.isEnabledFor(logging.DEBUG):
                LOG.debug("YARN_ELASTIC_SCHEDULER: MEM_ERR_ADJUST: {}, IB_ERR_ADJUST: {}".format(mem_error_adjustment,
                                                                                                 ib_error_adjustment))
            resource = task.ideal_resource
            if mem_error_adjustment != 0:
                resource = YarnResource(task.ideal_resource.memory_mb, task.ideal_resource.vcores)
                resource.memory_mb += mem_error_adjustment
                resource.memory_mb = round_up(resource.memory_mb, MEM_INCREMENT)

            if allocated < resource:
                err_adjusted_duration = task.penalty.get_penalized_runtime(resource, allocated, task.duration,
                                                                           error_offset=ib_error_adjustment)

                duration = task.penalty.get_penalized_runtime(task.ideal_resource, allocated, task.duration)

        # Create YarnRunningContainer from YarnPrototypeContainer
        yarn_container = YarnRunningContainer(
            container_id=job.get_new_container_id(),
            duration=err_adjusted_duration,
            resource=allocated,
            priority=task.priority,
            container_type=task.type,
            job=task.job,
            node=node,
            task=task)

        yarn_container.duration_error = err_adjusted_duration - duration

        if yarn_container.is_elastic:
            # Update decision count
            self.stats_elastic_decisions_inc(task.job.job_id)

            if LOG.isEnabledFor(logging.DEBUG):
                LOG.debug("YARN_ELASTIC_SCHEDULER: Allocated {} elasticly for {}:{} on node {} "
                          "with penalized runtime of {} (err adjusted: {}) for a request of {} "
                          "and a duration of {}".format(
                            allocated.memory_mb, job.get_name(), job.next_container_id, node, duration,
                            err_adjusted_duration, task.resource.memory_mb, task.duration
                          ))
        else:
            if LOG.isEnabledFor(logging.DEBUG):
                LOG.debug("YARN_ELASTIC_SCHEDULER: Allocated {} regularly for {}:{} on node {} "
                          "with a duration of: {} (err adjusted: {})".format(
                            allocated.memory_mb, job.get_name(), job.next_container_id, node, task.duration,
                            err_adjusted_duration
                          ))

        return yarn_container
Example #5
 def prepare_battalions(self, counter_attack):
     """Prepares all the battalions in order using power rule."""
     if self.validate_counter_attack(counter_attack):
         # Helps to know what the counter attack is if needed later.
         self.counter_attack = counter_attack
         for batln_name in self.counter_attack:
             required_units = _utils.round_up(counter_attack[batln_name],
                                              half_it=True)
             batln = Battalion(
                 army_name=self.army_name,
                 battalion_initials=batln_name,
                 **army_data['army'][self.army_name][batln_name],
                 required_units=required_units)
             self.battalions.append(batln)
Example #6
def find_closest():
    how_long = 2.8

    start = time.time()
    first_distance = monster_distance()
    if first_distance is None:
        return None

    time_dist = [utils.round_up(time.time() - start, 1), first_distance]
    while True:
        keyboard.press('d')

        new_distance = monster_distance()
        if new_distance is None:
            return None

        if time_dist[1] > new_distance:
            time_dist = [utils.round_up(time.time() - start, 1), new_distance]
            continue

        if utils.round_up(time.time() - start, 1) == how_long:
            keyboard.release('d')
            break

    # turn to target
    turn_to_closest = time_dist[1]

    if time_dist[0] == 0.0:
        print('at beginning')
        return time_dist[1]

    second_start = time.time()
    keyboard.press('d')

    while True:

        print(utils.round_up(time.time() - second_start, 1))
        second_new_distance = monster_distance()
        if second_new_distance is None:
            return None

        if utils.round_up(second_new_distance,
                          1) == utils.round_up(turn_to_closest, 1):
            print('found')
            time.sleep(0.2)
            keyboard.release('d')
            return time_dist[1]
        if utils.round_up(time.time() - second_start, 1) >= how_long:
            print('not found in time')
            keyboard.release('d')
            return find_closest()
Example #7
    def allocate_on_node(self, node, task):
        # First try being regular
        regular = YarnRegularScheduler.allocate_on_node(self, node, task)
        if regular is not None:
            return regular

        # Check if elasticity is possible
        if not self.elastic_possible(node, task):
            return None

        # Check what is the best running time in these resources
        min_mem_allocatable = round_up(
            task.resource.memory_mb * ELASTIC_MEM_MIN_RATIO / 100,
            MEM_INCREMENT)
        if min_mem_allocatable < YARN_MIN_ALLOCATION_MB:
            min_mem_allocatable = YARN_MIN_ALLOCATION_MB
        min_mem_for_runtime = node.available.memory_mb
        tmp_resource = YarnResource(min_mem_for_runtime, task.resource.vcores)
        min_runtime = YarnElasticScheduler.get_penalized_runtime(
            task.penalty, self.state.user_config.mem_overestimate_assume,
            task.resource, tmp_resource, task.duration)
        for mem in xrange(min_mem_allocatable, node.available.memory_mb,
                          MEM_INCREMENT):
            tmp_resource.memory_mb = mem
            penalized_runtime = YarnElasticScheduler.get_penalized_runtime(
                task.penalty, self.state.user_config.mem_overestimate_assume,
                task.resource, tmp_resource, task.duration)
            if penalized_runtime < min_runtime:
                min_runtime = penalized_runtime
                min_mem_for_runtime = mem
            elif penalized_runtime == min_runtime and mem < min_mem_for_runtime:
                min_mem_for_runtime = mem

        if LOG.isEnabledFor(logging.DEBUG):
            LOG.debug("YARN_SMARTG_SCHEDULER: SMARTG possible with " +
                      str(min_mem_for_runtime) + " for " +
                      task.job.get_name() + ":" +
                      str(task.job.next_container_id) + " on node " +
                      str(node) + " with penalized runtime of " +
                      str(min_runtime) + " for a request of " +
                      str(task.resource.memory_mb) + " and a duration of: " +
                      str(task.duration))
        return YarnResource(min_mem_for_runtime,
                            task.resource.vcores), min_runtime
Example #8
File: elastic.py Project: epfl-labos/DSS
    def allocate_on_node(self, node, task):
        # First try being regular
        regular = YarnRegularScheduler.allocate_on_node(self, node, task)
        if regular is not None:
            return regular

        # Check if elasticity is possible
        if not self.elastic_possible(node, task):
            return None

        # Check what is the best running time in these resources
        min_mem_allocatable = round_up(task.resource.memory_mb * ELASTIC_MEM_MIN_RATIO / 100, MEM_INCREMENT)
        if min_mem_allocatable < YARN_MIN_ALLOCATION_MB:
            min_mem_allocatable = YARN_MIN_ALLOCATION_MB
        min_mem_for_runtime = node.available.memory_mb
        tmp_resource = YarnResource(min_mem_for_runtime, task.resource.vcores)
        min_runtime = YarnElasticScheduler.get_penalized_runtime(task.penalty,
                                                                 self.state.user_config.mem_overestimate_assume,
                                                                 task.resource, tmp_resource, task.duration)
        for mem in xrange(min_mem_allocatable, node.available.memory_mb, MEM_INCREMENT):
            tmp_resource.memory_mb = mem
            penalized_runtime = YarnElasticScheduler.get_penalized_runtime(
                task.penalty, self.state.user_config.mem_overestimate_assume, task.resource, tmp_resource, task.duration
            )
            if penalized_runtime < min_runtime:
                min_runtime = penalized_runtime
                min_mem_for_runtime = mem
            elif penalized_runtime == min_runtime and mem < min_mem_for_runtime:
                min_mem_for_runtime = mem

        if LOG.isEnabledFor(logging.DEBUG):
            LOG.debug("YARN_SMARTG_SCHEDULER: SMARTG possible with " +
                      str(min_mem_for_runtime) + " for " + task.job.get_name() +
                      ":" + str(task.job.next_container_id) + " on node " +
                      str(node) + " with penalized runtime of " +
                      str(min_runtime) + " for a request of " +
                      str(task.resource.memory_mb) + " and a duration of: " +
                      str(task.duration))
        return YarnResource(min_mem_for_runtime, task.resource.vcores), min_runtime
Example #9
    def create_container_from_task(self, node, allocated, job, task):
        err_adjusted_duration = duration = task.duration
        if task.penalty is not None:
            mem_error_adjustment = self.mem_errors[
                job.job_id] if job.job_id in self.mem_errors else 0
            if LOG.isEnabledFor(logging.DEBUG):
                LOG.debug("YARN_REGULAR_SCHEDULER: MEM_ERR_ADJUST: {}".format(
                    mem_error_adjustment))
            resource = task.ideal_resource
            if mem_error_adjustment != 0:
                resource = YarnResource(task.ideal_resource.memory_mb,
                                        task.ideal_resource.vcores)
                resource.memory_mb += mem_error_adjustment
                resource.memory_mb = round_up(resource.memory_mb,
                                              MEM_INCREMENT)

            if allocated < resource:
                err_adjusted_duration = task.penalty.get_penalized_runtime(
                    resource, allocated, task.duration)

                duration = task.penalty.get_penalized_runtime(
                    task.ideal_resource, allocated, task.duration)

        # Create YarnRunningContainer from YarnPrototypeContainer
        yarn_container = YarnRunningContainer(
            container_id=job.get_new_container_id(),
            duration=err_adjusted_duration,
            resource=task.resource,
            priority=task.priority,
            container_type=task.type,
            job=task.job,
            node=node,
            task=task)

        yarn_container.duration_error = err_adjusted_duration - duration

        return yarn_container
Example #10
File: irf.py Project: cobiadigital/irf
    def run_pp(self, etdata, pp=True, **kwargs):
        '''Performs post-processing sanity check and hard-removes or replaces events
        Parameters:
            etdata  --  instance of ETData()
            pp      --  if True, post-processing is performed. Otherwise only counts cases
        Returns:
            Numpy array of updated event stream
            Numpy array of updated status
            Dictionary with event status accumulator
        '''

        _evt = etdata.calc_evt(fast=True)
        status = etdata.data['status']
        fs = etdata.fs

        check = self.check_accum

        #%%pp
        ### Saccade sanity check
        _sacc = _evt.query('evt==2')
        check['saccades']+=len(_sacc)

        #check isi
        _isi = (_sacc[1:]['s'].values-_sacc[:-1]['e'].values)/fs
        _isi_inds = np.where(_isi<kwargs['thres_isi']/1000.)[0]
        check['sacc_isi']+=len(_isi_inds)
        self.check_inds['sacc_isi'] = _sacc.index[_isi_inds].values

        #TODO: implement isi merging
#        if pp:
#
#            _etdata = copy.deepcopy(etdata)
#            _evt_unfold = [_e for _, e in _evt.iterrows()
#                          for _e in itertools.repeat(e['evt'],
#                                                     int(np.diff(e[['s', 'e']])))]
#
#        _etdata.data['evt'] = _evt_unfold
#        _etdata.calc_evt(fast=True)
#        _evt = _etdata.evt

        #pp: remove short saccades
#        _sdur_thres = max([0.006, float(3/etdata.fs)])
#        _sdur = _evt.query('evt==2 and dur<@_sdur_thres')

        thres_sd_lo=kwargs['thres_sd_lo']/1000.
        thres_sd_lo_s = round_up(thres_sd_lo*fs)

        _sdur = _evt.query('evt==2 and (dur<@thres_sd_lo or dur_s<@thres_sd_lo_s)')
        check['short_saccades']+=len(_sdur)
        self.check_inds['short_saccades'] = _sdur.index.values
        if pp:
            _evt.loc[_sdur.index, 'evt'] = 0

        #check long saccades.
        thres_sd_hi=kwargs['thres_sd_hi']/1000.
        thres_sd_hi_s = round_up(thres_sd_hi*fs)
        _sdur = _evt.query('evt==2 and dur_s>@thres_sd_hi_s')
        check['long_saccades']+=len(_sdur)
        self.check_inds['long_saccades'] = _sdur.index.values
        if pp:
            _evt.loc[_sdur.index, 'evt'] = 0

        #pp: find saccades surrounding undef;
        _sacc_check={'202':0, '20':0, '02':0}
        seq=''.join(map(str, _evt['evt']))
        for pattern in _sacc_check.keys():
            _check = np.array([m.start() for m in re.finditer('(?=%s)'%pattern, seq)])
            if not (len(_check)):
                continue

            _sacc_check[pattern]+=len(_check)
            self.check_inds['sacc%s'%pattern] = _check
#            #pp: remove saccades surrounding undef; not used anymore
#            if pp:
#                if (pattern=='202'):
#                    assert ((_evt.loc[_check+1, 'evt']==0).all() and
#                            (_evt.loc[_check+2, 'evt']==2).all())
#                    _evt.loc[_check, 'evt'] = 0
#                    _evt.loc[_check+2, 'evt'] = 0
#
##                if (pattern=='20'):
##                    assert (_evt.loc[_check+1, 'evt']==0).all()
##                    _evt.loc[_check, 'evt'] = 0
##                if (pattern=='02'):
##                    assert (_evt.loc[_check+1, 'evt']==2).all()
##                    _evt.loc[_check+1, 'evt'] = 0
#                seq=''.join(map(str, _evt['evt']))

        check['sacc202']+=_sacc_check['202']
        check['sacc20']+=_sacc_check['20']
        check['sacc02']+=_sacc_check['02']

        ###PSO sanity check
        check['pso']+=len(_evt.query('evt==3'))

        #pp: change short PSOs to fixations; not used
#        thres_pd = kwargs['thres_pd']/1000.
#        thres_pd_s = round_up(thres_pd*fs)
#        _pdur = _evt.query('evt==3 and (dur<@thres_pd or dur_s<@thres_pd_s)')
#        check['short_pso']+=len(_pdur)
#        self.check_inds['short_pso'] = _pdur.index.values
#        if pp:
#            _evt.loc[_pdur.index, 'evt'] = 1

        #pp: remove unreasonable psos
        _pso_check={'13':0, '03':0, '23':0 }
        seq=''.join(map(str, _evt['evt']))
        for pattern in _pso_check.keys():
            _check = np.array([m.start() for m in re.finditer('(?=%s)'%pattern, seq)])
            if not (len(_check)):
                continue

            _pso_check[pattern]+=len(_check)
            self.check_inds['pso%s'%pattern] = _check
            #pp: change PSOs after fixations to fixations
            if pp:
                if (pattern=='13'):
                    assert (_evt.loc[_check+1, 'evt']==3).all()
                    _evt.loc[_check+1, 'evt'] = 1
                #pp: change PSOs after undef to undef
                if (pattern=='03'):
                    assert (_evt.loc[_check+1, 'evt']==3).all()
                    _evt.loc[_check+1, 'evt'] = 0
        check['pso23']+=_pso_check['23']
        check['pso13']+=_pso_check['13']
        check['pso03']+=_pso_check['03']

        ###fixation sanity check
        #unfold and recalculate event data
        _evt_unfold = [_e for _, e in _evt.iterrows()
                          for _e in itertools.repeat(e['evt'],
                                                     int(np.diff(e[['s', 'e']])))]
        _etdata = copy.deepcopy(etdata)
        _etdata.data['evt'] = _evt_unfold
        _etdata.calc_evt(fast=True)
        _evt = _etdata.evt

        check['fixations']+=len(_evt.query('evt==1'))

        #pp: remove short fixations
        thres_fd=kwargs['thres_fd']/1000.
        thres_fd_s = round_up(thres_fd*fs)
        _fdur = _evt.query('evt==1 and (dur<@thres_fd or dur_s<@thres_fd_s)')
        check['short_fixations']+=len(_fdur)
        self.check_inds['short_fixations'] = _fdur.index.values

        #TODO:
        #check fixation merge
        if pp:
            _inds = np.array(_fdur.index)
            _evt.loc[_inds, 'evt'] = 0
#            #check if there are saccades or psos left around newly taged undef
#            #so basically +- 2 events around small fixation
#            _inds = np.unique(np.concatenate((_inds, _inds+1, _inds-1, _inds+2, _inds-2)))
#            _inds = _inds[(_inds>-1) & (_inds<len(_evt))]
#            _mask =_evt.loc[_inds, 'evt'].isin([2, 3])
#            _evt.loc[_inds[_mask.values], 'evt'] = 0

        ##return result
        _evt_unfold = [_e for _, e in _evt.iterrows()
                      for _e in itertools.repeat(e['evt'], int(np.diff(e[['s', 'e']])))]
        assert len(_evt_unfold) == len(status)

        status[np.array(_evt_unfold)==0] = False

        return np.array(_evt_unfold), status, check, self.check_inds
Example #11
def make_signature(arch_macho, arch_offset, arch_size, cmds, f,
                   entitlements_file, codesig_data_length, signer, ident):
    # NB: arch_offset is absolute in terms of file start.  Everything else is relative to arch_offset!

    # sign from scratch
    log.debug("signing from scratch")

    drs = None
    drs_lc = cmds.get('LC_DYLIB_CODE_SIGN_DRS')
    if drs_lc:
        drs = drs_lc.data.blob

    codesig_offset = utils.round_up(arch_size, 16)

    # generate code hashes
    log.debug("codesig offset: {}".format(codesig_offset))
    codeLimit = codesig_offset
    log.debug("new cL: {}".format(hex(codeLimit)))
    nCodeSlots = int(math.ceil(float(codesig_offset) / 0x1000))
    log.debug("new nCS: {}".format(nCodeSlots))

    # generate placeholder LC_CODE_SIGNATURE (like what codesign_allocate does)
    fake_hashes = ["\x00" * 20] * nCodeSlots

    codesig_cons = make_basic_codesig(entitlements_file, drs, codeLimit,
                                      fake_hashes, signer, ident)
    codesig_data = macho_cs.Blob.build(codesig_cons)

    cmd_data = construct.Container(dataoff=codesig_offset,
                                   datasize=codesig_data_length)
    cmd = construct.Container(cmd='LC_CODE_SIGNATURE',
                              cmdsize=16,
                              data=cmd_data,
                              bytes=macho.CodeSigRef.build(cmd_data))

    log.debug("CS blob before: {}".format(
        utils.print_structure(codesig_cons, macho_cs.Blob)))
    log.debug("len(codesig_data): {}".format(len(codesig_data)))

    codesig_length = codesig_data_length
    log.debug("codesig length: {}".format(codesig_length))

    log.debug("old ncmds: {}".format(arch_macho.ncmds))
    arch_macho.ncmds += 1
    log.debug("new ncmds: {}".format(arch_macho.ncmds))

    log.debug("old sizeofcmds: {}".format(arch_macho.sizeofcmds))
    arch_macho.sizeofcmds += cmd.cmdsize
    log.debug("new sizeofcmds: {}".format(arch_macho.sizeofcmds))

    arch_macho.commands.append(cmd)

    hashes = []
    if codesig_data_length > 0:
        # Patch __LINKEDIT
        for lc in arch_macho.commands:
            if lc.cmd == 'LC_SEGMENT_64' or lc.cmd == 'LC_SEGMENT':
                if lc.data.segname == '__LINKEDIT':
                    log.debug(
                        "found __LINKEDIT, old filesize {}, vmsize {}".format(
                            lc.data.filesize, lc.data.vmsize))

                    lc.data.filesize = utils.round_up(lc.data.filesize,
                                                      16) + codesig_length
                    if (lc.data.filesize > lc.data.vmsize):
                        lc.data.vmsize = utils.round_up(lc.data.filesize, 4096)

                    if lc.cmd == 'LC_SEGMENT_64':
                        lc.bytes = macho.Segment64.build(lc.data)
                    else:
                        lc.bytes = macho.Segment.build(lc.data)

                    log.debug("new filesize {}, vmsize {}".format(
                        lc.data.filesize, lc.data.vmsize))

        actual_data = macho.MachO.build(arch_macho)
        log.debug("actual_data length with codesig LC {}".format(
            len(actual_data)))

        # Now seek to the start of the actual data and read until the end of the arch.
        f.seek(arch_offset + len(actual_data))
        bytes_to_read = codesig_offset + arch_offset - f.tell()
        file_slice = f.read(bytes_to_read)
        if len(file_slice) < bytes_to_read:
            log.warn("expected {} bytes but got {}, zero padding.".format(
                bytes_to_read, len(file_slice)))
            file_slice += ("\x00" * (bytes_to_read - len(file_slice)))
        actual_data += file_slice

        for i in xrange(nCodeSlots):
            actual_data_slice = actual_data[(0x1000 * i):(0x1000 * i + 0x1000)]

            actual = hashlib.sha1(actual_data_slice).digest()
            log.debug("Slot {} (File page @{}): {}".format(
                i, hex(0x1000 * i), actual.encode('hex')))
            hashes.append(actual)
    else:
        hashes = fake_hashes

    # Replace placeholder with real one.
    codesig_cons = make_basic_codesig(entitlements_file, drs, codeLimit,
                                      hashes, signer, ident)
    codesig_data = macho_cs.Blob.build(codesig_cons)
    cmd_data = construct.Container(dataoff=codesig_offset,
                                   datasize=len(codesig_data))
    cmd = construct.Container(cmd='LC_CODE_SIGNATURE',
                              cmdsize=16,
                              data=cmd_data,
                              bytes=macho.CodeSigRef.build(cmd_data))
    arch_macho.commands[-1] = cmd
    cmds['LC_CODE_SIGNATURE'] = cmd
    return codesig_data
Example #12
def resblock_up_cond_deep(x_init,
                          z,
                          channels_out,
                          opt,
                          upscale=True,
                          use_bias=True,
                          scope='deep_resblock'):

    channels_in = int(x_init.get_shape()[-1])
    inner_channels = round_up((channels_in + channels_out) // 6, 8)

    with tf.variable_scope(scope):
        with tf.variable_scope('bottleneck'):
            x = cond_bn(x_init, z, opt=opt)
            x = opt["act"](x)
            x = conv(x,
                     inner_channels,
                     kernel=1,
                     stride=1,
                     use_bias=False,
                     opt=opt)

        with tf.variable_scope('upscale'):
            x = cond_bn(x, z, opt=opt)
            x = opt["act"](x)
            if upscale:
                x = upconv(x, inner_channels, use_bias=False, opt=opt)

        with tf.variable_scope('inner1'):
            x = g_conv(x, inner_channels, use_bias=False, opt=opt)
            x = cond_bn(x, z, opt=opt)
            x = opt["act"](x)

        with tf.variable_scope('inner2'):
            x = g_conv(x, inner_channels, use_bias=False, opt=opt)
            x = bn(x, opt=opt)
            x = opt["act"](x)

        with tf.variable_scope('proj'):
            x = conv(x,
                     channels_out,
                     kernel=1,
                     stride=1,
                     use_bias=use_bias,
                     opt=opt)

        with tf.variable_scope('skip'):
            if channels_in != channels_out:
                print(inner_channels, channels_in, channels_out,
                      channels_in - channels_out)
                kept, dropped = tf.split(x_init,
                                         num_or_size_splits=[
                                             channels_out,
                                             channels_in - channels_out
                                         ],
                                         axis=-1)
            else:
                kept = x_init

            if upscale:
                x_init = upconv(kept, channels_out, use_bias=use_bias, opt=opt)

    return x + x_init
Example #13
def write_register_data(reg, len, data):
    writer.write32(0xA2000000 | reg)
    writer.write32(((len - 1) << 8) | 0x20)
    writer.writeBits(data)


#
# Enumerate config chains
#
for chain_id in range(0, len(chip.configChain)):
    chain = chain_with_id(chain_id)
    chain_bits = chain['bits']
    chain_len = len(chain_bits)

    num_padding_bits = round_up(chain_len, 32) - chain_len

    write_register_data(0x20 | chain_id, chain_len,
                        chain_bits + ([0] * num_padding_bits))

#
# Write the bitstream
#
bitstream = []
for tile_row in range(chip.rows - 1, -1, -1):
    row_height = chip.max_row_height(tile_row)

    for row in range(0, row_height):
        row_bits = []
        for tile_col in range(chip.columns - 1, -1, -1):
            column_width = chip.column_width(tile_col)
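As a quick check of the padding arithmetic in Examples #13 and #20: round_up(chain_len, 32) - chain_len is the number of zero bits needed to fill the last 32-bit word, so a 70-bit chain would give round_up(70, 32) == 96 and num_padding_bits == 26 (this assumes the multiple-of-N behaviour sketched after Example #3; the project's own helper is not shown here).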
Example #14
    def create_container_from_task(self, node, allocated, job, task):
        # XXX: This code duplicates the code from YarnScheduler. Needs refactoring.
        err_adjusted_duration = duration = task.duration
        if task.penalty is not None:
            mem_error_adjustment = self.mem_errors[
                job.job_id] if job.job_id in self.mem_errors else 0
            ib_error_adjustment = self.ib_errors[
                job.job_id] if job.job_id in self.ib_errors else 0
            if LOG.isEnabledFor(logging.DEBUG):
                LOG.debug(
                    "YARN_ELASTIC_SCHEDULER: MEM_ERR_ADJUST: {}, IB_ERR_ADJUST: {}"
                    .format(mem_error_adjustment, ib_error_adjustment))
            resource = task.ideal_resource
            if mem_error_adjustment != 0:
                resource = YarnResource(task.ideal_resource.memory_mb,
                                        task.ideal_resource.vcores)
                resource.memory_mb += mem_error_adjustment
                resource.memory_mb = round_up(resource.memory_mb,
                                              MEM_INCREMENT)

            if allocated < resource:
                err_adjusted_duration = task.penalty.get_penalized_runtime(
                    resource,
                    allocated,
                    task.duration,
                    error_offset=ib_error_adjustment)

                duration = task.penalty.get_penalized_runtime(
                    task.ideal_resource, allocated, task.duration)

        # Create YarnRunningContainer from YarnPrototypeContainer
        yarn_container = YarnRunningContainer(
            container_id=job.get_new_container_id(),
            duration=err_adjusted_duration,
            resource=allocated,
            priority=task.priority,
            container_type=task.type,
            job=task.job,
            node=node,
            task=task)

        yarn_container.duration_error = err_adjusted_duration - duration

        if yarn_container.is_elastic:
            # Update decision count
            self.stats_elastic_decisions_inc(task.job.job_id)

            if LOG.isEnabledFor(logging.DEBUG):
                LOG.debug(
                    "YARN_ELASTIC_SCHEDULER: Allocated {} elasticly for {}:{} on node {} "
                    "with penalized runtime of {} (err adjusted: {}) for a request of {} "
                    "and a duration of {}".format(allocated.memory_mb,
                                                  job.get_name(),
                                                  job.next_container_id, node,
                                                  duration,
                                                  err_adjusted_duration,
                                                  task.resource.memory_mb,
                                                  task.duration))
        else:
            if LOG.isEnabledFor(logging.DEBUG):
                LOG.debug(
                    "YARN_ELASTIC_SCHEDULER: Allocated {} regularly for {}:{} on node {} "
                    "with a duration of: {} (err adjusted: {})".format(
                        allocated.memory_mb, job.get_name(),
                        job.next_container_id, node, task.duration,
                        err_adjusted_duration))

        return yarn_container
Example #15
#######
# getActualIntervals.py: calculate the actual time interval between consecutive events in microseconds
# argv[1]: recordedEvents.txt
#######
import sys
from utils import round_up

recorded = open(sys.argv[1])
lines = recorded.readlines()
times = []  # array to store all of the times
for line in lines:
    if line[0] == "[":
        timeWithBraketAndSpace = line.split("]")
        timeWithBraket = timeWithBraketAndSpace[0].replace(" ", "")
        time = timeWithBraket.replace("[", "")
        times.append(float(time))

for i, time in enumerate(times):
    if i != 0:
        print("interval " + str(i) + ": " +
              str(round_up((times[i] - times[i - 1]) * 1000000)))
Example #16
def make_signature(arch_macho, arch_offset, arch_size, cmds, f, entitlements_file, codesig_data_length, signer, ident):
    # NB: arch_offset is absolute in terms of file start.  Everything else is relative to arch_offset!

    # sign from scratch
    log.debug("signing from scratch")

    drs = None
    drs_lc = cmds.get('LC_DYLIB_CODE_SIGN_DRS')
    if drs_lc:
        drs = drs_lc.data.blob

    codesig_offset = utils.round_up(arch_size, 16)

    # generate code hashes
    log.debug("codesig offset: {}".format(codesig_offset))
    codeLimit = codesig_offset
    log.debug("new cL: {}".format(hex(codeLimit)))
    nCodeSlots = int(math.ceil(float(codesig_offset) / 0x1000))
    log.debug("new nCS: {}".format(nCodeSlots))


    # generate placeholder LC_CODE_SIGNATURE (like what codesign_allocate does)
    fake_hashes = ["\x00" * 20]*nCodeSlots

    codesig_cons = make_basic_codesig(entitlements_file,
            drs,
            codeLimit,
            fake_hashes,
            signer,
            ident)
    codesig_data = macho_cs.Blob.build(codesig_cons)

    cmd_data = construct.Container(dataoff=codesig_offset,
            datasize=codesig_data_length)
    cmd = construct.Container(cmd='LC_CODE_SIGNATURE',
            cmdsize=16,
            data=cmd_data,
            bytes=macho.CodeSigRef.build(cmd_data))

    log.debug("CS blob before: {}".format(utils.print_structure(codesig_cons, macho_cs.Blob)))
    log.debug("len(codesig_data): {}".format(len(codesig_data)))

    codesig_length = codesig_data_length
    log.debug("codesig length: {}".format(codesig_length))

    log.debug("old ncmds: {}".format(arch_macho.ncmds))
    arch_macho.ncmds += 1
    log.debug("new ncmds: {}".format(arch_macho.ncmds))

    log.debug("old sizeofcmds: {}".format(arch_macho.sizeofcmds))
    arch_macho.sizeofcmds += cmd.cmdsize
    log.debug("new sizeofcmds: {}".format(arch_macho.sizeofcmds))

    arch_macho.commands.append(cmd)

    hashes = []
    if codesig_data_length > 0:
        # Patch __LINKEDIT
        for lc in arch_macho.commands:
            if lc.cmd == 'LC_SEGMENT_64' or lc.cmd == 'LC_SEGMENT':
                if lc.data.segname == '__LINKEDIT':
                    log.debug("found __LINKEDIT, old filesize {}, vmsize {}".format(lc.data.filesize, lc.data.vmsize))

                    lc.data.filesize = utils.round_up(lc.data.filesize, 16) + codesig_length
                    if (lc.data.filesize > lc.data.vmsize):
                        lc.data.vmsize = utils.round_up(lc.data.filesize, 4096)

                    if lc.cmd == 'LC_SEGMENT_64':
                        lc.bytes = macho.Segment64.build(lc.data)
                    else:
                        lc.bytes = macho.Segment.build(lc.data)

                    log.debug("new filesize {}, vmsize {}".format(lc.data.filesize, lc.data.vmsize))


        actual_data = macho.MachO.build(arch_macho)
        log.debug("actual_data length with codesig LC {}".format(len(actual_data)))

        # Now seek to the start of the actual data and read until the end of the arch.
        f.seek(arch_offset + len(actual_data))
        bytes_to_read = codesig_offset + arch_offset - f.tell()
        file_slice = f.read(bytes_to_read)
        if len(file_slice) < bytes_to_read:
            log.warn("expected {} bytes but got {}, zero padding.".format(bytes_to_read, len(file_slice)))
            file_slice += ("\x00" * (bytes_to_read - len(file_slice)))
        actual_data += file_slice

        for i in xrange(nCodeSlots):
            actual_data_slice = actual_data[(0x1000 * i):(0x1000 * i + 0x1000)]

            actual = hashlib.sha1(actual_data_slice).digest()
            log.debug("Slot {} (File page @{}): {}".format(i, hex(0x1000 * i), actual.encode('hex')))
            hashes.append(actual)
    else:
        hashes = fake_hashes

    # Replace placeholder with real one.
    codesig_cons = make_basic_codesig(entitlements_file,
            drs,
            codeLimit,
            hashes,
            signer,
            ident)
    codesig_data = macho_cs.Blob.build(codesig_cons)
    cmd_data = construct.Container(dataoff=codesig_offset,
            datasize=len(codesig_data))
    cmd = construct.Container(cmd='LC_CODE_SIGNATURE',
            cmdsize=16,
            data=cmd_data,
            bytes=macho.CodeSigRef.build(cmd_data))
    arch_macho.commands[-1] = cmd
    cmds['LC_CODE_SIGNATURE'] = cmd
    return codesig_data
Example #17
 def test_check_a_number_is_halfed_rouded_to_the_next_whole_number(self):
     observed = utils.round_up(5, half_it=True)
     self.assertEqual(3, observed)
Example #18
def resblock_down_deep(x_init,
                       channels_out,
                       opt,
                       downscale=True,
                       use_bias=True,
                       scope='deep_resblock'):

    channels_in = x_init.get_shape()[-1]
    inner_channels = round_up((channels_in + channels_out) // 6, 8)

    with tf.variable_scope(scope):
        with tf.variable_scope('bottleneck'):
            x = x_init
            if (opt["bn_in_d"]): x = bn(x, opt=opt)
            x = opt["act"](x)
            x = conv(x,
                     inner_channels,
                     kernel=1,
                     stride=1,
                     pad=0,
                     use_bias=use_bias,
                     opt=opt)

        with tf.variable_scope('inner1'):
            if (opt["bn_in_d"]): x = bn(x, opt=opt)
            x = opt["act"](x)
            x = conv(x,
                     inner_channels,
                     kernel=3,
                     stride=1,
                     pad=1,
                     use_bias=use_bias,
                     opt=opt)

        with tf.variable_scope('inner2'):
            if (opt["bn_in_d"]): x = bn(x, opt=opt)
            x = opt["act"](x)
            x = conv(x,
                     inner_channels,
                     kernel=3,
                     stride=1,
                     pad=1,
                     use_bias=use_bias,
                     opt=opt)

        with tf.variable_scope('downscale'):
            x = opt["act"](x)

            if downscale:
                x = downconv(x,
                             inner_channels,
                             use_bias=use_bias,
                             opt=opt,
                             method='pool_only')

        with tf.variable_scope('proj'):
            x = conv(x,
                     channels_out,
                     kernel=1,
                     stride=1,
                     pad=0,
                     use_bias=use_bias,
                     opt=opt)

        with tf.variable_scope('skip'):
            if downscale:
                x_init = downconv(x_init,
                                  channels_in,
                                  use_bias=use_bias,
                                  opt=opt,
                                  method='pool_only')
            if channels_in != channels_out:
                conv_ch = channels_out - channels_in
                dense = conv(x_init,
                             conv_ch,
                             kernel=1,
                             stride=1,
                             pad=0,
                             use_bias=use_bias,
                             opt=opt)
                x_init = tf.concat([x_init, dense], axis=-1)

    return x + x_init
Example #19
 def test_check_a_number_and_a_half_is_rouded_to_the_next_whole_number(
         self):
     observed = utils.round_up(6.5)
     self.assertEqual(7, observed)
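Examples #17 and #19 pin down what the battalion project expects from utils.round_up: round_up(5, half_it=True) == 3 and round_up(6.5) == 7. A minimal sketch consistent with those tests, and with the one-decimal calls such as round_up(t, 1) in Example #6, assuming a decimals argument (the real helpers in these projects may differ):

    import math

    def round_up(number, decimals=0, half_it=False):
        # Optionally halve the value first, then take the ceiling
        # at the requested number of decimal places.
        if half_it:
            number = number / 2.0
        if decimals:
            factor = 10 ** decimals
            return math.ceil(number * factor) / factor
        return int(math.ceil(number))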
Example #20
# Enumerate config chains
#
for chain_id in range(0, len(chip.configChain)):
    chain = chain_with_id(chain_id)
    chain_bits = chain['bits']
    chain_len = len(chain_bits)
    exp_len = len(chip.configChain[chain_id].empty_bits())

    if chip.device_id == 0x01500010 and exp_len == chain_len:
        chain_bits = [
            chain_bits[int(x / 4)] for x in range(0,
                                                  len(chain_bits) * 4)
        ]
        chain_len = len(chain_bits)

    num_padding_bits = round_up(chain_len, 32) - chain_len

    write_register_data(0x20 | chain_id, chain_len,
                        chain_bits + ([0] * num_padding_bits))

#
# Write the bitstream
#
bitstream = []
for tile_row in range(chip.rows - 1, -1, -1):
    row_height = chip.bitstream_height_for_row(tile_row)

    for row in range(0, row_height):
        row_bits = []
        for tile_col in range(chip.columns - 1, -1, -1):
            column_width = chip.bitstream_width_for_column(tile_col)
Example #21
    return sd


# read in recordedEvent and calculate the total time of the events recorded
recorded = open(sys.argv[1])
lines = recorded.readlines()
times = []  # array to store all of the times
for line in lines:
    if line[0] == "[":
        timeWithBraketAndSpace = line.split("]")
        timeWithBraket = timeWithBraketAndSpace[0].replace(" ", "")
        time = timeWithBraket.replace("[", "")
        times.append(float(time))

recordInterval = times[len(times) - 1] - times[0]
print("record time is " + str(round_up(recordInterval)))

# read in all of the replay times and calculate the mean and standard deviation
allReplay = open(sys.argv[2])
lines = allReplay.readlines()
replayTimes = []
for line in lines:
    line = line.replace(" ", "")
    time = line.split("real")[0]
    replayTimes.append(float(time))
replayMean = getMean(replayTimes)
replaySD = getStandardDiviation(replayTimes)
print("mean: " + str(round_up(replayMean)))
print("standard diviation: " + str(round_up(replaySD)))
print("error: " + str(round_up(replayMean / recordInterval)))
Example #22
    def sign(self, app, signer):

        temp = tempfile.NamedTemporaryFile('wb', delete=False)

        # If signing fat binary from scratch, need special handling

        # TODO: we assume that if any slice is unsigned, all slices are.  This should be true in practice but
        # we should still guard against this.
        if self.sign_from_scratch and 'FatArch' in self.m.data:
            assert len(self.arches) >= 2

            # todo(markwang): Update fat headers and mach_start for each slice if needed
            log.debug('signing fat binary from scratch')

            sorted_archs = sorted(self.arches, key=lambda arch: arch['arch_offset'])

            prev_arch_end = 0
            for arch in sorted_archs:
                fatentry = arch['macho']  # has pointer to container

                codesig_arch_offset, new_codesig_data = self._sign_arch(arch, app, signer)
                codesig_file_offset = arch['arch_offset'] + codesig_arch_offset
                log.debug('existing arch slice: cputype {}, cpusubtype {}, offset {}, size {}'
                          .format(fatentry.cputype, fatentry.cpusubtype, arch['arch_offset'], arch['arch_size']))
                log.debug("codesig arch offset: {2}, file offset: {0}, len: {1}"
                          .format(codesig_file_offset, len(new_codesig_data), codesig_arch_offset))
                assert codesig_file_offset >= (arch['arch_offset'] + arch['arch_size'])

                # Store the old slice offset/sizes because we need them when we copy the data slices from self.f to temp
                arch['old_arch_offset'] = arch['arch_offset']
                arch['old_arch_size'] = arch['arch_size']

                arch['codesig_arch_offset'] = codesig_arch_offset
                arch['codesig_data'] = new_codesig_data

                new_arch_size = codesig_arch_offset + len(new_codesig_data)

                if prev_arch_end > arch['arch_offset']:
                    arch['arch_offset'] = utils.round_up(prev_arch_end, 16384)

                prev_arch_end = arch['arch_offset'] + new_arch_size
                arch['arch_size'] = new_arch_size

                log.debug('new arch slice after codesig: offset {}, size {}'.format(arch['arch_offset'],
                                                                                    arch['arch_size']))

            # write slices and code signatures in reverse order
            for arch in reversed(sorted_archs):
                self.f.seek(arch['old_arch_offset'])
                temp.seek(arch['arch_offset'])
                temp.write(self.f.read(arch['old_arch_size']))

                temp.seek(arch['arch_offset'] + arch['codesig_arch_offset'])
                temp.write(arch['codesig_data'])

                fatarch_info = self.m.data.FatArch[arch['fat_index']]
                fatarch_info.size = arch['arch_size']
                fatarch_info.offset = arch['arch_offset']

        else:
            # copy self.f into temp, reset to beginning of file
            self.f.seek(0)
            temp.write(self.f.read())
            temp.seek(0)

            # write new codesign blocks for each arch
            offset_fmt = ("offset: {2}, write offset: {0}, "
                          "new_codesig_data len: {1}")
            for arch in self.arches:
                offset, new_codesig_data = self._sign_arch(arch, app, signer)
                write_offset = arch['macho'].macho_start + offset
                log.debug(offset_fmt.format(write_offset,
                                            len(new_codesig_data),
                                            offset))
                temp.seek(write_offset)
                temp.write(new_codesig_data)

        # write new headers
        temp.seek(0)
        macho.MachoFile.build_stream(self.m, temp)
        temp.close()

        # make copy have same permissions
        mode = os.stat(self.path).st_mode
        os.chmod(temp.name, mode)
        # log.debug("moving temporary file to {0}".format(self.path))
        shutil.move(temp.name, self.path)
Example #23
File: irf.py Project: cobiadigital/irf
def postProcess(etdata, pred, pred_mask, events = [1, 2, 3], dev = False, **kwargs):

    status = pred_mask
    fs = etdata.fs

    #prepare array for storing event data
    events_pp = np.zeros((len(etdata.data), len(events)+1))
    events_pp[pred_mask, 1:] = pred

    #filter raw probabilities
    events_pp = gaussian_filter1d(events_pp, 1, axis=0)
    events_pp[~pred_mask, 0] = 1


#    #1. mark short interpolation sequences as valid,
#    #i.e. remove short interpolation (or other "invalid data") events
#    thres_id_s = round_up_to_odd(kwargs['thres_id']*fs/1000.+1)
#    status_aggr = np.array(aggr_events(pred_mask))
#    events_interp = status_aggr[status_aggr[:,-1]==False]
#    md = events_interp[:,1] - events_interp[:,0]
#    mask_rem_interp = md<thres_id_s
#    ind_rem_interp=[i for s, e in events_interp[mask_rem_interp, :2]
#                      for i in range(s, e)]
#    status[ind_rem_interp] = True
#
#    ind_leave_interp=[i for s, e in events_interp[~mask_rem_interp, :2]
#                        for i in range(s, e)]
#    events_pp[ind_leave_interp, 0]=1
#    events_pp[ind_leave_interp, 1:]=-1

    #2. merge fixations; can be implemented as hpp
    thres_ifa=kwargs['thres_ifa']
    thres_ifi=kwargs['thres_ifi']
    thres_ifi_s = round_up(thres_ifi*fs/1000.)

    _events = np.argmax(np.around(events_pp, 3), axis=1)
    _events_aggr = np.array(aggr_events(_events))
    _events_fix = _events_aggr[_events_aggr[:,-1]==1]
    _fix_pos = calc_fixPos(etdata, _events_fix)

    #inter-fixation amplitudes
    ifa = np.pad(np.hypot(_fix_pos[:-1, 1]-_fix_pos[1:, 0],
                          _fix_pos[:-1, 3]-_fix_pos[1:, 2]),
                (0,1), 'constant', constant_values=thres_ifa+1e-5)
    #inter-fixation intervals
    ifi = np.pad(_events_fix[1:,0]-_events_fix[:-1,1],
                 (0,1), 'constant', constant_values=thres_ifi_s+1)
    mask_merge_fix=(ifa<thres_ifa) & (ifi<thres_ifi_s)

    ind_merge_fix=[i for s, e in zip(_events_fix[mask_merge_fix,1],
                                     _events_fix[np.roll(mask_merge_fix, 1),0])
                     for i in range(s, e)]
    events_pp[ind_merge_fix, 0]=-1
    events_pp[ind_merge_fix, 1]=1
    events_pp[ind_merge_fix, 2:]=-1

    #3.1 expand saccades; can be implemented as hpp
    thres_sd_s=kwargs['thres_sd_s'] #make saccades at least 3 samples long
    _events = np.argmax(np.around(events_pp, 3), axis=1)
    _events_aggr = np.array(aggr_events(_events))
    _events_sacc = _events_aggr[_events_aggr[:,-1]==2]
    _sd = _events_sacc[:,1]-_events_sacc[:,0]
    mask_expand_sacc = _sd < thres_sd_s
    ind_mid_sacc=(_events_sacc[mask_expand_sacc][:,1]-_events_sacc[mask_expand_sacc][:,0])/2+_events_sacc[mask_expand_sacc][:,0]
    ind_rem_fix=[i for s, e in zip(ind_mid_sacc-(thres_sd_s/2+thres_sd_s%2), ind_mid_sacc+(thres_sd_s/2))  for i in range(s, e)]
    events_pp[ind_rem_fix, :2]=-1
    events_pp[ind_rem_fix, 3]=-1
    events_pp[ind_rem_fix, 2]=1

    #3.2 merge nearby saccades; can be implemented as hpp
    thres_isi = kwargs['thres_isi']
    thres_isi_s = round_up(thres_isi*fs/1000.)
    _events = np.argmax(np.around(events_pp, 3), axis=1)
    _events_aggr = np.array(aggr_events(_events))
    _events_sacc = _events_aggr[_events_aggr[:,-1]==2]
    #inter-saccade intervals
    isi = np.pad(_events_sacc[1:,0]-_events_sacc[:-1,1],
                 (0,1), 'constant', constant_values=thres_isi_s+1)
    mask_merge_sacc=isi<thres_isi_s
    ind_merge_sacc=[i for s, e in zip(_events_sacc[mask_merge_sacc,1],
                                      _events_sacc[np.roll(mask_merge_sacc, 1),0])
                      for i in range(s, e)]
    events_pp[ind_merge_sacc, 2]=1
    events_pp[ind_merge_sacc, :2]=-1

    #3.3. remove too short or too long saccades.
    #+ for too long
    #- for too short; give a chance to become fixation samples
    #leave too long for hpp
    thres_sd_lo=kwargs['thres_sd_lo']
#    thres_sd_hi=kwargs['thres_sd_hi']
    thres_sd_lo_s = round_up(thres_sd_lo*fs/1000.)
#    thres_sd_hi_s = round_up(thres_sd_hi*fs/1000.)
    _events = np.argmax(np.around(events_pp, 3), axis=1)
    _events_aggr = np.array(aggr_events(_events))
    _events_sacc = _events_aggr[_events_aggr[:,-1]==2]
    fd = _events_sacc[:,1]-_events_sacc[:,0]
    mask_rem_sacc=(fd<thres_sd_lo_s) # | (fd>thres_sd_hi_s)
    ind_rem_sacc=[i for s, e in _events_sacc[mask_rem_sacc, :2]
                             for i in range(s, e)]
    events_pp[ind_rem_sacc, 2:]=-1

    #4. remove unreasonable PSOs; give a chance to become other classes
    _events = np.argmax(np.around(events_pp, 3), axis=1)
    _events_aggr = np.array(aggr_events(_events))
    mask_pso = _events_aggr[:,-1]==3

    #remove too short PSOs; not used
#    thres_pd = kwargs['thres_pd']
#    thres_pd_s = round_up(thres_pd*fs/1000.)
#    pso_dur = _events_aggr[:,1]-_events_aggr[:,0]
#    mask_pso_dur = pso_dur < thres_pd_s

    #remove PSOs not after saccade
    seq = ''.join(map(str, _events_aggr[:,-1]))
    mask_pso_after_sacc = np.ones_like(mask_pso)
    pso_after_sacc = [_m.start()+1 for _m in re.finditer('(?=23)', seq) ]
    mask_pso_after_sacc[pso_after_sacc]=False

    #mask_inv_pso = mask_pso & mask_pso_dur & mask_pso_after_sacc
    mask_inv_pso = mask_pso & mask_pso_after_sacc

    ind_inv_pso=[i for s, e in _events_aggr[mask_inv_pso, :2]
                            for i in range(s, e)]
    events_pp[ind_inv_pso, 2:]=-1 #can be neither pso nor saccade

    #5. remove too short fixations; +
    #leave for hpp
#    thres_fd=kwargs['thres_fd']
#    thres_fd_s = round_up(thres_fd*fs/1000.)
#    _events = np.argmax(np.around(events_pp, 3), axis=1)
#    _events_aggr = np.array(aggr_events(_events))
#    _events_fix = _events_aggr[_events_aggr[:,-1]==1]
#    fd = _events_fix[:,1]-_events_fix[:,0]
#    mask_rem_fix=fd<thres_fd_s
#    ind_rem_fix=[i for s, e in _events_fix[mask_rem_fix, :2]
#                            for i in range(s, e)]
#    events_pp[ind_rem_fix, 0]=1
#    events_pp[ind_rem_fix, 1:]=-1

    '''legacy code
    #6.1 blink detection: remove saccade-like events between missing data
    #leave for hpp
    _events = np.argmax(np.around(events_pp, 3), axis=1)
    _events_aggr = np.array(aggr_events(_events))

    seq = ''.join(map(str, _events_aggr[:,-1]))
    patterns = ['20', '02'] # !!! only works for patterns ['20', '02'] !!!
    _blinks = [_m.start() for _pattern in patterns
                         for _m in re.finditer('(?=%s)'%_pattern, seq) ]
    if len(_blinks):
        _blinks = np.array(_blinks)
        _blinks = np.unique(np.concatenate([_blinks, _blinks+1]))
        ind_blink=[i for s, e in _events_aggr[_blinks, :2]
                              for i in range(s, e)]
        events_pp[ind_blink, 0]=1
        events_pp[ind_blink, 1:]=-1

    #6.2 remove PSOs again, because some of saccades might been removed; +
    #leave for hpp
    _events = np.argmax(np.around(events_pp, 3), axis=1)
    _events_aggr = np.array(aggr_events(_events))
    mask_pso = _events_aggr[:,-1]==3
    seq = ''.join(map(str, _events_aggr[:,-1]))
    mask_pso_after_sacc = np.ones_like(mask_pso)
    pso_after_sacc = [_m.start()+1 for _m in re.finditer('(?=23)', seq) ]
    mask_pso_after_sacc[pso_after_sacc]=False
    mask_inv_pso = mask_pso & mask_pso_after_sacc
    ind_inv_pso=[i for s, e in _events_aggr[mask_inv_pso, :2]
                            for i in range(s, e)]

    events_pp[ind_inv_pso, 1:]=-1 #remove event completely
    '''

    #7. Final events
    events = np.argmax(np.around(events_pp, 3), axis=1)
    status = ~(events==0)
    return events, status, events_pp