Example #1
def packetizer():
    pow2 = [1, 2, 4, 8, 16, 32, 64, 128]
    while True:
        if len(pending_frames) > 0:
            frame = pending_frames.popleft()
            frame_bytes = bytearray()
            # Convert every 8 bits to a byte (LSB First)
            for octet in mi.grouper(frame, 8):
                frame_bytes.append(np.dot(list(octet), pow2))
            for packet in mi.grouper(frame_bytes, 12):
                pending_packets.append(list(packet))
        else:
            time.sleep(0.25)
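A quick standalone sketch of the LSB-first packing step used above (hypothetical data; it assumes each frame is a sequence of 0/1 ints whose length is a multiple of 8, since grouper pads an incomplete final octet with None, which np.dot cannot handle):

import numpy as np
import more_itertools as mi

pow2 = [1, 2, 4, 8, 16, 32, 64, 128]
bits = [1, 0, 1, 0, 0, 0, 0, 0,   # 0x05, least significant bit first
        1, 1, 1, 1, 1, 1, 1, 1]   # 0xFF
packed = bytearray(int(np.dot(list(octet), pow2)) for octet in mi.grouper(bits, 8))
print(list(packed))  # [5, 255]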
Example #2
def retrieve_bytes(array, number_bytes_hidden, index_dict=None):
    """Retrieve the bytes hidden in the array

        Parameters:
          array: Array of bytes with the hidden message
          number_bytes_hidden: Number of hidden bytes
          index_dict: Dictionary containing the lists to shuffle the bytes

        Returns:
          List containing the hidden bytes

    """
    assert check_size(
        len(array), number_bytes_hidden
    ), 'The array provided cannot contain the specified number of hidden bytes'

    array_output = []

    for i, [a, b, c, d, e, f, g,
            h] in enumerate(more_itertools.grouper(array, 8)):
        sub_array = [a, b, c, d, e, f, g, h]

        # stop once all the hidden bytes have been recovered
        if i >= number_bytes_hidden:
            break

        # retrieve the hidden byte in the sub-array
        byte = retrieve_byte(sub_array, index_dict, i)

        # add the byte to the list
        array_output.append(byte)

    return array_output
Example #3
 def simple_pagination(self, elements=3):
     products = self.get_children()
     if len(products) == 0:
         return None
     if len(products) < elements + 1:
         return products
     return grouper(elements, products, fillvalue=None)
Example #4
def mergesort(lst):
    lst = [[i] for i in lst]

    while len(lst) > 1:
        lst = [merge(lst1, lst2) for lst1, lst2 in grouper(lst, 2, [])]

    return lst[0] if lst else []  # the single remaining run is the sorted list
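The merge helper is not shown in this example. A minimal sketch of a compatible two-way merge (the [] fill value passed to grouper above simply falls through and returns the other run unchanged):

def merge(left, right):
    # standard two-way merge of two sorted lists
    out = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            out.append(left[i])
            i += 1
        else:
            out.append(right[j])
            j += 1
    out.extend(left[i:])
    out.extend(right[j:])
    return out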
Example #5
    def generateKey(self):
        '''
        Key generation function.
        The Blowfish key is between 32 and 448 bits.
        '''

        self.__key = ''
        for j in [
                ''.join(i) for i in more_itertools.grouper(
                    BBS(self.len_key, 991, 997).run(), 8)
        ]:
            self.__key += chr(int(j, 2))

        self.__main_dict['key'] = self.__key

        if not os.path.exists(self.__file_name):
            logging.info('file does not exist')
            with open(self.__file_name, 'w', encoding='utf-8') as file:
                file.write(json.dumps(self.__main_dict))

            return self.__main_dict

        else:
            with open(self.__file_name, 'r', encoding='utf-8') as file:
                self.__main_dict = json.loads(file.read())
            if not self.__main_dict.get('key'):
                logging.info('possible tampering attempt')
                self.__alarm = 1
            else:
                self.__key = self.__main_dict['key']
                self.__alarm = 0

            return self.__main_dict
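The BBS class is not part of this snippet; from how its output is consumed, it is presumably a Blum Blum Shub-style generator whose run() yields '0'/'1' characters (len_key of them, ideally a multiple of 8 so each group of eight joins into a clean byte string). A hedged stand-in under those assumptions:

class BBS:
    # hypothetical stand-in, not the original class
    def __init__(self, n_bits, p, q, seed=3):
        self.n_bits = n_bits
        self.m = p * q
        self.x = (seed * seed) % self.m

    def run(self):
        # yield n_bits pseudo-random bits as '0'/'1' strings
        for _ in range(self.n_bits):
            self.x = (self.x * self.x) % self.m
            yield str(self.x & 1)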
Example #6
def downlink_handler(packet):
    if fletcher_decode(packet) != 0:
        return
    packet_no = (packet[1] & 0xF) + 1
    total_packets = (packet[1] & 0xF0) >> 4
    channels = packet[2:10]
    channels.reverse()
    nibbles = []
    for byte in channels:
        nibbles.append((byte & 0xF0) >> 4)
        nibbles.append(byte & 0xF)
    nibbles.pop(0)
    channels = []
    for channel in mi.grouper(3, nibbles):
        num = (channel[0] << 8) + (channel[1] << 4) + channel[2]
        if (num <= 320) and (num >= 80):
            channels.append(num)
    channels = np.array(channels)

    print("\n[Downlink Channel Packet]")
    print("Packet Number: {}/{}".format(packet_no, total_packets))
    print("Frequencies: ", end='')
    for channel in channels:
        print("{:.4f}MHz".format(channel * 0.0025 + 137), end=' ')
    print('')
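A worked instance of the decode above: the nibbles (1, 4, 0) form the 12-bit word (1 << 8) + (4 << 4) + 0 = 320, which sits at the top of the accepted 80-320 range and maps to 320 * 0.0025 + 137 = 137.8000 MHz.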
Example #7
    def test_even(self):
        """Test when group size divides evenly into the length of
        the iterable.

        """
        self.assertEqual(list(mi.grouper('ABCDEF', 3)), [('A', 'B', 'C'),
                                                         ('D', 'E', 'F')])
Example #8
File: b.py Project: cj-wong/advent
def mask_floating(floating_mask: BINS, address: BINS) -> List[BINS]:
    """Apply floating mask to address and get all possible addresses.

    Args:
        floating_mask (BINS): where 'X' is in the original mask
        address (BINS): an address to get multiple floating addresses

    Returns:
        List[BINS]: a list of addresses derived from the given address

    """
    floats = floating_mask.count(1)
    indices = list(range(2 ** floats))
    addresses = [[a for a in address] for _ in indices]

    count = 0

    for index, (f, a) in enumerate(
            zip_longest(floating_mask[::-1], address[::-1])):
        if f != 1:
            continue
        count += 1
        for group_n, subindices in enumerate(
                grouper(indices, len(indices) // (2 ** count))):
            for subindex in subindices:
                addresses[subindex][-index - 1] = group_n % 2

    return addresses
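A small worked call with hypothetical inputs, assuming BINS is a list of 0/1 ints and the floating mask carries a 1 wherever the original mask had 'X':

print(mask_floating([1, 0, 1], [0, 0, 0]))
# floating bits at offsets 0 and 2 expand into all four combinations:
# [[0, 0, 0], [1, 0, 0], [0, 0, 1], [1, 0, 1]]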
Example #9
    def kill_tasks(self, tasks):
        logger = tasks[0].workflow.log if tasks else _get_null_logger()

        for group in grouper(50, tasks):
            group = [x for x in group if x is not None]
            job_ids = [str(t.drm_jobID) for t in group]
            qdel(job_ids, logger=logger)
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('files', metavar="IMAGE_FILE", nargs="+")
    parser.add_argument('--threshold', type=float, default=0.90)
    args = parser.parse_args()

    if len(args.files) % 2 != 0:
        parser.error("Files must be provided in pairs")

    failures = 0

    # TODO: don't punt on media type detection:
    for img_1, img_2 in grouper(2, args.files):
        digest_1 = image_digest(img_1)
        digest_2 = image_digest(img_2)

        pcc = cross_correlation(digest_1, digest_2)

        if pcc >= args.threshold:
            status = 'pass'
            log_f = sys.stdout
        else:
            status = 'FAIL'
            log_f = sys.stderr
            failures += 1

        print('%s\t%s\t%s\t%0.3f' % (status, img_1, img_2, pcc), file=log_f)

    return 1 if failures else 0
Example #11
    def test_odd(self):
        """Test when group size does not divide evenly into the length of the
        iterable.

        """
        self.assertEqual(list(mi.grouper('ABCDE', 3)), [('A', 'B', 'C'),
                                                        ('D', 'E', None)])
Example #12
def LoadImages(ImageURLsArray,ThreadCount=16):
    import concurrent.futures
    from more_itertools import grouper

    #Setup multithreaded for using the number of threads in ThreadCount
    executor = concurrent.futures.ThreadPoolExecutor(ThreadCount)
    # Items per group: how many URLs each thread is handed per task before it picks up a new set
    ItemsPerGroup = 3
    futures = [executor.submit(GetMultiImageData, group) for group in grouper(ItemsPerGroup, ImageURLsArray)]
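    # Hypothetical continuation (not in the original source): wait for the
    # worker threads and flatten their results, assuming GetMultiImageData
    # returns a list of image payloads for each group of URLs it was given.
    images = []
    for future in concurrent.futures.as_completed(futures):
        images.extend(future.result())
    executor.shutdown()
    return images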
Example #13
def fdia_inject(households, area, algorithm_label, injection_percentage):
    households[0]["o_demands"] = households[0]["demands"][:]
    device_original_demand = households[0]["demands"][0]
    total_demand = area[algorithm_label][k0_demand_total][0]
    device_new_demand = device_original_demand + (
        total_demand * injection_percentage) / households[0]["durs"][0]
    households[0]["demands"][0] = device_new_demand

    # calculate the new demand profile of the attacker house
    household_profile = [0] * no_intervals
    device = 0
    for device_demand in households[0]['demands']:
        for device_duration in range(households[0]['durs'][device]):
            household_profile[(households[0]['psts'][device] + device_duration)
                              % no_intervals] += device_demand
        device += 1

    # calculate the new community demand profile
    o_demand_profile = [
        sum(x) for x in grouper(households[0]['demand']['preferred'],
                                no_intervals_periods)
    ]
    n_demand_profile = [
        sum(x) for x in grouper(household_profile, no_intervals_periods)
    ]

    area[algorithm_label][k0_demand][0] = [
        a - b
        for a, b in zip(area[algorithm_label][k0_demand][0], o_demand_profile)
    ]
    area[algorithm_label][k0_demand][0] = [
        a + b
        for a, b in zip(area[algorithm_label][k0_demand][0], n_demand_profile)
    ]
    area[algorithm_label +
         '_fw'][k0_demand][0] = area[algorithm_label][k0_demand][0]

    area[algorithm_label][k0_demand_total][0] = sum(
        area[algorithm_label][k0_demand][0])
    area[algorithm_label + '_fw'][k0_demand_total][0] = sum(
        area[algorithm_label][k0_demand][0])

    # area[algorithm_label][k0_demand_max][0] = max(area[algorithm_label][k0_demand][0])
    # area[algorithm_label + '_fw'][k0_demand_max][0] = max(area[algorithm_label][k0_demand][0])

    return households, area
Example #14
    def test_even(self):
        """Test when group size divides evenly into the length of
        the iterable.

        """
        self.assertEqual(
            list(mi.grouper("ABCDEF", 3)), [("A", "B", "C"), ("D", "E", "F")]
        )
Example #15
def SquashBits(grayscaleArrayFlat):
    squashedArray = []
    for bitLeftMost, bitLeftMiddle, bitRightMiddle, bitRightMost in grouper(
            4, grayscaleArrayFlat, 0):
        squashedArray.append(bitLeftMost << 6 | bitLeftMiddle << 4
                             | bitRightMiddle << 2 | bitRightMost)

    return np.array(squashedArray)
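Each input value is treated as a 2-bit intensity (0-3); four of them pack into one output byte, with grouper's fill value of 0 padding the last group. A worked call on made-up data (with a more_itertools version that still accepts the legacy grouper(n, iterable, fillvalue) order, as the function assumes):

print(SquashBits([3, 1, 2, 0, 1]))
# first group (3, 1, 2, 0) -> 0b11_01_10_00 = 216; padded group (1, 0, 0, 0) -> 64
# [216  64]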
Example #16
    def test_odd(self):
        """Test when group size does not divide evenly into the length of the
        iterable.

        """
        self.assertEqual(
            list(mi.grouper(3, 'ABCDE')), [('A', 'B', 'C'), ('D', 'E', None)]
        )
Example #17
    def test_even(self):
        """Test when group size divides evenly into the length of
        the iterable.

        """
        self.assertEqual(
            list(mi.grouper(3, 'ABCDEF')), [('A', 'B', 'C'), ('D', 'E', 'F')]
        )
Example #18
    def test_odd(self):
        """Test when group size does not divide evenly into the length of the
        iterable.

        """
        self.assertEqual(
            list(mi.grouper("ABCDE", 3)), [("A", "B", "C"), ("D", "E", None)]
        )
Example #19
def batch_entrez(
    list_of_terms, db="nuccore", retmax=1, rettype="fasta", batchsize=1, email=myEmail
):
    """
    Retrieve multiple rather than a single record
    """

    for term in list_of_terms:

        logging.debug("Search term %s", term)
        success = False
        ids = None
        if not term:
            continue

        while not success:
            try:
                search_handle = Entrez.esearch(db=db, retmax=retmax, term=term)
                rec = Entrez.read(search_handle)
                success = True
                ids = rec["IdList"]
            except (HTTPError, URLError, RuntimeError, KeyError) as e:
                logging.error(e)
                logging.debug("wait 5 seconds to reconnect...")
                time.sleep(5)

        if not ids:
            logging.error("term {0} not found".format(term))
            continue

        assert ids
        nids = len(ids)
        if nids > 1:
            logging.debug("A total of {0} results found.".format(nids))

        if batchsize != 1:
            logging.debug("Use a batch size of {0}.".format(batchsize))

        ids = list(grouper(ids, batchsize))

        for id in ids:
            id = [x for x in id if x]
            size = len(id)
            id = ",".join(id)

            success = False
            while not success:
                try:
                    fetch_handle = Entrez.efetch(
                        db=db, id=id, rettype=rettype, email=email
                    )
                    success = True
                except (HTTPError, URLError, RuntimeError) as e:
                    logging.error(e)
                    logging.debug("wait 5 seconds to reconnect...")
                    time.sleep(5)

            yield id, size, term, fetch_handle
Example #20
def get_closest_color(color: str,
                      colors: Dict[str, str],
                      is_uq: bool = True) -> Union[str, None]:
    """Get the closest color representation. Only works for hex colors.

        e.g. '#4F2CD0' matches with '#341D8B'

    This is because the UQ schedule appears to use hand-picked colors,
    so some inconsistencies are present.

    Original comment:
        Used for example-urgent_quest-2020-06_1.html;
        Urgent Quest:
            The Manifested Planetbreaker &
            The Chant to Cleanse the Calamity (60 minutes)
        The key color is #341D8B while the schedule color is
        #4F2CD0.

    Args:
        color (str): a color representation; should be in hex
        colors (Dict[str, str]): a dictionary mapping colors from a key
            to UQs; cells must either match a color here or be ignored
        is_uq (bool, optional): whether to limit to UQs xor concerts;
            defaults to True

    Returns:
        str: if valid, the UQ name associated with a color
        None: if no colors were matched; probably an empty cell

    """
    # Not a hex color; abort comparison
    if not color.startswith('#'):
        return
    else:
        color = color.replace('#', '')

    try:
        rgb_int = [int(f'{x}{y}', base=16) for x, y in grouper(color, 2)]
    except ValueError:
        config.LOGGER.warning(f'{color} could not be converted via int()')
        return

    # Maximum color Euclidean distance between black (#000000) and
    # white (#FFFFFF)
    distance = 3 * 255**2
    closest = None

    for c, uq in colors.items():
        if uq.startswith('Urgent Quest:') ^ is_uq:
            continue
        # Get Euclidean distance of the colors; using square value
        d = sum([(c1 - c2)**2 for c1, c2 in zip(rgb_int, KEY_COLORS[c])])
        if d < distance:
            distance = d
            closest = uq

    return closest
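The hex parsing is the part that leans on grouper: it walks the color string two characters at a time. A minimal sketch of just that conversion (KEY_COLORS and the colors mapping are module state in the original and are not needed here):

from more_itertools import grouper

def hex_to_rgb(color: str):
    # '#4F2CD0' -> [79, 44, 208]
    color = color.lstrip('#')
    return [int(f'{x}{y}', base=16) for x, y in grouper(color, 2)]

print(hex_to_rgb('#4F2CD0'))  # [79, 44, 208]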
Example #21
    def test_legacy_order(self):
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            self.assertEqual(
                list(mi.grouper(3, 'ABCDEF')),
                [('A', 'B', 'C'), ('D', 'E', 'F')],
            )

        self.assertEqual(caught[0].category, DeprecationWarning)
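A note on the two call signatures seen throughout this collection: current more_itertools expects grouper(iterable, n), while many of these examples use the older grouper(n, iterable) order. As the test above checks, the legacy order was accepted for a time with a DeprecationWarning; very recent releases may reject it outright, so the iterable-first form is the safe one:

from more_itertools import grouper

list(grouper('ABCDEF', 3))
# [('A', 'B', 'C'), ('D', 'E', 'F')]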
Example #22
def _culane_line_to_curvelane_dict(culane_lines):
    curvelane_lines = []
    for culane_line_spec in culane_lines:
        curvelane_line_spec = [{
            'x': x,
            'y': y
        } for x, y in grouper(map(float, culane_line_spec.split(' ')), 2)]
        curvelane_lines.append(curvelane_line_spec)
    return dict(Lines=curvelane_lines)
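A hypothetical invocation showing how a flat 'x y x y ...' CULane line becomes a list of point dicts:

print(_culane_line_to_curvelane_dict(['10.0 20.0 30.0 40.0']))
# {'Lines': [[{'x': 10.0, 'y': 20.0}, {'x': 30.0, 'y': 40.0}]]}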
Example #23
    def test_legacy_order(self):
        """Historically, grouper expected the n as the first parameter"""
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            self.assertEqual(
                list(mi.grouper(3, "ABCDEF")), [("A", "B", "C"), ("D", "E", "F")],
            )

        (warning,) = caught
        assert warning.category == DeprecationWarning
Example #24
def batch_generator(nr_samples, batch):
    batches = grouper(np.random.permutation(nr_samples), batch)
    while True:
        try:
            indices = list(next(batches))
            if None in indices:
                indices = [index for index in indices if index is not None]
            yield torch.tensor(indices, dtype=torch.int64)
        except StopIteration:
            return
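Each yield is one shuffled mini-batch of sample indices; the None check trims the padding grouper adds to the final short group. A hypothetical run (torch and numpy imported as in the example):

gen = batch_generator(10, 4)
for batch in gen:
    print(batch.shape)
# torch.Size([4]), torch.Size([4]), torch.Size([2])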
Example #25
def handler(message: Dict[str, Any], context):
    region = get_env_variable(AWS_REGION)
    bucket = get_env_variable(AWS_S3_BUCKET)
    target_function_name = get_env_variable(TIINGO_FETCHER_FUNCTION_NAME)
    tiingo_tickers = get_env_variable_or_default(TIINGO_TICKERS_FILE,
                                                 "tiingo/tickers.csv")
    invocation_type = get_env_variable_or_default(LAMBDA_INVOCATION_TYPE,
                                                  "Event")
    filters = message["filters"]
    base_path = message.get("base_path", "market_data")
    tiingo_filters = Filter.schema().load(filters, many=True)
    logger.debug(f"Number of filters : {len(tiingo_filters)}")

    tiingo_tickers_path = os.path.join(tempfile.gettempdir(),
                                       "tiingo_tickers.csv")
    download_file_from_S3_to_temp(region, bucket, tiingo_tickers,
                                  tiingo_tickers_path)

    lambda_client = boto3.client("lambda", region_name=region)

    nb_msg_send = 0
    for tiingo_filter in tiingo_filters:
        logger.debug(f"run filter: {tiingo_filter}")
        tickers = set(
            filter(tiingo_filter.filter_out,
                   get_tiingo_tickers(tiingo_tickers_path)))
        for tiingo_tickers in grouper(10, tickers):
            nb_msg_send += len(tiingo_tickers)
            messages = [
                Message(
                    tiingo_ticker.ticker,
                    f"{base_path}/{tiingo_ticker.ticker}/1d/data.csv",
                ) for tiingo_ticker in tiingo_tickers
                if tiingo_ticker is not None
            ]
            event = {"records": Message.schema().dump(messages, many=True)}
            payload = json.dumps(event)
            logger.debug(f"send -> {payload}")

            lambda_call_result = lambda_client.invoke(
                FunctionName=target_function_name,
                InvocationType=invocation_type,
                Payload=payload,
            )

            if lambda_call_result["StatusCode"] in (200, 202, 204) and (
                    "FunctionError" not in lambda_call_result.keys()):
                continue
            error_msg = f"Call to {target_function_name} Error. \
                {lambda_call_result['FunctionError']}-\
                {base64.b64decode(lambda_call_result['LogResult']).decode('utf-8')}"

            logger.error(error_msg)
            raise Exception(error_msg)
    logger.info(f"Numbers of messages sent: {nb_msg_send}")
Example #26
def blasr(args):
    """
    %prog blasr ref.fasta fofn

    Run blasr on a set of PacBio reads. This is based on a divide-and-conquer
    strategy described below.
    """
    from more_itertools import grouper
    from jcvi.apps.grid import MakeManager

    p = OptionParser(blasr.__doc__)
    p.set_cpus(cpus=8)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    reffasta, fofn = args
    flist = sorted([x.strip() for x in open(fofn)])
    h5list = []
    mm = MakeManager()
    for i, fl in enumerate(grouper(flist, 3)):
        chunkname = "chunk{0:03d}".format(i)
        fn = chunkname + ".fofn"
        h5 = chunkname + ".cmp.h5"
        fw = open(fn, "w")
        print("\n".join(fl), file=fw)
        fw.close()

        cmd = "pbalign {0} {1} {2}".format(fn, reffasta, h5)
        cmd += " --nproc {0} --forQuiver --tmpDir .".format(opts.cpus)
        mm.add((fn, reffasta), h5, cmd)
        h5list.append(h5)

    # Merge h5, sort and repack
    allh5 = "all.cmp.h5"
    tmph5 = "tmp.cmp.h5"
    cmd_merge = "cmph5tools.py merge --outFile {0}".format(allh5)
    cmd_merge += " " + " ".join(h5list)
    cmd_sort = "cmph5tools.py sort --deep {0} --tmpDir .".format(allh5)
    cmd_repack = "h5repack -f GZIP=1 {0} {1}".format(allh5, tmph5)
    cmd_repack += " && mv {0} {1}".format(tmph5, allh5)
    mm.add(h5list, allh5, [cmd_merge, cmd_sort, cmd_repack])

    # Quiver
    pf = reffasta.rsplit(".", 1)[0]
    variantsgff = pf + ".variants.gff"
    consensusfasta = pf + ".consensus.fasta"
    cmd_faidx = "samtools faidx {0}".format(reffasta)
    cmd = "quiver -j 32 {0}".format(allh5)
    cmd += " -r {0} -o {1} -o {2}".format(reffasta, variantsgff,
                                          consensusfasta)
    mm.add(allh5, consensusfasta, [cmd_faidx, cmd])

    mm.write()
Example #27
    def __init__(self):
        self.add_suf = 0
        self.LEN_PI = 20000  # length of pi (number of digits)
        mp.dps = self.LEN_PI  # set the mpmath precision to the length of pi
        self.PI = int(
            (mp.pi - int(mp.pi)) * 10**self.LEN_PI)  # take the mantissa of pi
        self.PI_HEX = hex(self.PI)[2:8336 + 2]  # convert to base 16

        self.FIXED_P = [
            ''.join(i) for i in more_itertools.grouper(self.PI_HEX[:8 * 18], 8)
        ]  # round-key matrix

        # substitution matrix
        self.FIXED_S = [
            i for i in more_itertools.grouper([
                ''.join(i)
                for i in more_itertools.grouper(self.PI_HEX[8 * 18:], 8)
            ], 256)
        ]
        self.FIXED_S = [list(i) for i in self.FIXED_S]
Example #28
    def test_legacy_order(self):
        """Historically, grouper expected the n as the first parameter"""
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            self.assertEqual(
                list(mi.grouper(3, 'ABCDEF')),
                [('A', 'B', 'C'), ('D', 'E', 'F')],
            )

        warning, = caught
        assert warning.category == DeprecationWarning
Example #29
 def batch_generator(self, nr_samples, batch):
     print('Start batching...')
     batches = grouper(np.arange(nr_samples), batch)
     while True:
         try:
             indices = list(next(batches))
             if None in indices:
                 indices = [index for index in indices if index is not None]
             yield indices
         except StopIteration:
             return
Example #30
    def write(self, conn, bytes_):
        assert len(bytes_) <= self.DATA_LEN
        padded_bytes = pad_zeros(bytes_, self.BLOCK_LEN)

        addr_iter = range(self.FIRST_BLOCK, self.LAST_BLOCK + 1)
        data_iter = mi.grouper(self.BLOCK_LEN, padded_bytes)

        for block_num, data_block in zip(addr_iter, data_iter):
            LOG.debug("Writing %s to block %02x", fmt_bytes(data_block),
                      block_num)
            self.write_block(conn, block_num, data_block)
Example #32
 def describe_operation(self, query):
     '''  Returns an RDF graph that describes the resources found. '''
     try:
         self.query_sparql_store.setReturnFormat(XML)
         self.query_sparql_store.setQuery(self.concatenate_prefix(query))
         results = self.query_sparql_store.query().convert()
         str_result = results.serialize(format='nt')
         uris = str_result.split(' ')
         clean_uris = [x if not x[0] == '.' else x[2:] for x in uris]
         return list(grouper(3, clean_uris))[:-1]
     except Exception as e:
         print("Could not complete DESCRIBE operation")
Example #33
def binarytohex():

    user_input = values["-IN-"]

    user_input_string = str(user_input)

    for contents in user_input_string:

        if contents != '0' and contents != '1':

            return sg.popup('Number not binary! Binary numbers are made up of only 1s and 0s ',title='error')

    
    df = pd.read_csv('hextable.csv')

    t_string = str(user_input)

    t_list = list(t_string)

    t_list.reverse()

    # https://more-itertools.readthedocs.io/en/latest/api.html#more_itertools.grouper

    y = list(grouper(t_list, 4, '0'))

    x = [tup[::-1] for tup in y]

    random_list = []

    for i in x:

        i_str = "".join(i)

        i_num = int(i_str)

        f = df.loc[df['Binary'] == i_num, 'Hex']

        d = f.values

        r = d.tolist()

        random_list.append(r)

        merged = list(itertools.chain(*random_list))

        merged.reverse()

        merged_str = ''.join(merged)

        merged_str_message =  merged_str + ' is the equivalent of ' + values['-IN-'] + ' in base 16.'
        

    return sg.popup(merged_str_message)
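Stripped of the GUI and CSV lookup, the core conversion above is: reverse the bit string, take groups of four with '0' padding, un-reverse each nibble, and map it to a hex digit. A compact sketch of just that step:

from more_itertools import grouper

def binary_to_hex(bits: str) -> str:
    # group from the least-significant end so padding lands on the leading nibble
    nibbles = [''.join(reversed(g)) for g in grouper(reversed(bits), 4, fillvalue='0')]
    return ''.join('{:X}'.format(int(n, 2)) for n in reversed(nibbles))

print(binary_to_hex('101011'))  # '2B'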
Example #34
def parse_katsins(filename):
    lines = open(filename + '.recs').read().split('\n')[:-1]
    out = bytes()
    for inlen, msg, md in grouper(3, lines):
        inlen = plen(inlen)[0]
        msg = pmsg(msg)[0]
        md = pmd(md)[0]
        if (int(inlen) % 8) != 0:
            continue
        msg = unhexlify(msg)
        print(int(inlen) // 8, len(msg))
        out += msg
    with open(filename.split('.')[0] + '.bin', 'wb') as f:
        f.write(out)
    return out
Example #35
def _group_by_thousands(x):
    """Sequence of (magnitude, chunk) tuples.

    `magnitude` is an integer representing which "group of thousands" the chunk
    represents. So 0 covers the ones through hundreds (0-999), 1 covers the
    thousands (1,000-999,999), and so on.

    `chunk` is a tuple of three integers representing the digits of the current
    chunk.

    The iteration will be in order from greatest magnitude to smallest.
    """
    digits = util.digits(x)

    # Groups by thousands
    chunks = list(enumerate(grouper(3, reversed(digits))))
    chunks.reverse()

    for magnitude, chunk in chunks:
        chunk = tuple(0 if d is None else d for d in reversed(chunk))
        yield magnitude, chunk
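A hypothetical run, assuming util.digits returns the decimal digits most-significant first (so util.digits(1234567) == [1, 2, 3, 4, 5, 6, 7]) and a more_itertools version that accepts the legacy grouper(n, iterable) order used above:

list(_group_by_thousands(1234567))
# [(2, (0, 0, 1)), (1, (2, 3, 4)), (0, (5, 6, 7))]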
Example #36
def parse_kats(filename):
    lines = open(filename.split('.')[0] + '.recs').read().split('\n')[:-1]
    outtxt = []
    out = bytes()
    for inlen, msg, md in grouper(3, lines):
        r = [inlen, msg, md]
        ilen = int(plen(inlen)[0])
        if (ilen % 8) != 0:
            continue
        outtxt.extend([inlen, msg, md])
        msg = pmsg(msg)[0]
        md = pmd(md)[0]
        msg = unhexlify(msg)
        md = unhexlify(md)
        outlen = len(md)
        inlen = len(msg)
        out += ostruct.pack(inlen, outlen)
        out += msg
        out += md
    with open(filename.split('.')[0] + '.bin', 'wb') as f:
        f.write(out)
    return outtxt
Example #37
#!/usr/bin/env python
from __future__ import division, print_function

from binascii import unhexlify
import sys
from more_itertools import grouper

with open(sys.argv[1]) as f:
    lines = f.read().split('\n')[:-1]

for length, msg in grouper(2, lines):
    length = int(length)
    if length % 8 != 0:
        continue
    with open('in/{}'.format(length), 'wb') as f:
        if length != 0:
            f.write(unhexlify(msg))
Example #38
 def kill_tasks(self, tasks):
     for group in grouper(50, tasks):
         group = filter(lambda x: x is not None, group)
         pids = map(lambda t: unicode(t.drm_jobID), group)
         sp.call(['scancel', '-Q'] + pids, preexec_fn=exit_process_group)
Example #39
def resolve_args(namespaces):
    from pathlib import Path

    # TODO: make so that we can run without science frames
    args, head_info, names = namespaces

    # Positional argument and -s / --sci argument mean the same thing
    if args.files_or_directory and not args.sci:
        args.sci = args.files_or_directory

    if args.outdir:
        # output directory given explicitly
        args.outdir = iocheck(args.outdir, os.path.exists, 1)
    # else:
    # infer output directory from images provided

    # If input is a directory, process all files in tree!
    # If outdir given, rebuild the tree for reduced files there.  Otherwise
    # maintain current tree for reduced files.
    from pySHOC import treeops

    root = Path(args.sci[0])
    if root.is_dir():  # this is a directory; try to process the entire tree
        _infer_indir = False
        # first check if still has day-by-day folders
        # if next(args.sci[0].glob('[01][0-9][0-3][0-9]'), None):
        #     # try to partition
        #     treeops.partition_by_source(args.sci[0])

        # get file tree
        tree = treeops.get_tree(root, '.fits')
        flats = tree.pop('flats', tree.pop('flat', None))
        bias = tree.pop('bias', None)
        if not args.flats:
            args.flats = flats
        if not args.bias:
            args.bias = bias

        # flatten the tree into list of files
        args.sci = list(mit.flatten(tree.values()))

    else:
        _infer_indir = True

    # Resolve inputs and get the input folder from the resolved file list for
    # sci / flats / bias
    _infer_outdir = not bool(args.outdir)
    work_dir = ''
    for name in ('sci', 'flats', 'bias'):  # args.dark   # 'sci',
        images = getattr(args, name)
        if images:
            # Resolve the input images
            images = parse.to_list(images,
                                   os.path.exists,
                                   include='*.fits',
                                   path=work_dir,
                                   abspaths=True,
                                   raise_error=1)
            # put resolved list in arg namespace
            setattr(args, name, images)
            if _infer_indir:
                work_dir = Path(images[0]).parent
                _infer_indir = False

            if _infer_outdir:
                args.outdir = os.path.split(images[0])[0]
                _infer_outdir = False

    # All inputs should now be resolved to lists of file names
    if args.sci:
        # Initialize Run
        args.sci = shocSciRun.load(args.sci, label='science')
        # TODO: use kind and set that as label default?

        # for cube in args.sci:  # DO YOU NEED TO DO THIS IN A LOOP?
        #     cube._needs_flip = not cube.cross_check(args.sci[0], 'flip_state')
        # self-consistency check for flip state of science cubes
        # #NOTE: THIS MAY BE INEFFICIENT IF THE FIRST CUBE IS THE ONLY ONE WITH A DIFFERENT FLIP STATE...

    # ===========================================================================
    if args.gps:
        args.timing = True  # Do timing if gps info given

        if len(args.gps) == 1:
            # triggers are given either as a single trigger-time string or as the filename of a trigger list
            valid_gps = iocheck(args.gps[0], validity.RA,
                                raise_error=-1)  # if valid single time this will return that same str else None
            if not valid_gps:
                args.gps = parse.to_list(args.gps, validity.RA,
                                         path=work_dir,
                                         abspath=0,
                                         sort=0,
                                         raise_error=1)

        # at this point args.gps is a list of explicit time strings.
        # Check if they are valid representations of time
        args.gps = [iocheck(g, validity.RA, raise_error=1, convert=convert.RA)
                    for g in args.gps]

        # Convert and set as cube attribute
        args.sci.that_need_triggers().set_gps_triggers(args.gps)

        # if any cubes are GPS triggered on each individual frame
        grun = args.sci.that_need_kct()
        if len(args.kct) == 1 and len(grun) != 1:
            warn(
                    'A single GPS KCT provided for multiple externally triggered runs. '
                    'Assuming this applies for all these files: %s' % grun)
            args.kct *= len(grun)  # expand by repeating

        elif len(grun) != len(args.kct):
            l = str(len(args.kct)) or 'No'
            s = ': %s' % str(args.kct) if len(args.kct) else ''
            raise ValueError('%s GPS KCT values provided%s for %i file(s): %s'
                             '' % (l, s, len(grun), grun))

        # "Please specify KCT (Exposure time + Dead time):")
        # args.kct = InputCallbackLoop.str(msg, 0.04, check=validity.float, what='KCT')

        for cube, kct in zip(grun, args.kct):
            cube.timing.kct = kct

    # ===========================================================================
    if args.flats or args.bias:

        args.combine = list(map(str.lower, args.combine))
        hows = 'day', 'daily', 'week', 'weekly'
        methods = 'sigma clipped',
        funcs = 'mean', 'median'
        vocab = hows + methods + funcs
        transmap = dict(mit.grouper(hows, 2))
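        # e.g. dict(mit.grouper(hows, 2)) -> {'day': 'daily', 'week': 'weekly'}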
        understood, misunderstood = map(list, mit.partition(vocab.__contains__,
                                                            args.combine))
        if any(misunderstood):
            raise ValueError('Argument(s) {} for combine not understood.'
                             ''.format(misunderstood))
        else:
            understood = [transmap.get(u, u) for u in understood]

            how = next(filter(hows.__contains__, understood))
            func = next(filter(funcs.__contains__, understood))
            meth = next(filter(methods.__contains__, understood), '')

            args.combine = how
            args.fcombine = getattr(np, func)
            print('\nBias/Flat combination will be done by {}.'.format(
                    ' '.join([how, meth, func])))

            # TODO: sigma clipping ... even though it sucks

    # ===========================================================================
    if args.flats:
        # TODO full matching here ...

        # args.flats = parse.to_list(args.flats, imaccess, path=work_dir, raise_error=1)
        args.flats = shocFlatFieldRun.load(args.flats, label='flat')

        # isolate the flat fields that match the science frames. only these will be processed
        match = args.flats.cross_check(args.sci, 'binning', 1)
        args.flats = args.flats[match]

        # check which are master flats

        # for flat in args.flats:
        #     flat._needs_flip = not flat.cross_check(args.sci[0], 'flip_state')

        # flag the flats that need to be subframed, based on the science frames which are subframed
        args.flats.flag_sub(args.sci)

        args.flats.print_instrumental_setup()

        # check which of the given flats are potentially master
        # is_master = [f.ndims == 2 for f in args.flats]

        # else:
        # print('WARNING: No flat fielding will be done!')

    # ===========================================================================
    if args.bias:
        # args.bias = parse.to_list(args.bias, imaccess, path=work_dir, raise_error=1)
        args.bias = shocBiasRun.load(args.bias, label='bias')

        # match the biases for the science run
        match4sci = args.bias.cross_check(args.sci, ['binning', 'mode'], 0)
        # for bias in args.bias:
        #     bias._needs_flip = bias.cross_check(args.sci[0], 'flip_state')
        # NOTE: THIS MAY BE INEFFICIENT IF THE FIRST CUBE IS THE ONLY ONE WITH A DIFFERENT FLIP STATE...
        # args.bias[match4sci].flag_sub(args.sci) ?
        args.bias.flag_sub(args.sci)
        args.bias[match4sci].print_instrumental_setup(
                description='(for science frames)')

        # match the biases for the flat run
        if args.flats:
            match4flats = args.bias.cross_check(args.flats, ['binning', 'mode'],
                                                -1)
            # args.bias4flats = args.bias[match4flats]
            # for bias in args.bias4flats:
            #     bias._needs_flip = bias.cross_check(args.flats[0], 'flip_state')

            # print table of bias frames
            args.bias[match4flats].print_instrumental_setup(
                    description='(for flat fields)')
            match = match4sci & match4flats
        else:
            match = match4sci

        args.bias = args.bias[match]

        # check which of the given flats are potentially master
        # is_master = [f.ndims == 2 for f in args.flats]

    # else:
    # warn( 'No de-biasing will be done!' )

    # ===========================================================================
    if args.split:
        if args.outdir[0]:  # if an output directory is given
            args.outdir = os.path.abspath(args.outdir[0])
            if not os.path.exists(args.outdir):  # if it doesn't exist create it
                print(
                        'Creating reduced data directory {}.\n'.format(
                            args.outdir))
                os.mkdir(args.outdir)

    # ===========================================================================
    # Handle header updating here

    # NOTE: somehow, this attribute gets set even though we can never read it due to a syntax error
    delattr(head_info, 'update-headers')

    hi = head_info
    hi.coords = None
    # join arguments since they are read as lists
    hi.object = ' '.join(hi.object)
    hi.ra = ' '.join(hi.ra)
    hi.dec = ' '.join(hi.dec)
    hi.date = ' '.join(hi.date)

    if args.update_headers:
        if hi.ra and hi.dec:
            iocheck(hi.ra, validity.RA, 1)
            iocheck(hi.dec, validity.DEC, 1)
            hi.coords = SkyCoord(ra=hi.ra, dec=hi.dec,
                                 unit=('h', 'deg'))  # , system='icrs'
        else:
            from pySHOC.utils import retrieve_coords_ra_dec
            hi.coords, hi.ra, hi.dec = retrieve_coords_ra_dec(hi.object)

        # TODO: maybe subclass SkyCoords to calculate this?
        def is_close(cooA, cooB, threshold=1e-3):
            return np.less([(cooA.ra - cooB.ra).value,
                            (cooA.dec - cooB.dec).value], threshold).all()

        for cube in args.sci:  # TODO: select instead of loop
            if cube.has_coords and hi.coords and not is_close(cube.coords,
                                                              hi.coords):
                fmt = dict(style='hmsdms', precision=2, sep=' ', pad=1)
                warn(
                        'Supplied coordinates {} will supersede header coordinates {} in {}'
                        ''.format(hi.coords.to_string(**fmt),
                                  cube.coords.to_string(**fmt),
                                  cube.filename()))
                cube.coords = hi.coords

        if not hi.date:
            # hi.date = args.sci[0].date#[c.date for c in args.sci]
            warn('Dates will be assumed from file creation dates.')

            # if not hi.filter:
            #     warn('Filter assumed as Empty')
            #     hi.filter = 'Empty'

            # if hi.epoch:
            #     iocheck(hi.epoch, validity.epoch, 1)
            # else:
            # warn('Assuming epoch J2000')
            # hi.epoch = 2000

            # if not hi.obs:
            # note('Assuming location is SAAO Sutherland observatory.')
            # hi.obs = 'SAAO'

            # if not hi.tel:
            #     note('Assuming telescope is SAAO 1.9m\n')   #FIXME: Don't have to assume for new data
            #     hi.tel = '1.9m'

    elif args.timing or args.split:
        # Need target coordinates for Barycentrization! Check the headers
        for cube in args.sci:  # TODO: select instead of loop
            if cube.coords is None:
                warn('Object coordinates not found in header for {}!\n'
                     'Barycentrization cannot be done without knowing target '
                     'coordinates!'.format(cube.filename()))

                # iocheck( hi.date, validity.DATE, 1 )
                # else:
                # warn( 'Headers will not be updated!' )

                # ===========================================================================
                # if args.timing and not hi.coords:
                # Target coordinates not provided / inferred from
                # warn( 'Barycentrization cannot be done without knowing target coordinates!' )

    if args.names:
        shocFlatFieldRun.nameFormat = names.flats
        shocBiasRun.nameFormat = names.bias
        shocSciRun.nameFormat = names.sci

    # ANIMATE

    return args, head_info, names
Example #40
def u8s(s):
    return ', '.join('0x{}'.format(''.join(group)) for group in grouper(2, s))
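A quick check of the formatter on hypothetical input (note it relies on the legacy grouper(n, iterable) order): it turns an even-length hex string into a comma-separated C byte list:

print(u8s('deadbeef'))  # 0xde, 0xad, 0xbe, 0xef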
Example #41
#!/usr/bin/env python
from __future__ import division, print_function

import sys
from more_itertools import grouper

with open(sys.argv[1]) as f:
    lines = f.read().split('\n')[:-1]

for length, md in grouper(2, lines):
    length = int(length)
    if length % 8 != 0:
        continue
    with open('answers/{}/{}'.format(sys.argv[1].split('.')[0], length), 'wb') as f:
        f.write(md.lower())
Example #42
def hash_cinit(name, hash_):
  length = len(hash_) // 2
  return ('uint8_t {name}_digest[{length}] = {{ {bytes} }};\n'
          .format(length=length, name=name,
                  bytes=', '.join(('0x' + ''.join(byte)) for byte in grouper(2, hash_))))
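The same pairing trick (again with the legacy grouper argument order) used to emit a full C declaration; for instance, with the MD5 digest of the empty string:

print(hash_cinit('md5_empty', 'd41d8cd98f00b204e9800998ecf8427e'))
# uint8_t md5_empty_digest[16] = { 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e };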
Example #43
 def test_fill_value(self):
     """Test that the fill value is used to pad the final group"""
     self.assertEqual(
         list(mi.grouper(3, 'ABCDE', 'x')),
         [('A', 'B', 'C'), ('D', 'E', 'x')]
     )
Example #44
 def kill_tasks(self, tasks):
     for group in grouper(tasks, 50):
         group = filter(lambda x: x is not None, group)
         pids = ','.join(map(lambda t: str(t.drm_jobID), group))
         sp.Popen(['qdel', pids], preexec_fn=preexec_function)