def Decrypt(data, key):
    transp = Permutation(data, BegPermut)

    L = transp[:32]
    R = transp[32:]

    # apply the 16 Feistel rounds with the round keys in reverse order
    for i in range(15, -1, -1):
        R, L = ba(L), ba(R ^ fRK(L, GetKey(key, i)))

    return Permutation(L + R, EndPermut)
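For reference, a sketch of the matching Encrypt this snippet assumes (Permutation, fRK, GetKey and the permutation tables come from the same module): the identical Feistel network with the round keys applied in the forward order 0..15, which is exactly what makes Decrypt above its inverse.

def Encrypt(data, key):
    transp = Permutation(data, BegPermut)

    L = transp[:32]
    R = transp[32:]

    # same round as Decrypt, but with keys 0..15 instead of 15..0
    for i in range(16):
        L, R = ba(R), ba(L ^ fRK(R, GetKey(key, i)))

    return Permutation(L + R, EndPermut)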
Example #2
def colour_matrix(degrees, matrix):
    """
    Greedy coloring of bit-encoded RMSD matrix.

    Parameters
    ----------
    degrees : numpy.ndarray
        array containing each node degree. Clustered nodes have degree = 0.
    matrix : collections.OrderedDict
        dict of bitarrays.

    Returns
    -------
    colors : numpy.ndarray
        array of colors assigned to each node of the matrix.
    """
    # Constants ---------------------------------------------------------------
    N = degrees.size
    m = len(matrix)
    one = ba('1')
    xcolor = 0
    # Initialize containers ---------------------------------------------------
    ordered_by_degrees = iter((-degrees[:m]).argsort())
    colors = np.zeros(N, dtype=np.int32)
    colored = ba(N)
    colored.setall(0)
    seen = set()
    while True:
        # Retrieve the max-degree node ----------------------------------------
        max_node = next(ordered_by_degrees)
        if max_node in seen:
            continue
        seen.add(max_node)
        xcolor += 1
        not_neighbors = ~matrix[max_node]
        not_colored = ~colored
        candidates = not_neighbors & not_colored
        # Nodes passing conditions (not-neighbor and not-colored) --------------
        passed = [max_node]
        for candidate in candidates.itersearch(one):
            passed.append(candidate)
            try:
                candidates &= ~matrix[candidate]
            except KeyError:
                continue
            if not candidates.any():
                break
        seen.update(passed)
        # Deliver a color class to passed nodes -------------------------------
        colors[passed] = xcolor
        colored = ba()
        colored.pack(colors.astype(bool).tobytes())
        if colored.count(0) == 0:
            break
    return colors
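A minimal usage sketch, assuming the imports these examples rely on (from bitarray import bitarray as ba, import numpy as np, from collections import OrderedDict). Nodes 0 and 1 are mutual neighbors and node 2 is isolated, so 0 and 2 can share a color while 1 gets its own:

adjacency = OrderedDict([(0, ba('010')),   # node 0 neighbors node 1
                         (1, ba('100')),   # node 1 neighbors node 0
                         (2, ba('000'))])  # node 2 is isolated
node_degrees = np.array([1, 1, 0], dtype=np.int32)
colour_matrix(node_degrees, adjacency)  # -> array([1, 2, 1], dtype=int32)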
def TripleDES_decrypt(file,data,key):
    while len(key) < 24:
        key += key
    key1, key2, key3 = ba(), ba(), ba()
    key1.frombytes(key[0:8])
    key2.frombytes(key[8:16])
    key3.frombytes(key[16:24])
    data_len = len(data)
    for i in range(0, data_len, 8):
        block = ba()
        block.frombytes(data[i:i + 8])
        res = Decrypt(Decrypt(Decrypt(block, key3), key2), key1).tobytes()
        if res.endswith(b"\x00"):
            # Strip the zero padding added by TripleDES_encrypt; rstrip avoids
            # truncating at a zero byte that merely occurs inside the block.
            res = res.rstrip(b"\x00")
        file.write(res)
Example #4
def calc_matrix_degrees(unclustered_bit, matrix):
    """
    Calculate number of neighbors (degree) of unclustered nodes in matrix.

    Parameters
    ----------
    unclustered_bit : bitarray.bitarray
        bitarray with indices of unclustered nodes turned on.
    matrix : collections.OrderedDict
        dict of bitarrays.

    Returns
    -------
    degrees : numpy.ndarray
        array containing each node degree. Clustered nodes have degree = 0.

    """
    one = ba('1')
    degrees = np.zeros(len(unclustered_bit), dtype=np.int32)
    for node in unclustered_bit.itersearch(one):
        try:
            degrees[node] = matrix[node].count()
        except KeyError:
            pass
    return degrees
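A usage sketch under the same assumed imports; degrees are computed only for nodes whose bit is on in unclustered_bit:

unclustered = ba('110')  # nodes 0 and 1 still unclustered, node 2 done
adjacency = OrderedDict([(0, ba('010')), (1, ba('100')), (2, ba('000'))])
calc_matrix_degrees(unclustered, adjacency)  # -> array([1, 1, 0], dtype=int32)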
Example #5
def calc_rmsd_matrix(trajectory, args):
    """
    Calculate optimal RMSD binary-encoded square matrix using MDTraj. Pairwise
    similarity is saved in RAM as bits (dict of bitarrays), not floats.

    Parameters
    ----------
    trajectory : mdtraj.Trajectory
        MDTraj trajectory object.
    args : argparse.Namespace
        user input parameters parsed by argparse (CLI).

    Returns
    -------
    matrix : collections.OrderedDict
        dict of bitarrays.

    """
    trajectory.center_coordinates()
    cutoff = np.full(trajectory.n_frames, args.cutoff / 10, dtype=np.float32)
    matrix = OrderedDict()
    to_explore = range(trajectory.n_frames)
    for i in to_explore:
        rmsd_ = md.rmsd(trajectory, trajectory, i, precentered=True)
        vector_np = np.less_equal(rmsd_, cutoff)
        bitarr = ba()
        bitarr.pack(vector_np.tobytes())
        bitarr.fill()
        matrix.update({i: bitarr})
    return matrix
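A hypothetical invocation (the file names and the namespace fields are placeholders, not from the original source); each value of the returned dict is one bit-packed row of the frames-within-cutoff matrix:

import argparse
import mdtraj as md

args = argparse.Namespace(cutoff=4)        # cutoff in Angstroms; /10 gives nm
traj = md.load('traj.dcd', top='top.pdb')  # placeholder trajectory/topology
rmsd_matrix = calc_rmsd_matrix(traj, args)
rmsd_matrix[0].count()                     # neighbors of frame 0 (self included)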
def TripleDES_encrypt(file, data, key):
    while len(key) < 24:
        key += key
    key1, key2, key3 = ba(), ba(), ba()
    key1.frombytes(key[0:8])
    key2.frombytes(key[8:16])
    key3.frombytes(key[16:24])
    data_len = len(data)
    if data_len % 8 != 0:
        # zero-pad the last block up to a multiple of 8 bytes
        data = data + b"\x00" * (8 - data_len % 8)

    # range(0, data_len, 8) still reaches the padded final block,
    # since that block's start index is < data_len
    for i in range(0, data_len, 8):
        block = ba()
        block.frombytes(data[i:i + 8])
        file.write(Encrypt(Encrypt(Encrypt(block, key1), key2), key3).tobytes())
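A hypothetical round trip over the two helpers above, with io.BytesIO standing in for the output files (this assumes the Encrypt/Decrypt pair from these examples is in scope):

import io

key = b'secret!!'  # repeated internally until it covers 24 bytes
enc_buf = io.BytesIO()
TripleDES_encrypt(enc_buf, b'hello world', key)
dec_buf = io.BytesIO()
TripleDES_decrypt(dec_buf, enc_buf.getvalue(), key)
dec_buf.getvalue()  # -> b'hello world' once the zero padding is stripped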
def Sbox_permut(data):  # data: 48 bits in; returns 32 bits
    res = ba()
    for i in range(8):
        b = data[6*i:6*i + 6]        # i-th 6-bit group
        m = int(b[0:6:5].to01(), 2)  # outer bits (1st and 6th) pick the S-box row
        l = int(b[1:5].to01(), 2)    # middle four bits pick the column
        res += bin(S_box[i][m][l])[2:].zfill(4)  # append the 4-bit S-box output

    return res
Example #8
def test_no_blocking_two_datasets(k_):
    THRESHOLD = .6

    # These values don't actually matter...
    dataset0 = [ba('00'), ba('01'), ba('10'), ba('11')]
    dataset1 = [ba('00'), ba('01'), ba('11'), ba('')]
    datasets = [dataset0, dataset1]

    def similarity_f(datasets, threshold, k=None):
        assert threshold == THRESHOLD
        # All we need to check for k is that it's been passed correctly
        assert k == k_
        dset0, dset1 = map(list, datasets)
        if dset0 == dataset0 and dset1 == dataset1:
            sims = [
                0.9432949307428928, 0.8568189930049877, 0.8419286042520673,
                0.6343698774541688, 0.6
            ]
            rec_is0 = [1, 2, 0, 3, 0]
            rec_is1 = [1, 0, 0, 3, 2]
        else:
            assert False, 'datasets not passed through as expected'
        return array('d', sims), (array('I', rec_is0), array('I', rec_is1))

    sims, (dset_is0, dset_is1), (rec_is0,
                                 rec_is1) = find_candidate_pairs(datasets,
                                                                 similarity_f,
                                                                 THRESHOLD,
                                                                 k=k_)

    if k_ == 0:
        assert list(sims) == []
        assert list(dset_is0) == []
        assert list(dset_is1) == []
        assert list(rec_is0) == []
        assert list(rec_is1) == []
    elif k_ == 1:
        assert list(sims) == [
            0.9432949307428928, 0.8568189930049877, 0.6343698774541688
        ]
        assert list(dset_is0) == [0, 0, 0]
        assert list(dset_is1) == [1, 1, 1]
        assert list(rec_is0) == [1, 2, 3]
        assert list(rec_is1) == [1, 0, 3]
    else:
        assert list(sims) == [
            0.9432949307428928, 0.8568189930049877, 0.8419286042520673,
            0.6343698774541688, 0.6
        ]
        assert list(dset_is0) == [0, 0, 0, 0, 0]
        assert list(dset_is1) == [1, 1, 1, 1, 1]
        assert list(rec_is0) == [1, 2, 0, 3, 0]
        assert list(rec_is1) == [1, 0, 0, 3, 2]
Example #9
def test_no_blocking_too_few_datasets(datasets_n, k_, blocking_f):
    THRESHOLD = .6

    # These values don't actually matter...
    dataset0 = [ba('00'), ba('01'), ba('10'), ba('11')]
    datasets = [dataset0] if datasets_n else []

    def similarity_f(datasets, threshold, k=None, blocking_f=blocking_f):
        assert False, 'should not be called at all'

    sims, (dset_is0, dset_is1), (rec_is0,
                                 rec_is1) = find_candidate_pairs(datasets,
                                                                 similarity_f,
                                                                 THRESHOLD,
                                                                 k=k_)

    assert list(sims) == []
    assert list(dset_is0) == []
    assert list(dset_is1) == []
    assert list(rec_is0) == []
    assert list(rec_is1) == []
Example #10
def np_to_bitarray(np_array, N):
    '''
    DESCRIPTION
    Efficiently converts a numpy array of indices to a bitarray. It creates
    a bitarray of N bits and sets to 1 only those indices that coincide with
    the integers in the numpy array.

    Arguments:
        np_array (numpy.array): numpy array of indices.
        N (int): size of the desired bitarray.
    Return:
        bitarr (bitarray): a bitarray of N bits where only the indices that
        coincide with the integers present in the numpy array are set to 1.
    '''
    zero_arr = np.zeros(N, dtype=bool)
    zero_arr[np_array] = 1
    bitarr = ba()
    bitarr.pack(zero_arr.tobytes())
    return bitarr
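A quick usage sketch, with numpy and bitarray imported as in the other examples:

np_to_bitarray(np.array([1, 3]), 8)  # -> bitarray('01010000')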
Example #11
def set_to_bitarray(set_, N):
    """
    Convert from python set to bitarray.bitarray.

    Parameters
    ----------
    set_ : set
        a python set.
    N : int
        length of the desired bitarray. It must be greater than the maximum
        value of the indices present in the set.

    Returns
    -------
    bitarr : bitarray.bitarray
        bitarray of length N with the indices present in the set turned on.
    """
    zero_arr = np.zeros(N, dtype=bool)
    zero_arr[list(set_)] = 1
    bitarr = ba()
    bitarr.pack(zero_arr.tobytes())
    return bitarr
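The set-based variant behaves the same way:

set_to_bitarray({0, 2}, 4)  # -> bitarray('1010')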
Example #12
def test_blocking_two_datasets(k_):
    THRESHOLD = .6

    # These values don't actually matter...
    dataset0 = [ba('00'), ba('01'), ba('10'), ba('11')]
    dataset1 = [ba('00'), ba('01'), ba('11'), ba('')]
    datasets = [dataset0, dataset1]

    def similarity_f(datasets, threshold, k=None):
        assert threshold == THRESHOLD
        # All we need to check for k is that it's been passed correctly
        assert k == k_
        dset0, dset1 = map(list, datasets)
        if (dset0 == list(map(dataset0.__getitem__, [0, 1]))
                and dset1 == list(map(dataset1.__getitem__, [0, 1]))):
            # Block where first bits are 0
            sims = [0.9432949307428928, 0.8419286042520673]
            rec_is0 = [1, 0]
            rec_is1 = [1, 0]
        elif (dset0 == list(map(dataset0.__getitem__, [2, 3]))
              and dset1 == list(map(dataset1.__getitem__, [2]))):
            # Block where first bits are 1
            sims = []
            rec_is0 = []
            rec_is1 = []
        elif (dset0 == list(map(dataset0.__getitem__, [0, 2]))
              and dset1 == list(map(dataset1.__getitem__, [0]))):
            # Block where second bits are 0
            sims = [0.8568189930049877, 0.8419286042520673]
            rec_is0 = [1, 0]
            rec_is1 = [0, 0]
        elif (dset0 == list(map(dataset0.__getitem__, [1, 3]))
              and dset1 == list(map(dataset1.__getitem__, [1, 2]))):
            # Block where second bits are 1
            sims = [0.9432949307428928]
            rec_is0 = [0]
            rec_is1 = [0]
        else:
            assert False, 'datasets not passed through as expected'
        return array('d', sims), (array('I', rec_is0), array('I', rec_is1))

    def blocking_f(dataset_index, record_index, hash_):
        assert datasets[dataset_index][record_index] == hash_
        if hash_:
            yield 0, hash_[0]
            yield 1, hash_[1]

    sims, (dset_is0,
           dset_is1), (rec_is0,
                       rec_is1) = find_candidate_pairs(datasets,
                                                       similarity_f,
                                                       THRESHOLD,
                                                       k=k_,
                                                       blocking_f=blocking_f)

    if k_ == 0:
        assert list(sims) == []
        assert list(dset_is0) == []
        assert list(dset_is1) == []
        assert list(rec_is0) == []
        assert list(rec_is1) == []
    elif k_ == 1:
        assert list(sims) == [0.9432949307428928, 0.8568189930049877]
        assert list(dset_is0) == [0, 0]
        assert list(dset_is1) == [1, 1]
        assert list(rec_is0) == [1, 2]
        assert list(rec_is1) == [1, 0]
    else:
        assert list(sims) == [
            0.9432949307428928, 0.8568189930049877, 0.8419286042520673
        ]
        assert list(dset_is0) == [0, 0, 0]
        assert list(dset_is1) == [1, 1, 1]
        assert list(rec_is0) == [1, 2, 0]
        assert list(rec_is1) == [1, 0, 0]
Example #13
def test_blocking_three_datasets():
    THRESHOLD = .6
    SIMV = .7

    # These values don't actually matter...
    dataset0 = [ba('00'), ba('01'), ba('10'), ba('11')]
    dataset1 = [ba('00'), ba('01'), ba('11'), ba('10')]
    dataset2 = [ba('11'), ba('01'), ba('00'), ba('10')]
    datasets = [dataset0, dataset1, dataset2]

    def similarity_f(datasets, threshold, k=None):
        items = [(SIMV, i, j) for i in range(len(datasets[0]))
                 for j in range(len(datasets[1]))]

        return (array('d',
                      (i[0] for i in items)), (array('I',
                                                     (i[1] for i in items)),
                                               array('I',
                                                     (i[2] for i in items))))

    def blocking_f(dataset_index, record_index, hash_):
        assert datasets[dataset_index][record_index] == hash_
        # Records share a block if either their first bits match or
        # their second bits match.
        return enumerate(hash_)

    blocks = [[(i, j) for i, dset in enumerate(datasets)
               for j, rec in enumerate(dset) if rec[k] == v] for k in range(2)
              for v in [False, True]]

    sims, (dset_is0,
           dset_is1), (rec_is0,
                       rec_is1) = find_candidate_pairs(datasets,
                                                       similarity_f,
                                                       THRESHOLD,
                                                       blocking_f=blocking_f)

    assert (len(sims) == len(dset_is0) == len(rec_is0) == len(dset_is1) ==
            len(rec_is1))
    assert all(s == SIMV for s in sims)

    for i0, i1 in zip(zip(dset_is0, rec_is0), zip(dset_is1, rec_is1)):
        assert any(i0 in block and i1 in block for block in blocks)
    for block in blocks:
        for i0, i1 in combinations(block, 2):
            if i0[0] != i1[0]:
                assert (i0, i1) in zip(zip(dset_is0, rec_is0),
                                       zip(dset_is1, rec_is1))
Example #14
def test_no_blocking_three_datasets(k_):
    THRESHOLD = .6

    # These values don't actually matter...
    dataset0 = [ba('00'), ba('01'), ba('10'), ba('11')]
    dataset1 = [ba('00'), ba('01'), ba('11'), ba('')]
    dataset2 = [ba('11'), ba('01'), ba('00'), ba('10')]
    datasets = [dataset0, dataset1, dataset2]

    def similarity_f(datasets, threshold, k=None):
        assert threshold == THRESHOLD
        # All we need to check for k is that it's been passed correctly
        assert k == k_
        dset0, dset1 = map(list, datasets)
        if dset0 == dataset0 and dset1 == dataset1:
            sims = [
                0.9432949307428928, 0.8568189930049877, 0.8419286042520673,
                0.6343698774541688, 0.6
            ]
            rec_is0 = [1, 2, 0, 3, 0]
            rec_is1 = [1, 0, 0, 3, 2]
        elif dset0 == dataset0 and dset1 == dataset2:
            sims = [
                0.9962946784347061, 0.900267827898046, 0.88468228054972,
                0.6956392099710476
            ]
            rec_is0 = [1, 0, 3, 2]
            rec_is1 = [2, 1, 2, 3]
        elif dset0 == dataset1 and dset1 == dataset2:
            sims = [
                0.88468228054972, 0.699430643486643, 0.6121560533778709,
                0.6076471833512952
            ]
            rec_is0 = [3, 3, 2, 0]
            rec_is1 = [2, 3, 2, 3]
        else:
            assert False, 'datasets not passed through as expected'
        return array('d', sims), (array('I', rec_is0), array('I', rec_is1))

    sims, (dset_is0, dset_is1), (rec_is0,
                                 rec_is1) = find_candidate_pairs(datasets,
                                                                 similarity_f,
                                                                 THRESHOLD,
                                                                 k=k_)

    if k_ == 0:
        assert list(sims) == []
        assert list(dset_is0) == []
        assert list(dset_is1) == []
        assert list(rec_is0) == []
        assert list(rec_is1) == []
    elif k_ == 1:
        assert list(sims) == [
            0.9962946784347061, 0.9432949307428928, 0.900267827898046,
            0.88468228054972, 0.8568189930049877, 0.6956392099710476,
            0.6343698774541688
        ]
        assert list(dset_is0) == [0, 0, 0, 1, 0, 0, 0]
        assert list(dset_is1) == [2, 1, 2, 2, 1, 2, 1]
        assert list(rec_is0) == [1, 1, 0, 3, 2, 2, 3]
        assert list(rec_is1) == [2, 1, 1, 2, 0, 3, 3]
    else:
        assert list(sims) == [
            0.9962946784347061, 0.9432949307428928, 0.900267827898046,
            0.88468228054972, 0.88468228054972, 0.8568189930049877,
            0.8419286042520673, 0.699430643486643, 0.6956392099710476,
            0.6343698774541688, 0.6121560533778709, 0.6076471833512952, 0.6
        ]
        assert list(dset_is0) == [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0]
        assert list(dset_is1) == [2, 1, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 1]
        assert list(rec_is0) == [1, 1, 0, 3, 3, 2, 0, 3, 2, 3, 2, 0, 0]
        assert list(rec_is1) == [2, 1, 1, 2, 2, 0, 0, 3, 3, 3, 2, 3, 2]
def initialize(self):
    """ Randomly initialize population """
    return np.array(
        [ba(self.gene_number) for i in range(self.population_number)],
        float)
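Note that ba(n) only allocates n bits and leaves their values arbitrary, so the population above is not uniformly random. A sketch of a genuinely random variant, assuming bitarray >= 1.7 for bitarray.util.urandom:

from bitarray.util import urandom

def initialize(self):
    """ Initialize population with uniform random bits """
    return np.array(
        [urandom(self.gene_number) for _ in range(self.population_number)],
        float)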
Example #16
# assumed imports for this fragment (not shown in the original): randrange
# from the random module; `bits` is taken to be a bitstring.Bits-style
# constructor, given the uint=/length= keywords used below
from random import randrange

mps = maxPacketSize = 100  # bytes
hs = headerSize = 40  # bytes
pds = payloadSize = mps - hs  # bytes

##dataRateMode = 0        # for payload size dependency

# Constants and fixed values

#known:     v - tc - fl
#fromset:   nh
#unknown:   pl - hl - sa - da

###<SHOULD CHOOSE RULE 2>

v = version = bits(ba('0110'))  # 4   bits, value = 6
tc = trafficClass = bits(uint=0, length=8)  # 8   bits, value = 0
fl = flowLabel = bits(uint=0, length=20)  # 20  bits, value = 0
pl = payloadLength = bits(uint=pds,
                          length=16)  # 16  bits, value = payload size in bytes
nh = nextHeader = bits(uint=17, length=8)  # 8   bits, value = 17 = udp
hl = hopLimit = bits(uint=randrange(20, 255),
                     length=8)  # 8   bits, value = random
sa = sourceAddress = bits(ba(128))  # 128 bits, uninitialized (arbitrary) value
da = destinationAddress = bits(ba(128))  # 128 bits, uninitialized (arbitrary) value
pd = payload = bits(ba(pds * 8))  # payload bits, uninitialized (arbitrary) value

print "v: ", v, v.length
print "tc: ", tc, tc.length
print "fl: ", fl, fl.length
print "pl: ", pl, pl.int, pl.length
def Permutation(data, table):
    return ba([data[i] for i in table])
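A usage sketch: the table lists, for each output position, the input index it reads from, so [3, 2, 1, 0] reverses a 4-bit input:

Permutation(ba('0011'), [3, 2, 1, 0])  # -> bitarray('1100')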
Example #18
def count(rowsLeft, rowA, rowB, rowC):
    if rowsLeft == 0:
        return 1

    # filled another row ?
    if rowA.count() == width:
        return count(rowsLeft - 1, rowB, rowC, EmptyRow)

    # find first gap in rowA
    pos = rowA.index(0)

    result = 0

    # shape: @@  a
    #        @   b
    a, b = ba(rowA), ba(rowB)
    if (rowsLeft >= 2 and pos < width - 1 and use(pos, a) and use(pos + 1, a)
            and use(pos, b)):
        result += count(rowsLeft, fba(a), fba(b), rowC)

    # shape: @@  a
    #         @  b
    a, b = ba(rowA), ba(rowB)
    if (rowsLeft >= 2 and pos < width - 1 and use(pos, a) and use(pos + 1, a)
            and use(pos + 1, b)):
        result += count(rowsLeft, fba(a), fba(b), rowC)

    # shape: @   a
    #        @@  b
    a, b = ba(rowA), ba(rowB)
    if (rowsLeft >= 2 and pos < width - 1 and use(pos, a) and use(pos, b)
            and use(pos + 1, b)):
        result += count(rowsLeft, fba(a), fba(b), rowC)

    # shape:  @  a
    #        @@  b
    # note: this shape extends one "negative" unit to the left
    a, b = ba(rowA), ba(rowB)
    if (rowsLeft >= 2 and pos > 0 and use(pos, a) and use(pos - 1, b)
            and use(pos, b)):
        result += count(rowsLeft, fba(a), fba(b), rowC)

    # shape: @  a
    #        @  b
    #        @  c
    a, b, c = ba(rowA), ba(rowB), ba(rowC)
    if (rowsLeft >= 3 and use(pos, a) and use(pos, b) and use(pos, c)):
        result += count(rowsLeft, fba(a), fba(b), fba(c))

    # shape: @@@  a
    a = ba(rowA)
    if (rowsLeft >= 1 and pos < width - 2 and use(pos, a) and use(pos + 1, a)
            and use(pos + 2, a)):
        result += count(rowsLeft, fba(a), rowB, rowC)

    return result
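A sketch of the scaffolding this snippet assumes; the definitions below are reconstructions from how count uses them, not the original source: width and EmptyRow describe one grid row, use claims a free cell, and fba freezes a row so recursion levels cannot alias each other's state.

from bitarray import bitarray as ba, frozenbitarray as fba

width = 9  # columns in the grid (assumption)
EmptyRow = fba(width * '0')  # shared all-zero row, frozen against mutation

def use(pos, row):
    """Claim cell `pos` of `row`; return False if it is already filled."""
    if row[pos]:
        return False
    row[pos] = True
    return True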
Example #19
def bitclusterize(matrix, degrees, args):
    '''
    DESCRIPTION
    Clusters the bit matrix using bitwise operations.

    Args:
        matrix (list): list of bitarrays.
        degrees (collections.OrderedDict): dict of bitarray lengths.
    Return:
        clusters (numpy.ndarray): array of cluster IDs.
        leaders (list): list of cluster centers' IDs.
    '''
    degrees = np.asarray([degrees[x] for x in degrees])
    # Memory allocation for clusters container --------------------------------
    clusters = np.empty(len(matrix), dtype='int32')
    clusters.fill(-1)
    # Declare all bits as available -------------------------------------------
    available_bits = ba(len(degrees))
    available_bits.setall(1)
    # Start iterative switching -----------------------------------------------
    leaders = []
    clust_id = 0
    ncluster = -1
    while True:
        ncluster += 1
        # Break 0: break if max number of cluster was reached -----------------
        if ncluster > args.max_clust:
            break
        # Find the biggest cluster --------------------------------------------
        leader = degrees.argmax()
        # Break 1: all candidate clusters have degree 1 (can't clusterize) ----
        if degrees.sum() == np.nonzero(degrees)[0].size:
            # return clusters, leaders
            break
        biggest_cluster = matrix[leader] & available_bits
        biggest_cluster_list = np.frombuffer(biggest_cluster.unpack(),
                                             dtype=bool)
        # Break 2: all candidates cluster have degree < minsize ---------------
        if biggest_cluster_list.sum() < args.minsize:
            # return clusters, leaders
            break
        # Break 3: No more candidates available (empty matrix) ----------------
        if degrees.sum() == 0:
            # return clusters, leaders
            break
        degrees[biggest_cluster_list] = 0
        available_bits = (available_bits ^ biggest_cluster) & available_bits
        if biggest_cluster.count() <= 1:
            leaders.append(-1)
            # return clusters, leaders
            break
        else:
            leaders.append(leader)
            # Assign next cluster ID ------------------------------------------
            clusters[biggest_cluster_list] = clust_id
            clust_id += 1
        # Update degrees of unclustered frames --------------------------------
        for degree in available_bits.itersearch(ba('1')):
            # degrees[degree] = ba.fast_hw_and(available_bits, matrix[degree])
            degrees[degree] = bau.count_and(available_bits, matrix[degree])
    return clusters, leaders
Example #20
def main():
    # >>>> Debugging <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # import argparse
    # args = argparse.Namespace()
    # args.trajectory = '../trajs/aligned_tau.dcd'
    # args.topology = '../trajs/aligned_tau.pdb'
    # args.nclust = np.inf
    # args.min_clust_size = 2
    # args.first = 1000
    # args.last = 6000
    # args.stride = 3
    # args.selection = 'all'
    # args.cutoff = 4
    # args.outdir = 'bitQT_outputs'
    # =========================================================================
    # 1. Creating binary matrix (adjacency list)
    # =========================================================================
    # ++++ Get adjacency matrix of trajectory as list of bitarrays ++++++++++++
    args = parse_arguments()

    try:
        os.makedirs(args.outdir)
    except FileExistsError:
        raise Exception('{} directory already exists. '.format(args.outdir) +
                        'Please specify another location or rename it.')

    trajectory = load_raw_traj(args.trajectory, valid_trajs, args.topology)
    trajectory = shrink_traj_selection(trajectory, args.selection)
    N1 = trajectory.n_frames
    trajectory = shrink_traj_range(args.first, args.last, args.stride, trajectory)
    trajectory.center_coordinates()
    matrix = calc_rmsd_matrix(trajectory, args)
    # ++++ Tracking clust/uNCLUSTERed bits to avoid re-computations +++++++++++
    N = len(matrix[0])
    m = len(matrix)
    unclust_bit = ba(N)
    unclust_bit.setall(1)
    clustered_bit = unclust_bit.copy()
    clustered_bit.setall(0)
    zeros = np.zeros(N, dtype=np.int32)
    # ++++ Save clusters in an array (1 .. N) +++++++++++++++++++++++++++++++++
    clusters_array = np.zeros(N, dtype=np.int32)
    NCLUSTER = 0
    clustered = set()
    nmembers = []
    # ++++ Coloring ordered vertices (1 .. N) +++++++++++++++++++++++++++++++++
    degrees = calc_matrix_degrees(unclust_bit, matrix)
    ordered_by_degs = degrees.argsort()[::-1]
    colors = colour_matrix(ordered_by_degs, matrix)
    # colors[np.frombuffer(clustered_bit.unpack(), dtype=np.bool)] = 0

    # =========================================================================
    # 2. Main algorithm: BitQT !
    # =========================================================================
    while True:
        NCLUSTER += 1
        # ++++ Find a big clique early ++++++++++++++++++++++++++++++++++++++++
        big_node = degrees.argmax()
        bit_clique, big_clique = do_bit_cascade(big_node, degrees, colors,
                                                matrix, 0)
        big_clique_size = big_clique.size
        # ++++ Find promising nodes +++++++++++++++++++++++++++++++++++++++++++
        biggers = degrees > big_clique_size
        biggers[big_clique] = False
        cluster_colors = colors[big_clique]
        biggers_colors = colors[biggers]
        promising_colors = np.setdiff1d(biggers_colors, cluster_colors)
        promising_nodes = deque()
        for x in promising_colors:
            promising_nodes.extend(((colors == x) & biggers).nonzero()[0])
        # ++++ Explore all promising nodes ++++++++++++++++++++++++++++++++++++
        cum_found = big_clique
        while promising_nodes:
            node = promising_nodes.popleft()
            try:
                bit_clique, clique = do_bit_cascade(node, degrees, colors,
                                                    matrix, big_clique_size)
                CLIQUE_SIZE = len(clique)
            except TypeError:
                CLIQUE_SIZE = 0
            # ++++ Cumulative update only if biggers candidates are found +++++
            if CLIQUE_SIZE > big_clique_size:
                big_node = node
                big_clique = clique
                big_clique_size = big_clique.size
                # ++++ Repeat previous condition ++++++++++++++++++++++++++++++
                cum_found = np.concatenate((cum_found, big_clique))
                biggers = degrees > big_clique_size
                biggers[cum_found] = False
                cluster_colors = colors[big_clique]
                biggers_colors = colors[biggers]
                promising_colors = np.setdiff1d(biggers_colors, cluster_colors)
                promising_nodes = deque()
                for x in promising_colors:
                    promising_nodes.extend(((colors == x) & biggers).nonzero()[0])
        nmembers.append(big_clique_size)

        if (big_clique_size < args.min_clust_size) or (NCLUSTER == args.nclust):
            break

        # ++++ Save new cluster & update NCLUSTER +++++++++++++++++++++++++++++
        clusters_array[big_clique] = NCLUSTER
        # ++++ Update (un)clustered_bit +++++++++++++++++++++++++++++++++++++++
        clustered.update(big_clique)
        clustered_bit = set_to_bitarray(clustered, N)
        unclust_bit = ~clustered_bit
        # ++++ Hard erasing of clustered frames from matrix +++++++++++++++++++
        degrees = zeros.copy()
        for x in unclust_bit[:m].itersearch(ba('1')):
            degrees[x] = matrix[x].count()
            if bu.count_and(matrix[x], clustered_bit):
                matrix[x] &= (matrix[x] ^ clustered_bit)

    # =========================================================================
    # 3. Output
    # =========================================================================
    # saving pickle for api debugging tests
    outname = os.path.basename(args.topology).split('.')[0]
    pickle_to_file(clusters_array, os.path.join(args.outdir,
                                                '{}.pick'.format(outname)))
    # saving VMD visualization script
    to_VMD(args.outdir, args.topology, args.first, args.last, N1, args.stride,
           clusters_array[:m])
    # saving clustering info  files
    frames_stats = get_frames_stats(N1, args.first, args.last, args.stride,
                                    clusters_array[:m], args.outdir)
    cluster_stats = get_cluster_stats(clusters_array[:m], args.outdir)
    print('\n\nNormal Termination of BitQT :)')