def test_algo_4_multiple_1_k_equal_3(self):
    """algorithm_4 with k=3: {1,2,3} is the only candidate group, so the
    computed cover must be exactly [{1, 2, 3}]."""
    relevant_indexes = {1, 2, 3}
    F_cf = [{1, 2, 3}]
    k = 3
    database_path = "./mini_Src/mini/dbs/test_db_multiple.txt"
    # First attribute taxonomy: range [1, 2]; leaves cover rows {1,2} and {3}.
    taxonomy_first = {
        'data': [1, 2],
        'children': [
            {'data': [1, 1], 'children': [], 'index': [1, 2]},
            {'data': [2, 2], 'children': [], 'index': [3]},
        ],
    }
    # Second attribute taxonomy: range [3, 4]; leaves cover rows {1} and {2,3}.
    taxonomy_second = {
        'data': [3, 4],
        'children': [
            {'data': [3, 3], 'children': [], 'index': [1]},
            {'data': [4, 4], 'children': [], 'index': [2, 3]},
        ],
    }
    attributes_array_by_order_database = [taxonomy_first, taxonomy_second]

    expected_result = [{1, 2, 3}]
    cover = algorithm_4(relevant_indexes, F_cf, k, database_path,
                        attributes_array_by_order_database)
    # Mutual containment: the cover holds exactly the expected sets.
    for group in cover:
        self.assertTrue(group in expected_result)
    for group in expected_result:
        self.assertTrue(group in cover)
# Example #2
def algorithm_5(database_path, k, attributes_array_by_order_database,
                attributes_array_int_then_string, list_num, num_lines):
    """Run the full anonymization pipeline for one database.

    Computes the candidate groups (algorithm_6), verifies they cover every
    relevant row, picks a cover (algorithm_4), clusters it (algorithm_2),
    and writes the k-anonymized output (k_anon).

    Args:
        database_path: path to the database file to anonymize.
        k: anonymity parameter (minimum group size).
        attributes_array_by_order_database: taxonomy trees, one per column.
        attributes_array_int_then_string: taxonomies reordered int-first.
        list_num: per-numeric-attribute value lists.
        num_lines: number of rows in the database.

    Returns:
        None. Side effects: writes either "errorfcf" (failure evidence) or
        "fcf" (the candidate groups), and the anonymized output via k_anon.
    """
    (F_cf, relevant_indexes, lines_in_database) = algorithm_6(
        database_path, k, attributes_array_by_order_database,
        attributes_array_int_then_string, list_num, num_lines)

    def _dump_fcf(path):
        # Persist F_cf and relevant_indexes for offline inspection.
        # Context manager guarantees the handle is closed even on error.
        with open(path, "w+") as f:
            for item in F_cf:
                f.write(str(item))
                f.write("\n")
            f.write("relevant_indexes : \n")
            f.write(str(relevant_indexes))
            f.write("\n")

    # Initial value set() makes the union well-defined even for empty F_cf
    # (the original reduce raised TypeError in that case).
    union_fcf = reduce(lambda a, b: a | b, F_cf, set())
    if union_fcf != relevant_indexes:
        # The candidate groups do not cover every relevant row: a k-anonymous
        # partition cannot be produced. Dump the evidence and abort.
        print("error occurred - can not produce k-anonymity ")
        _dump_fcf("errorfcf")
        return
    _dump_fcf("fcf")

    cover = algorithm_4(relevant_indexes, F_cf, k, database_path,
                        attributes_array_by_order_database)
    cluster = algorithm_2(k, cover)
    k_anon(database_path, cluster, relevant_indexes,
           attributes_array_by_order_database, lines_in_database)
# Example #3
def main_DQPID(load):
    """Run the DQPID reinforcement-learning control loop on the configured
    robot platform for EXECUTION_TIME steps.

    Args:
        load: if truthy, restore previously pickled Q tables from disk
              ('Q_arrange<i>.txt', count in 'len_Q.npy') before running.

    Side effects: drives the robot, prints per-step diagnostics, pickles the
    Q tables back to disk, and saves plots/metrics at the end.
    """
    global platform
    signal.signal(signal.SIGINT,
                  sigint_handler)  # to execute the signal interrupt

    # QPID parameters
    Q_index = 0
    Q_arrange = deque()
    action_discretization_n = 3
    maximum_depth = 8

    # robot parameters
    current_robot = robot_dict[platform]
    state = current_robot['initial_state']
    set_point = current_robot['set_point']
    initial_action_centroid = current_robot['action_centroid']
    robot = current_robot['class'](set_point,
                                   dt=1. / Ts,
                                   Teval=Teval,
                                   simulation=simulation)
    K_step = current_robot['K_step']
    print('k step', K_step)
    time.sleep(1)
    # Classes instantiations
    action_selector = action_chooser(E_GREED, EXECUTION_TIME)
    memory = memory_comparator(memory_repetition)
    Q_arrange.append(
        DQPID(state,
              None,
              1.,
              initial_action_centroid,
              action_discretization_n,
              maximum_depth,
              0.,
              0.,
              K_step=K_step))
    ltm = deque(maxlen=10000)  # long term memory max
    minibatch_size = 32
    # others
    time.sleep(1)
    state = state[0]
    if load:
        n_to_load = np.load('len_Q.npy')[0]
        for i in range(n_to_load):
            # BUG FIX: pickle requires binary mode on Python 3 ('rb', not
            # 'r'), and the handle was never closed. Also, indexing the
            # one-element deque raised IndexError for every table after the
            # first — replace existing slots and append the rest.
            with open('Q_arrange' + str(i) + '.txt', 'rb') as file:
                table = pickle.load(file)
            if i < len(Q_arrange):
                Q_arrange[i] = table
            else:
                Q_arrange.append(table)

    # I propose the generation of a new class, that will be responsible for saving everything and plotting

    for x in range(EXECUTION_TIME):

        start = time.time()

        # Pick an action (epsilon-greedy) from the currently active Q table.
        flag_ab, action, action_index, e_greed, state_index = action_selector.get(
            Q_arrange[Q_index], state)
        memory.update(state_index, action_index, Q_index, action)

        next_state = robot.update(action, Q_arrange[Q_index].depth)

        reward = robot.get_gaussian_reward(next_state, set_point)
        #previous_Q_index = Q_index
        # save external stuff
        '''
        if x == 200:
            set_point = np.array([-0.41, 0.31]) 
            robot.set_point = set_point
        if x == 400:
            set_point = np.array([0.21, 0.11]) 
            robot.set_point = set_point
        '''
        if x < N_mariano:
            # Warm-up phase: plain Q update on the active table.
            Q_arrange[Q_index] = algorithm_2(Q_arrange[Q_index], state_index,
                                             next_state, reward, action_index,
                                             flag_ab)
            next_Q_index = Q_index
            memory.counter = 0

        else:
            memory.compare()
            if memory.flag_no_variation == True:
                print('algorithm 4')
                # BUG FIX: the result was assigned to a never-used name
                # 'Q_arrangement', silently discarding the updated Q tables.
                Q_arrange, Q_index, next_Q_index, action_selector.e_greed_counter = algorithm_4(
                    Q_arrange, memory.Mt, next_state, reward, maximum_depth,
                    action_discretization_n, action_selector.e_greed_counter,
                    set_point, flag_ab, K_step)
                memory.flag_no_variation = False
            else:
                #print('algorithm 3')
                Q_arrange, Q_index, next_Q_index = algorithm_3(
                    Q_arrange, memory.Mt, next_state, reward, flag_ab)

        end = time.time()

        # Record the transition in long-term memory for experience replay.
        ltm.append([
            state, next_state, action, Q_index, next_Q_index, reward, flag_ab,
            action_index
        ])
        if mode == 'long_term_memory':
            Q_arrange = update_long_term_memory(ltm, Q_arrange, minibatch_size)

        Q_index = next_Q_index
        state = next_state

        print(x, 'R', round(reward, 3), 's', next_state, 'depth',
              Q_arrange[Q_index].depth, 't', round(end - start, 2))

    # saving Q tables (BUG FIX: pickle needs binary mode 'wb', not 'w',
    # and the handles were never closed)
    for i in range(len(Q_arrange)):
        with open('Q_arrange' + str(i) + '.txt', 'wb') as file:
            pickle.dump(Q_arrange[i], file)

    np.save('len_Q.npy', np.array([len(Q_arrange)]))

    # ploting and printing performance
    print('actions', action)
    robot.plotter.plot(savefig=True)
    robot.plotter.save_values()
    mse = robot.plotter.mean_squared_error(set_point)
    print('mse', mse, 'mean mse', np.mean(mse))
    euclidean_distance = robot.plotter.euclidean_distance(set_point)
    print('euclidean_distance', euclidean_distance)
    mahalanobis = robot.plotter.mahalanobis(set_point)
    print('mahalanobis', mahalanobis)

    robot.stop()
    def test_algo_4_multiple_2(self):
        """algorithm_4 with k=2 over three attribute taxonomies: any pair of
        two-element candidate groups that together cover {1,2,3} is a legal
        cover, so three alternative expected results are accepted."""
        relevant_indexes = {1, 2, 3}
        F_cf = [{1, 2}, {2, 3}, {1, 2, 3}, {1, 3}]
        k = 2
        database_path = "./mini_Src/mini/dbs/test_db_multiple_2.txt"
        # Numeric taxonomy over [1, 2]: leaves cover rows {1,2} and {3}.
        taxonomy_first = {
            'data': [1, 2],
            'children': [
                {'data': [1, 1], 'children': [], 'index': [1, 2]},
                {'data': [2, 2], 'children': [], 'index': [3]},
            ],
        }
        # String taxonomy: instruments -> String/Percussion -> guitar/piano.
        taxonomy_str = {
            "data": "instruments",
            "children": [
                {
                    "data": "String",
                    "children": [
                        {"data": "guitar", "children": [], "index": [1, 3]},
                    ],
                    "index": [],
                },
                {
                    "data": "Percussion",
                    "children": [
                        {"data": "piano", "children": [], "index": [2]},
                    ],
                    "index": [],
                },
            ],
        }
        # Numeric taxonomy over [3, 4]: leaves cover rows {1} and {2,3}.
        taxonomy_second = {
            'data': [3, 4],
            'children': [
                {'data': [3, 3], 'children': [], 'index': [1]},
                {'data': [4, 4], 'children': [], 'index': [2, 3]},
            ],
        }
        attributes_array_by_order_database = [
            taxonomy_first, taxonomy_str, taxonomy_second
        ]

        expected_result1 = [{1, 2}, {2, 3}]
        expected_result2 = [{1, 2}, {1, 3}]
        expected_result3 = [{2, 3}, {1, 3}]

        cover = algorithm_4(relevant_indexes, F_cf, k, database_path,
                            attributes_array_by_order_database)

        def two_way_contain(list1, list2):
            # True iff each list contains every element of the other
            # (mutual containment, order- and multiplicity-insensitive).
            return (all(item in list2 for item in list1)
                    and all(item in list1 for item in list2))

        isLegalResult = (two_way_contain(cover, expected_result1)
                         or two_way_contain(cover, expected_result2)
                         or two_way_contain(cover, expected_result3))
        self.assertTrue(isLegalResult)
    def test_algo_4_first_two_of_adult(self):
        """algorithm_4 with k=2 on the first two attributes (age, workclass)
        of a 4-row Adult-style sample; the expected cover is fixed."""
        relevant_indexes = {1, 2, 3, 4}
        F_cf = [{1, 3}, {2, 4}, {3, 4}, {1, 2, 3, 4}]
        k = 2
        database_path = "./mini_Src/mini/dbs/test_db_cover_1"
        # Age taxonomy: [38,53] -> [38,39] / [50,53] -> single-age leaves.
        age_map = {
            'data': [38, 53],
            'children': [
                {
                    'data': [38, 39],
                    'children': [
                        {'data': [39, 39], 'children': [], 'index': [1]},
                        {'data': [38, 38], 'children': [], 'index': [3]},
                    ],
                },
                {
                    'data': [50, 53],
                    'children': [
                        {'data': [50, 50], 'children': [], 'index': [2]},
                        {'data': [53, 53], 'children': [], 'index': [4]},
                    ],
                },
            ],
        }
        # Workclass taxonomy: "*" -> private / self-emp / gov / no income.
        workclass_map = {
            "data": "*",
            "children": [
                {"data": "private", "children": [], "index": [3, 4]},
                {
                    "data": "self-emp",
                    "children": [
                        {"data": "self-emp-not-inc", "children": [],
                         "index": [2]},
                        {"data": "self-emp-inc", "children": [], "index": []},
                    ],
                },
                {
                    "data": "gov",
                    "children": [
                        {"data": "federal-gov", "children": [], "index": []},
                        {"data": "local-gov", "children": [], "index": []},
                        {"data": "state-gov", "children": [], "index": [1]},
                    ],
                },
                {
                    "data": "no income",
                    "children": [
                        {"data": "without-pay", "children": [], "index": []},
                        {"data": "never-worked", "children": [], "index": []},
                    ],
                },
            ],
        }

        attributes_array_by_order_database = [age_map, workclass_map]
        # (kept for reference) integer-mapping preprocessing once tried here:
        # attributes_array_int_then_string = [age_map, workclass_map]
        # first_list = []
        # list_num = [first_list]
        # lists_num = make_int_lists(list_num, database_path,
        #                            attributes_array_by_order_database,
        #                            attributes_array_int_then_string)
        # make_new_int_maps(lists_num, attributes_array_by_order_database,
        #                   attributes_array_int_then_string)
        expected_result = [{3, 4}, {1, 3}, {2, 4}]
        cover = algorithm_4(relevant_indexes, F_cf, k, database_path,
                            attributes_array_by_order_database)
        # Mutual containment: cover and expected_result hold the same sets.
        for group in cover:
            self.assertTrue(group in expected_result)
        for group in expected_result:
            self.assertTrue(group in cover)