Example #1
def imperfect_features_test():
    from src.env.Amatrix_task import Amatrix
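    # assumed module-level imports, not shown in this snippet (paths inferred from the
    # other examples): import numpy as np; from src.util import Config;
    # from src.step_size_methods import Adam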

    n = 4
    m = 2
    env = Amatrix(n, m)

    features = env.get_approx_A()  # first m features
    weights = np.random.rand(m)

    config = Config()
    config.parameter_size = m
    config.init_alpha = 0.001
    adam = Adam(config)

    sample_size = 50000
    for i in range(sample_size):
        rand_row = np.random.randint(n)
        target = env.sample_target(rand_row, noisy=True)

        pred_features = features[rand_row, :]
        prediction = np.dot(pred_features, weights)
        error = target - prediction
        gradient, new_stepsize, new_weight_vector = adam.update_weight_vector(
            error, pred_features, weights)
        weights = new_weight_vector
        print("Sample number: {0}".format(i + 1))
        print("\tPrediction error:{0}".format(error))

    print("Theta star:\n{0}".format(env.theta_star))
    print("Estimated theta:\n{0}".format(weights))
Example #2
def test_random_features_generator(sample_size=10,
                                   compute_sample_statistics=True):

    config = Config()
    config.num_true_features = 2
    config.num_obs_features = 2
    config.max_num_features = 20000
    task = RandomFeatures(config)
    print("The value of theta is:\n{0}".format(task.theta))
    print("The norm of theta is: {0}".format(np.linalg.norm(task.theta)))

    for i in range(sample_size):
        target, observable_features, best_approximation = task.sample_observation(
            noisy=False)
        print("The features are: {0}\tThe target is:{1}".format(
            observable_features, target))

    if compute_sample_statistics:
        num_samples = 100000
        samples = np.zeros(num_samples)
        for i in range(num_samples):
            target, _, _ = task.sample_observation(noisy=False)
            samples[i] += target
        # The sample average and sample variance of the target should be 0 and 1, respectively.
        print("The sample average of the target is: {:.2f}".format(
            np.average(samples)))
        print("The sample variance of the target is: {:.2f}".format(
            np.var(samples)))
Example #3
def perfect_features_test():
    from src.env.Amatrix_task import Amatrix

    n = 20
    m = 3
    env = Amatrix(n, m)

    features = env.Amatrix  # perfect features
    weights = np.random.rand(n)

    config = Config()
    config.parameter_size = n
    config.init_alpha = 0.001
    adam = Adam(config)

    sample_size = 100000
    for i in range(sample_size):
        rand_row = np.random.randint(n)
        target = env.sample_target(rand_row, noisy=True)

        pred_features = features[rand_row, :]
        prediction = np.dot(pred_features, weights)
        error = target - prediction
        gradient, new_stepsize, new_weight_vector = adam.update_weight_vector(
            error, pred_features, weights)
        weights = new_weight_vector
        if (i + 1) % 10000 == 0:
            print("Sample number: {0}".format(i + 1))
            print("\tPrediction error:{0}".format(error))

    print("Theta star:\n{0}".format(env.theta_star))
    print("Estimated theta:\n{0}".format(weights))
    difference = np.sqrt(np.sum(np.square(env.theta_star - weights)))
    print("L2 norm of difference:\n{0}".format(difference))
Example #4
def test_environment_features():
    np.random.seed(0)
    config = Config()
    config.init_noise_var = 0.0
    config.num_obs_features = 4

    env = BoyanChain(config)

    def run_env(e: BoyanChain, s=20):
        for i in range(s):
            print("Step number: {0}".format(i + 1))
            current_state = e.current_state
            next_state, _, observed_features, terminal = e.step()
            print("\tMoved: {0} --> {1}".format(current_state, next_state))
            print("\tObserved Features: {0}".format(observed_features))

            if terminal:
                e.reset()

    run_env(env, 20)

    print("\nAdding 4 features without noise...")
    env.reset()
    env.add_feature(4)
    run_env(env, 20)

    print("\nAdding 4 features with noise...")
    env.reset()
    env.add_feature(4, noise=1)
    run_env(env, 20)
Example #5
    def __init__(self):
        super(Brisk, self).__init__()
        self.config = Config().config
        self.logger = Logger().logger
        self.proxy_manager = ProxyPool()
        self.db = DB().db

        self.__proxy_status = self.config.get('PROXY', 'empty')
        if self.__proxy_status == 'TRUE':
            self.proxy_manager.delete_valid_proxy()

        self.__hook_name = 'hook'
        self.__walk_name = 'walk'
        self.__flow_name = 'flow'

        self.__brisk_type = self.config.get('RUN', 'type')

        self.__func_filter = lambda m: not m.startswith("__") and \
                                       not m.startswith(self.__hook_name) and \
                                       not m.startswith(self.__walk_name) and \
                                       not m.startswith(self.__flow_name)

        self.__flow_num = int(self.config.get('RUN', 'num'))
        self.__hook = None
        self.__flow_queue = queue.Queue()
        self.__walk_queue = queue.Queue()
        self.__go_init()
Example #6
    def __init__(self, queue=None):
        Request.__init__(self)
        threading.Thread.__init__(self)
        self.logger = Logger().logger
        self.config = Config().config
        self.proxy_pool = ProxyPool()
        self.__queue = queue
        self.db = DB().db
        self.proxy_ip, self.proxy_port = None, None
        self.proxy()
Example #7
def learning_value_function(sample_size=100000, checkpoint=1000):
    np.random.seed(0)
    config = Config()
    config.init_noise_var = 0.1
    config.num_obs_features = 4

    env = BoyanChain(config)

    theta = np.zeros(config.num_obs_features, dtype=np.float64)
    theta_star = env.optimal_weights
    alpha = 0.005

    def train(th, th_star, e: BoyanChain, ss, ckpt):
        e.reset()
        current_features = e.get_observable_features()
        mean_square_value_diff = 0.0
        for i in range(ss):
            current_value = np.dot(current_features, th)
            optimal_value = np.dot(e.current_features, th_star)
            current_state, reward, next_features, terminal = e.step()

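            # semi-gradient TD(0) with discount 1, masked at terminal transitions:
            # delta = r + (1 - terminal) * v(s') - v(s);  th <- th + alpha * delta * x(s)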
            next_value = np.dot(next_features, th)
            temporal_diff = reward + (
                1 - int(terminal)) * next_value - current_value
            th += alpha * temporal_diff * current_features

            mean_square_value_diff += np.square(current_value -
                                                optimal_value) / ckpt
            if (i + 1) % ckpt == 0:
                print("Training Step: {0}".format(i + 1))
                print(
                    "\tEstimated MSVE: {0:.4f}".format(mean_square_value_diff))
                print("\tTrue MSVE: {0:.4f}".format(e.compute_msve(th)))
                mean_square_value_diff *= 0

            current_features = next_features

            if terminal:
                e.reset()
                current_features = e.get_observable_features()

    print("First phase of training...")
    train(theta, theta_star, env, sample_size, checkpoint)
    env.add_feature(4, 0.0)

    print("\n\nSecond phase of training...")
    new_theta = np.zeros(8, dtype=np.float64)
    new_theta[:4] += theta
    train(new_theta, theta_star, env, sample_size, checkpoint)
Example #8
def adding_bad_features_test():
    from src.env.Amatrix_task import Amatrix

    n = 10
    m = 5
    env = Amatrix(n, m)

    features = env.get_approx_A()  # first m features
    weights = np.zeros(m)

    config = Config()
    config.parameter_size = m
    config.theta = 0.1
    config.init_beta = np.log(0.0001)
    idbd = SIDBD(config)

    sample_size = 50000
    additional_features = 30
    for k in range(additional_features + 1):
        print("Number of features in the representation: {0}".format(
            idbd.parameter_size))
        for i in range(sample_size):
            rand_row = np.random.randint(n)
            target = env.sample_target(rand_row, noisy=True)

            pred_features = features[rand_row, :]
            prediction = np.dot(pred_features, weights)
            error = target - prediction
            gradient, new_stepsize, new_weight_vector = idbd.update_weight_vector(
                error, pred_features, weights)
            weights = new_weight_vector
            if ((i + 1) % 25000) == 0:
                print("\tSample number: {0}".format(i + 1))
                print("\t\tPrediction error: {0}".format(error))

        print("Theta star:\n{0}".format(env.theta_star))
        print("Estimated theta:\n{0}".format(weights))

        if k < additional_features:
            print("Adding new feature...")
            new_feature = env.get_new_bad_features(1)
            features = np.hstack((features, new_feature))
            idbd.increase_size(1)

            new_weights = np.zeros(m + 1)
            new_weights[:m] = weights
            m += 1
            weights = new_weights
Example #9
def random_policy_test(steps=100, verbose=False):
    print("==== Results with Random Policy ====")
    config = Config()
    actions = 3

    config.current_step = 0
    env = MountainCar(config)

    cumulative_reward = 0
    terminations = 0
    steps_per_episode = []

    episode_steps = 0

    for i in range(steps):
        A = np.random.randint(actions)
        old_state = env.get_current_state()
        next_S, R, terminate = env.step(A)
        if verbose:
            print("Old state:", np.round(old_state, 3), "-->", "Action:", A,
                  "-->", "New state:", np.round(next_S, 3))
        cumulative_reward += R
        episode_steps += 1
        if terminate:
            if verbose:
                print("\n## Reset ##\n")
            terminations += 1
            steps_per_episode.append(episode_steps)
            episode_steps *= 0
            env.reset()

    if not terminate:
        steps_per_episode.append(episode_steps)

    print("Number of steps per episode:", steps_per_episode)
    print("Number of episodes that reached the end:", terminations)
    average_length = np.average(steps_per_episode)
    print("The average number of steps per episode was:", average_length)
    print("Cumulative reward:", cumulative_reward)
    print("\n\n")
Example #10
class Core(Request, threading.Thread):

    def __init__(self, queue=None):
        Request.__init__(self)
        threading.Thread.__init__(self)
        self.logger = Logger().logger
        self.config = Config().config
        self.proxy_pool = ProxyPool()
        self.__queue = queue
        self.db = DB().db
        self.proxy_ip, self.proxy_port = None, None
        self.proxy()

    def proxy(self):
        if self.config.get('PROXY', 'use') == 'TRUE':
            if self.proxy_ip:
                # release the proxy currently in use before requesting a new one
                self.proxy_pool.delete_proxy(ip=self.proxy_ip,
                                             type=self.config.get('PROXY', 'type'))
            self.proxy_ip, self.proxy_port = self.proxy_pool.get_proxy(
                    type=self.config.get('PROXY', 'type'),
                    seed_num=int(self.config.get('PROXY', 'seed_num')),
                    distinct=self.config.get('PROXY', 'distinct') == 'TRUE'
                    )
        else:
            self.proxy_ip, self.proxy_port = None, None

    @use_proxy
    @load_params
    def get(self, url, params=None, **kwargs):
        return super().get(url, params, **kwargs)

    @use_proxy
    @load_params
    def post(self, url, data, **kwargs):
        return super().post(url, data, **kwargs)

    def task(self):
        pass

    def run(self):
        self.logger.info('go')
        try:
            self.task()
            self.logger.info('ok')
        except Exception as e:
            self.logger.info('something wrong: {}'.format(e))
        finally:
            if self.__queue:
                assert isinstance(self.__queue, queue.Queue)
                self.__queue.task_done()
Example #11
def pumping_action_test(steps=100, verbose=False):
    print("==== Results with Pumping Action Policy ====")
    config = Config()

    config.current_step = 0
    env = MountainCar(config)

    steps_per_episode = []
    return_per_episode = []

    episode_steps = 0
    episode_return = 0
    terminations = 0
    for i in range(steps):
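        # pumping policy: 1 + sign(velocity) pushes in the direction of motion
        # (assumes state = (position, velocity); actions 0 = reverse, 1 = coast, 2 = forward)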
        old_state = env.get_current_state()
        A = int(1 + np.sign(old_state[1]))
        next_S, R, terminate = env.step(A)
        if verbose:
            print("Old state:", np.round(old_state, 3), "-->", "Action:", A,
                  "-->", "New state:", np.round(next_S, 3))

        episode_steps += 1
        episode_return += R
        if terminate:
            terminations += 1
            if verbose:
                print("\n## Reset ##\n")
            env.reset()
            steps_per_episode.append(episode_steps)
            return_per_episode.append(episode_return)
            episode_steps *= 0
            episode_return *= 0

    print("Number of steps per episode:", steps_per_episode)
    print("Number of successful episodes:", terminations)
    print("Return per episode:", return_per_episode)
    print("The average return per episode is:", np.mean(return_per_episode))
Example #12
def test_function_approximator(num_features=20,
                               initial_features=20,
                               num_iterations=10000,
                               chkpt=100,
                               plot_mse=True,
                               noisy=True,
                               add_features=False,
                               add_true_features=True,
                               feature_add_interval=100,
                               mixed_features=False):

    from src.step_size_methods import SGD
    config = Config()
    # task setup
    config.num_true_features = num_features
    config.num_obs_features = initial_features  # same as function approximator
    config.max_num_features = 20000  # same as function approximator
    task = RandomFeatures(config)

    # function approximator setup
    approximator = LinearFunctionApproximator(config)

    # optimizer setup
    config.parameter_size = initial_features
    config.alpha = 0.001
    optimizer = SGD(config)

    # for plotting
    mse_per_chpt = np.zeros(num_iterations // chkpt, dtype=np.float64)
    mse = 0
    current_chpt = 0

    # training loop
    for i in range(num_iterations):
        target, observable_features, best_approximation = task.sample_observation(
            noisy=noisy)
        prediction = approximator.get_prediction(observable_features)
        error = target - prediction
        _, _, new_weights = optimizer.update_weight_vector(
            error, observable_features, approximator.get_weight_vector())
        approximator.update_weight_vector(new_weights)

        squared_loss = np.square(error)
        mse += squared_loss / chkpt
        if (i + 1) % chkpt == 0:
            # reporting and saving
            print("Iteration number: {0}".format(i + 1))
            print("\tTarget: {0:.4f}".format(target))
            print("\tPrediction: {0:.4f}".format(prediction))
            print("\tMean Squared Error: {0:.4f}".format(mse))
            mse_per_chpt[current_chpt] += mse
            mse *= 0
            current_chpt += 1

        if add_features and (i + 1) % feature_add_interval == 0:
            task.add_new_feature(k=1, true_feature=add_true_features)
            approximator.increase_num_features(k=1)
            optimizer.increase_size(k=1)
            if mixed_features:
                add_true_features = not add_true_features

    if plot_mse:
        # plots
        import matplotlib.pyplot as plt
        x_axis = np.arange(num_iterations // chkpt)
        plt.plot(x_axis, mse_per_chpt)
        plt.show()
        plt.close()
Example #13
    def __init__(self):
        super(ProxyPool, self).__init__()
        self.config = Config().config
        self.default_db = DB().default_db
        self.proxy_db = DB().proxy_db
        self.logger = Logger().logger
Example #14
def boyan_chain_test(steps=50000):
    from src.env.BoyanChain import BoyanChain
    from src.env.RandomFeatures_task import LinearFunctionApproximator
    from src.util import Config
    import matplotlib.pyplot as plt

    config = Config()
    checkpoint = 100
    """ Environment Setup """
    config.init_noise_var = 0.1
    config.num_obs_features = 4
    config.max_num_features = 9
    """ AutoTIDBD Setup """
    config.parameter_size = 4
    config.theta = 0.001
    config.tau = 10000
    config.init_stepsize = 0.001
    # to keep track of learning progress
    run_avg_msve = np.zeros(steps // checkpoint, dtype=np.float64)
    current_checkpoint = 0
    avg_msve = 0

    env = BoyanChain(config)
    approximator = LinearFunctionApproximator(config)
    optimizer = AutoTIDBD(config)
    """ Start of Learning"""
    curr_obs_feats = env.get_observable_features()
    for s in range(steps):
        state_value = approximator.get_prediction(curr_obs_feats)
        optimal_value = env.compute_true_value()
        # step in the environment
        _, r, next_obs_feats, term = env.step()
        next_state_value = approximator.get_prediction(next_obs_feats)
        # compute td error
        td_error = r + (1 - term) * next_state_value - state_value
        # update weights
        _, _, new_weights = optimizer.update_weight_vector(
            td_error,
            features=curr_obs_feats,
            weights=approximator.get_weight_vector(),
            discounted_next_features=next_obs_feats)
        approximator.update_weight_vector(new_weights)
        # update features
        curr_obs_feats = next_obs_feats
        # keep track of progress
        avg_msve += np.square(state_value - optimal_value) / checkpoint
        # check if terminal state
        if term:
            env.reset()
            curr_obs_feats = env.get_observable_features()
        # store learning progress so far
        if (s + 1) % checkpoint == 0:
            run_avg_msve[current_checkpoint] += avg_msve
            avg_msve *= 0
            current_checkpoint += 1

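        # halfway through training, grow the representation: the environment adds 4
        # noise-free real features, matched by the approximator and the optimizer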
        if (s + 1) == (steps // 2):
            env.add_feature(k=4, noise=0.0, fake_feature=False)
            approximator.increase_num_features(4)
            optimizer.increase_size(4)
            curr_obs_feats = env.get_observable_features()

    print("The average MSVE is: {0:0.4f}".format(np.average(run_avg_msve)))

    xaxis = np.arange(run_avg_msve.size) + 1
    plt.plot(xaxis, run_avg_msve)
    plt.show()
    plt.close()
Example #15
def sarsa_zero_test(steps=10000,
                    add_new_centers=False,
                    number_of_irrelevant_features=0):
    import matplotlib.pyplot as plt
    from src.env.RandomFeatures_task import LinearFunctionApproximator
    from src.step_size_methods.sgd import SGD

    # epsilon greedy policy
    def choose_action(av_array: np.ndarray, epsilon):
        p = np.random.rand()
        if p > epsilon:
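            # greedy action, breaking ties among equal maxima uniformly at random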
            argmax_av = np.random.choice(
                np.flatnonzero(av_array == av_array.max()))
            return argmax_av
        else:
            return np.random.randint(av_array.size)

    # for computing action values
    def get_action_values(n, features, approximator_list):
        action_values = np.zeros(n, dtype=np.float64)
        for k in range(n):
            action_values[k] += approximator_list[k].get_prediction(features)
        return action_values

    completed_episodes_per_run = []
    for _ in range(1):
        print("==== Results for Sarsa(0) with Epsilon Greedy Policy ====")
        config = Config()

        # setting up feature function
        config.state_dims = 2
        config.state_lims = np.array(((-1, 1), (-1, 1)), dtype=np.float64)
        # config.initial_centers = np.array(((0.0,0.0), (-1.8,0), (1.8,0), (0.0,-1.8), (0.0,1.8)), dtype=np.float64)
        config.initial_centers = np.array(
            ((0.0, 0.0), (0.25, 0.25), (0.25, -0.25), (-0.25, -0.25),
             (-0.25, 0.25)),
            dtype=np.float64)
        config.sigma = 0.5
        config.init_noise_mean = 0.0
        config.init_noise_var = 0.01
        feature_function = RadialBasisFunction(config)

        # setting up environment
        config.norm_state = True
        env = MountainCar(config)

        # function approximator and optimizer parameters
        num_actions = 3
        random_action_prob = 0.1
        gamma = 0.99
        config.num_obs_features = feature_function.num_features
        config.max_num_features = 200  # as long as this is more than 12
        config.num_actions = num_actions
        config.alpha = 0.005
        config.rescale = False
        config.parameter_size = feature_function.num_features
        function_approximator = []
        optimizer = []
        # one instance for each action
        for i in range(num_actions):
            function_approximator.append(LinearFunctionApproximator(config))
            optimizer.append(SGD(config))

        # setting up summaries
        all_episodes_return = []
        episode_return = 0

        # setting up initial state, action, features, and action values
        curr_s = env.get_current_state()
        curr_features = feature_function.get_observable_features(curr_s)
        curr_avs = get_action_values(num_actions, curr_features,
                                     function_approximator)
        curr_a = choose_action(curr_avs, random_action_prob)
        midpoint_episode = 0
        for i in range(steps):
            # get current action values
            curr_avs = get_action_values(num_actions, curr_features,
                                         function_approximator)
            # execute current action
            next_s, r, terminal = env.step(curr_a)
            next_features = feature_function.get_observable_features(next_s)
            # get next action values and action
            next_action_values = get_action_values(num_actions, next_features,
                                                   function_approximator)
            next_action = choose_action(next_action_values, random_action_prob)
            # compute TD error for Sarsa(0)
            td_error = r + gamma * (
                1 -
                terminal) * next_action_values[next_action] - curr_avs[curr_a]
            # update weight vector
            _, ss, new_weights = optimizer[curr_a].update_weight_vector(
                td_error, curr_features,
                function_approximator[curr_a].get_weight_vector())
            function_approximator[curr_a].update_weight_vector(new_weights)
            # set current features and action
            curr_features = next_features
            curr_a = next_action
            # keep track of sum of rewards
            episode_return += r
            # if terminal state
            if terminal:
                env.reset()
                all_episodes_return.append(episode_return)
                episode_return *= 0
                curr_s = env.get_current_state()
                curr_features = feature_function.get_observable_features(
                    curr_s)
                curr_avs = get_action_values(num_actions, curr_features,
                                             function_approximator)
                curr_a = choose_action(curr_avs, random_action_prob)
            # if midpoint of training
            if (i + 1) == (steps // 2):
                if add_new_centers:
                    new_centers = np.array(
                        ((0, 0), (0.25, 0.25), (0.25, -0.25), (-0.25, -0.25),
                         (-0.25, 0.25)),
                        dtype=np.float64)
                    feature_function.add_centers(new_centers,
                                                 noise_var=0,
                                                 noise_mean=0)
                    for k in range(num_actions):
                        function_approximator[k].increase_num_features(
                            new_centers.shape[0])
                        optimizer[k].increase_size(new_centers.shape[0],
                                                   init_stepsize=0.25)
                if number_of_irrelevant_features > 0:
                    new_feature_mean = 0.0
                    new_feature_var = 0.05
                    fake_features = True
                    feature_function.add_feature(number_of_irrelevant_features,
                                                 noise_mean=new_feature_mean,
                                                 noise_var=new_feature_var,
                                                 fake_feature=fake_features)
                    for k in range(num_actions):
                        function_approximator[k].increase_num_features(
                            number_of_irrelevant_features)
                        optimizer[k].increase_size(
                            number_of_irrelevant_features)
                curr_features = feature_function.get_observable_features(
                    curr_s)
                midpoint_episode = len(all_episodes_return)
        completed_episodes_per_run.append(len(all_episodes_return))
        print("Number of episodes completed: {0}".format(
            len(all_episodes_return)))
    print("Average episodes completed: {0:0.4f}".format(
        np.average(completed_episodes_per_run)))

    print("Return per episode:\n", all_episodes_return)
    plt.plot(np.arange(len(all_episodes_return)) + 1, all_episodes_return)
    plt.vlines(x=midpoint_episode, ymin=-800, ymax=0)
    plt.ylim((-800, 0))
    plt.show()
    plt.close()
Example #16
class Brisk(Request):

    def __init__(self):
        super(Brisk, self).__init__()
        self.config = Config().config
        self.logger = Logger().logger
        self.proxy_manager = ProxyPool()
        self.db = DB().db

        self.__proxy_status = self.config.get('PROXY', 'empty')
        if self.__proxy_status == 'TRUE':
            self.proxy_manager.delete_valid_proxy()

        self.__hook_name = 'hook'
        self.__walk_name = 'walk'
        self.__flow_name = 'flow'

        self.__brisk_type = self.config.get('RUN', 'type')

        self.__func_filter = lambda m: not m.startswith("__") and \
                                       not m.startswith(self.__hook_name) and \
                                       not m.startswith(self.__walk_name) and \
                                       not m.startswith(self.__flow_name)

        self.__flow_num = int(self.config.get('RUN', 'num'))
        self.__hook = None
        self.__flow_queue = queue.Queue()
        self.__walk_queue = queue.Queue()
        self.__go_init()

    def __go_init(self):

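        # bind the first hook* method to a Core worker as its task, copy the class's
        # plain helper methods onto it, then queue up walk*/flow* methods by run type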
        for method_name in list(
                filter(lambda m: m.startswith(self.__hook_name) and callable(getattr(self, m)), dir(self))):
            method = self.__class__.__dict__[method_name]
            obj = Core()
            obj.task = types.MethodType(method, obj)
            for func_name in filter(self.__func_filter, self.__class__.__dict__):
                func = self.__class__.__dict__[func_name]
                setattr(obj, func_name, types.MethodType(func, obj))
            self.__hook = obj
            break

        if self.__brisk_type == 'WALK':
            for method_name in list(
                    filter(lambda m: m.startswith(self.__walk_name) and callable(getattr(self, m)), dir(self))):
                self.__walk_queue.put(method_name)

        if self.__brisk_type == 'FLOW':
            for method_name in list(
                    filter(lambda m: m.startswith(self.__flow_name) and callable(getattr(self, m)), dir(self))):
                self.__flow_queue.put(method_name)

    def go(self):
        self.logger.info('brisk go')

        self.logger.info('brisk create {} task(s)'.format(self.__flow_queue.qsize()))
        if self.__hook:
            self.__hook_attr_base = dir(self.__hook)
            self.logger.info('brisk create hook')
            self.__hook.start()
            self.__hook.join()
            self.logger.info('brisk complete hook')

        self.__hook: Core
        self.__hook_attr = []
        for method_name in dir(self.__hook):
            if method_name not in self.__hook_attr_base:
                self.__hook_attr.append(method_name)
        while not self.__walk_queue.empty():
            method_name = self.__walk_queue.get()
            method = self.__class__.__dict__[method_name]
            t = Core(self.__walk_queue)
            for attr_name in self.__hook_attr:
                setattr(t, attr_name, self.__hook.__dict__[attr_name])
            t.task = types.MethodType(method, t)
            for func_name in filter(self.__func_filter, self.__class__.__dict__):
                func = self.__class__.__dict__[func_name]
                setattr(t, func_name, types.MethodType(func, t))
            if self.__hook:
                t.make(self.__hook.headers, self.__hook.cookies)
            t.start()
            t.join()
        self.__walk_queue.join()

        while not self.__flow_queue.empty():
            if (threading.active_count() - 1) < self.__flow_num:
                method_name = self.__flow_queue.get()
                method = self.__class__.__dict__[method_name]
                t = Core(self.__flow_queue)
                for attr_name in self.__hook_attr:
                    setattr(t, attr_name, self.__hook.__dict__[attr_name])
                t.task = types.MethodType(method, t)
                for func_name in filter(self.__func_filter, self.__class__.__dict__):
                    func = self.__class__.__dict__[func_name]
                    setattr(t, func_name, types.MethodType(func, t))
                if self.__hook:
                    t.make(self.__hook.headers, self.__hook.cookies)
                t.start()
        self.__flow_queue.join()

        self.logger.info('brisk ok')