Example 1
import logging
from threading import Thread
from time import sleep

try:
    from queue import Queue  # Python 3
except ImportError:
    from Queue import Queue  # Python 2

# LoopIterator, EmptyError, db, get_network_ips and ping_and_update are
# assumed to be project-level helpers importable from the surrounding package.


class Monitor(object):
    """Main monitor class."""
    def __init__(self, ips=None, num_thread=10):
        # Avoid a mutable default argument: a shared default list would
        # leak state across Monitor instances.
        self.ip_monitor_pool = ips if ips is not None else []
        self._cycle_ip = LoopIterator(self.ip_monitor_pool)
        self.num_thread = num_thread
        self.task_queue = Queue(maxsize=20)

    def _add_one_ip(self, ip, network):
        if ip not in self.ip_monitor_pool:
            self.ip_monitor_pool.append(ip)
            db.monitor_ip(ip, network)

    def _remove_one_ip(self, ip):
        if ip in self.ip_monitor_pool:
            self.ip_monitor_pool.remove(ip)
            db.unmonitor_ip(ip)

    def add_ip(self, ip):
        self._add_one_ip(ip=ip, network="others")
        logging.debug("Succecc to add the ip %s" % ip)

    def remove_ip(self, ip):
        self._remove_one_ip(ip=ip)
        logging.debug("Succecc to remove the ip %s" % ip)

    def add_network(self, network):
        ips = get_network_ips(network=network)
        for ip in ips:
            self._add_one_ip(ip=ip, network=network)
        logging.debug("Succecc to add the network %s" % network)

    def remove_network(self, network):
        ips = get_network_ips(network)
        for ip in ips:
            self._remove_one_ip(ip=ip)
        logging.debug("Succecc to remove the network %s" % network)

    def run(self):
        """开启多个工作线程同时在主线程循环往queue中加入待ping的ip"""
        for i in range(self.num_thread):
            t = Thread(target=self.worker)
            t.daemon = True
            t.start()

        while True:
            try:
                next_ip = self._cycle_ip.next()  # avoid shadowing builtin next
            except EmptyError:
                sleep(1)  # nothing to monitor yet; back off before retrying
            else:
                self.task_queue.put(next_ip)

            sleep(0.05)

    def worker(self):
        """工作线程,负责从queue中取出任务处理"""
        while True:
            ip = self.task_queue.get()
            ping_and_update(ip)
            sleep(0.5)
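
Note: LoopIterator and EmptyError are not shown in these examples. Below is a
minimal sketch consistent with how Example 1 uses them (a next() method that
cycles over a possibly growing list and raises EmptyError when it is empty);
the real implementation may differ:

class EmptyError(Exception):
    """Raised when there is nothing left to iterate over."""


class LoopIterator(object):
    """Cycles endlessly over a (possibly growing) list."""

    def __init__(self, items):
        self.items = items  # shared reference, so later appends are seen
        self.index = 0

    def next(self):
        if not self.items:
            raise EmptyError("empty pool")
        self.index %= len(self.items)
        item = self.items[self.index]
        self.index += 1
        return item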
Example 2
    def fit(self, data_z, data_p, data_y):
        '''Fit the treatment response model.

        Parameters
        ----------
        data_z : (n x d) np array of instruments
        data_p : (n x p) np array of treatments
        data_y : (n x 1) np array of outcomes
        '''

        num_instruments = data_z.shape[1]
        num_treatments = data_p.shape[1]
        num_outcomes = data_y.shape[1]
        self.num_treatments = num_treatments

        # Data iterators for critics/modeler and for meta-critic
        data_it = LoopIterator(np.arange(data_z.shape[0]),
                               self._batch_size_modeler,
                               random=True)
        data_it_hedge = LoopIterator(np.arange(data_z.shape[0]),
                                     data_z.shape[0],
                                     random=self._bootstrap_hedge)

        # Create a test grid for evaluating the loss at intervals
        test_min = np.percentile(data_p, 5)
        test_max = np.percentile(data_p, 95)
        self.test_grid = np.linspace(test_min, test_max, 100)

        # Create the clusterings of the data that define the critics
        cluster_labels, cluster_ids = self._data_clusterings(
            data_z, data_p, data_y)
        if self._critic_type == 'Gaussian':
            # Place a symmetric Gaussian encompassing all the data points of each cluster of each clustering
            center_grid = []
            precision_grid = []
            normalizers = []
            for tree in range(cluster_labels.shape[1]):
                for leaf in cluster_ids[tree]:
                    center = np.mean(
                        data_z[cluster_labels[:, tree].flatten() == leaf, :],
                        axis=0)
                    distance = np.linalg.norm(data_z - center,
                                              axis=1) / data_z.shape[1]
                    precision = 1. / (
                        np.sqrt(2) *
                        (np.sort(distance)[self._min_cluster_size]))
                    center_grid.append(center)
                    precision_grid.append(precision)
                    normalizers.append(
                        (precision**num_instruments) *
                        np.sum(np.exp(-(precision * distance)**2)) /
                        (np.power(2. * np.pi, num_instruments / 2.)))
            normalizers = np.ones(len(center_grid))  # overrides np.array(normalizers)
            center_grid = np.array(center_grid)
            precision_grid = np.array(precision_grid)
            if self._critics_precision is not None:
                precision_grid = self._critics_precision * np.ones(
                    precision_grid.shape)
            print(np.sort(center_grid[:, 0].flatten()))
            print(precision_grid[np.argsort(center_grid[:, 0].flatten())])
        else:
            # Place a uniform kernel only on the data points of each cluster of each clustering
            normalizers = []
            center_grid = []
            leaf_id_list = []
            for tree in range(cluster_labels.shape[1]):
                for leaf in cluster_ids[tree]:
                    center_grid.append(
                        np.mean(
                            data_z[cluster_labels[:,
                                                  tree].flatten() == leaf, :],
                            axis=0))  # used only for tensorflow summary
                    normalizers.append(
                        np.sum(cluster_labels[:, tree].flatten() == leaf))
                    leaf_id_list.append((tree, leaf))
            center_grid = np.array(center_grid)
            print(np.sort(center_grid[:, 0].flatten()))
            print(
                np.array(normalizers)[np.argsort(center_grid[:, 0].flatten())])
            normalizers = np.ones(len(center_grid))  # overrides np.array(normalizers)
            leaf_id_list = np.array(leaf_id_list)

        if num_instruments > 1:
            import matplotlib
            matplotlib.use('Agg')
            import matplotlib.mlab
            import matplotlib.pyplot as plt
            plt.figure()
            for tree in range(cluster_labels.shape[1]):
                plt.subplot(1, cluster_labels.shape[1], tree + 1)
                plt.scatter(data_z[:, 0],
                            data_z[:, 1],
                            c=[int(i % 23) for i in cluster_labels[:, tree]])
            plt.savefig('clusters3.png')
            if self._critic_type == 'Gaussian':
                plt.figure()
                z1 = np.linspace(np.min(data_z[:, 0]), np.max(data_z[:, 0]),
                                 20)
                z2 = np.linspace(np.min(data_z[:, 1]), np.max(data_z[:, 1]),
                                 20)
                z1v, z2v = np.meshgrid(z1, z2)
                for it, (ctr,
                         prec) in enumerate(zip(center_grid, precision_grid)):
                    # matplotlib.mlab.bivariate_normal was removed in
                    # matplotlib 3.1; this code assumes an older matplotlib.
                    # contour takes `colors`, not the scatter-style `c`.
                    plt.contour(z1v,
                                z2v,
                                matplotlib.mlab.bivariate_normal(
                                    z1v,
                                    z2v,
                                    mux=ctr[0],
                                    muy=ctr[1],
                                    sigmax=1 / prec,
                                    sigmay=1 / prec),
                                1,
                                colors='C{}'.format(it % 10))
                plt.savefig('clusters_gaussian3.png')

        # tf Graph input
        if self._random_seed is not None:
            tf.set_random_seed(self._random_seed)

        self.Z = tf.placeholder("float", [None, num_instruments],
                                name="instrument")
        self.P = tf.placeholder("float", [None, num_treatments],
                                name="treatment")
        self.Y = tf.placeholder("float", [None, num_outcomes], name="outcome")
        self.Leaf = tf.placeholder("float", [None, cluster_labels.shape[1]],
                                   name="leaf_id")
        self.drop_prob = tf.placeholder_with_default(1.0,
                                                     shape=(),
                                                     name="drop_prob")

        self.gmm_graph = GMMGameGraph(
            self.Z,
            self.P,
            self.Y,
            self.Leaf,
            self.drop_prob,
            eta_hedge=self._eta_hedge,
            loss_clip_hedge=self._loss_clip_hedge,
            learning_rate_modeler=self._learning_rate_modeler,
            learning_rate_critics=self._learning_rate_critics,
            critics_jitter=self._critics_jitter,
            critic_type=self._critic_type,
            l1_reg_weight_modeler=self._l1_reg_weight_modeler,
            l2_reg_weight_modeler=self._l2_reg_weight_modeler,
            dnn_layers=self._dnn_layers,
            dnn_poly_degree=self._dnn_poly_degree,
            dissimilarity_eta=self._dissimilarity_eta)
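        # Wire the critics into the graph: Gaussian critics need centers and
        # precisions, while cluster-based critics only need leaf assignments.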
        if self._critic_type == 'Gaussian':
            self.gmm_graph.create_graph(normalizers=normalizers,
                                        center_grid=center_grid,
                                        precision_grid=precision_grid)
        else:
            self.gmm_graph.create_graph(normalizers=normalizers,
                                        leaf_list=leaf_id_list)

        # Initialize the variables (i.e. assign their default value)
        init = tf.global_variables_initializer()
        if num_treatments == 1:
            self.avg_fn = []
            self.final_fn = []
            self.best_fn = []
        else:
            saver = tf.train.Saver(scope_variables("Modeler"),
                                   max_to_keep=self._num_steps)
            print(scope_variables("Modeler"))
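        # Randomly choose 40% of the steps in the final 80% of training at
        # which to snapshot the model; these snapshots are later averaged.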
        avg_store_steps = list(
            np.random.choice(np.arange(int(0.2 * self._num_steps),
                                       self._num_steps),
                             int(0.4 * self._num_steps),
                             replace=False))
        print(avg_store_steps)
        # Start training
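        # `loss` tracks the smallest maximum moment violation seen so far.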
        loss = np.inf
        with tf.Session() as sess:
            if self._log_summary:
                merged = tf.summary.merge_all()
                writer = tf.summary.FileWriter(self._summary_dir, sess.graph)

            # Run the initializer
            sess.run(init)
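            # d1..d6 accumulate per-phase wall-clock seconds for profiling,
            # reported every _display_step.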
            d1 = d2 = d3 = d4 = d5 = d6 = 0.
            for step in range(1, self._num_steps + 1):

                t1 = datetime.now()

                # Modeler
                for inner_step in range(self._train_ratio[0]):
                    inds = data_it.get_next()
                    y1, p1, z1, leaf1 = (data_y[inds], data_p[inds],
                                         data_z[inds], cluster_labels[inds])
                    inds = data_it.get_next()
                    y2, p2, z2, leaf2 = (data_y[inds], data_p[inds],
                                         data_z[inds], cluster_labels[inds])
                    sess.run(self.gmm_graph.update_prev_moments,
                             feed_dict={
                                 self.Z: z1,
                                 self.P: p1,
                                 self.Y: y1,
                                 self.Leaf: leaf1,
                                 self.drop_prob: .9
                             })
                    sess.run(self.gmm_graph.gradient_step_modeler,
                             feed_dict={
                                 self.Z: z2,
                                 self.P: p2,
                                 self.Y: y2,
                                 self.Leaf: leaf2,
                                 self.drop_prob: .9
                             })

                t2 = datetime.now()
                d1 += (t2 - t1).seconds + (t2 - t1).microseconds * 1E-6

                if DEBUG:
                    new_loss = sess.run(self.gmm_graph.max_violation,
                                        feed_dict={
                                            self.Z: data_z,
                                            self.P: data_p,
                                            self.Y: data_y,
                                            self.Leaf: cluster_labels
                                        })
                    print("After modeler: Step " + str(step) +
                          ", Moment violation= " + "{:.10f}".format(new_loss))
                    print([
                        sess.run(
                            [
                                crt.precision, crt.weights,
                                crt._normalized_translation, crt.center,
                                crt.output[0]
                            ],
                            feed_dict={
                                self.Z: data_z,
                                self.P: data_p,
                                self.Y: data_y,
                                self.Leaf: cluster_labels
                            }) for crt in self.gmm_graph.critics
                    ])
                    print(
                        sess.run([
                            cw.value() for cw in self.gmm_graph.critic_weights
                        ]))

                # Critics
                for inner_step in range(self._train_ratio[1]):
                    inds = data_it.get_next()
                    y1, p1, z1, leaf1 = (data_y[inds], data_p[inds],
                                         data_z[inds], cluster_labels[inds])
                    inds = data_it.get_next()
                    y2, p2, z2, leaf2 = (data_y[inds], data_p[inds],
                                         data_z[inds], cluster_labels[inds])
                    sess.run(self.gmm_graph.update_prev_moments,
                             feed_dict={
                                 self.Z: z1,
                                 self.P: p1,
                                 self.Y: y1,
                                 self.Leaf: leaf1,
                                 self.drop_prob: .9
                             })
                    sess.run(self.gmm_graph.gradient_step_critics,
                             feed_dict={
                                 self.Z: z2,
                                 self.P: p2,
                                 self.Y: y2,
                                 self.Leaf: leaf2,
                                 self.drop_prob: .9
                             })

                if DEBUG:
                    new_loss = sess.run(self.gmm_graph.max_violation,
                                        feed_dict={
                                            self.Z: data_z,
                                            self.P: data_p,
                                            self.Y: data_y,
                                            self.Leaf: cluster_labels
                                        })
                    print("After Critic Step " + str(step) +
                          ", Moment violation= " + "{:.10f}".format(new_loss))
                    print([
                        sess.run(
                            [
                                crt.precision, crt.weights,
                                crt._normalized_translation, crt.center,
                                crt.output[0]
                            ],
                            feed_dict={
                                self.Z: data_z,
                                self.P: data_p,
                                self.Y: data_y,
                                self.Leaf: cluster_labels
                            }) for crt in self.gmm_graph.critics
                    ])
                    print([
                        sess.run(cw.value())
                        for cw in self.gmm_graph.critic_weights
                    ])
                t3 = datetime.now()
                d2 += (t3 - t2).seconds + (t3 - t2).microseconds * 1E-6

                # Meta-Critic
                if step % self._hedge_step == 0:
                    inds = data_it_hedge.get_next()
                    y1, p1, z1, leaf1 = (data_y[inds], data_p[inds],
                                         data_z[inds], cluster_labels[inds])
                    sess.run(self.gmm_graph.gradient_step_meta_critic,
                             feed_dict={
                                 self.Z: z1,
                                 self.P: p1,
                                 self.Y: y1,
                                 self.Leaf: leaf1
                             })

                if DEBUG:
                    new_loss = sess.run(self.gmm_graph.max_violation,
                                        feed_dict={
                                            self.Z: data_z,
                                            self.P: data_p,
                                            self.Y: data_y,
                                            self.Leaf: cluster_labels
                                        })
                    print("After Meta-Critic Step " + str(step) +
                          ", Moment violation= " + "{:.10f}".format(new_loss))
                    print([
                        sess.run(
                            [
                                crt.precision, crt.weights,
                                crt._normalized_translation, crt.center,
                                crt.output[0]
                            ],
                            feed_dict={
                                self.Z: data_z,
                                self.P: data_p,
                                self.Y: data_y,
                                self.Leaf: cluster_labels
                            }) for crt in self.gmm_graph.critics
                    ])
                    print([
                        sess.run(cw.value())
                        for cw in self.gmm_graph.critic_weights
                    ])

                t4 = datetime.now()
                d3 += (t4 - t3).seconds + (t4 - t3).microseconds * 1E-6

                if step % self._check_loss_step == 0 or step == 1 or step == self._num_steps:
                    # Calculate batch loss and accuracy
                    new_loss = sess.run(self.gmm_graph.max_violation,
                                        feed_dict={
                                            self.Z: data_z,
                                            self.P: data_p,
                                            self.Y: data_y,
                                            self.Leaf: cluster_labels
                                        })
                    if new_loss <= loss:
                        if num_treatments == 1:
                            self.best_fn = sess.run(
                                self.gmm_graph.modeler.output,
                                feed_dict={
                                    self.P: self.test_grid.reshape(-1, 1)
                                }).flatten()
                        else:
                            saver.save(sess, "./tmp/model_best.ckpt")
                        loss = new_loss  # track the best loss in both branches

                t5 = datetime.now()
                d4 += (t5 - t4).seconds + (t5 - t4).microseconds * 1E-6

                if self._log_summary and step % self._store_step == 0:
                    summary = sess.run(merged,
                                       feed_dict={
                                           self.Z: data_z,
                                           self.P: data_p,
                                           self.Y: data_y,
                                           self.Leaf: cluster_labels
                                       })
                    writer.add_summary(summary, step)
                    log_function(writer,
                                 'CriticWeights',
                                 center_grid,
                                 np.array([
                                     sess.run(cw.value())
                                     for cw in self.gmm_graph.critic_weights
                                 ]),
                                 step,
                                 agg='sum')
                    #log_function(writer, 'CriticPrecisions', center_grid, np.array([sess.run(cr.precision.value()) for cr in self.gmm_graph.critics]), step, agg='mean')

                t6 = datetime.now()
                d5 += (t6 - t5).seconds + (t6 - t5).microseconds * 1E-6

                if step in avg_store_steps:  # snapshot window for averaging
                    if num_treatments == 1:
                        self.avg_fn.append(
                            sess.run(self.gmm_graph.modeler.output,
                                     feed_dict={
                                         self.P: self.test_grid.reshape(-1, 1)
                                     }).flatten())
                    else:
                        saver.save(sess, "./tmp/model_{}.ckpt".format(step))
                    self._checkpoints.append(step)

                t7 = datetime.now()
                d6 += (t7 - t6).seconds + (t7 - t6).microseconds * 1E-6

                if step % self._display_step == 0:
                    new_loss = sess.run(self.gmm_graph.max_violation,
                                        feed_dict={
                                            self.Z: data_z,
                                            self.P: data_p,
                                            self.Y: data_y,
                                            self.Leaf: cluster_labels
                                        })
                    print("Final Step " + str(step) + ", Moment violation= " +
                          "{:.10f}".format(new_loss))
                    print("Modeler train time: {:.2f}".format(d1))
                    print("Critic train time: {:.2f}".format(d2))
                    print("Meta-critic train time: {:.2f}".format(d3))
                    print("Best loss checking time: {:.2f}".format(d4))
                    print("Summary storing time: {:.2f}".format(d5))
                    print("Average model calculation time: {:.2f}".format(d6))

            print("Optimization Finished!")
            if num_treatments == 1:
                self.final_fn = sess.run(self.gmm_graph.modeler.output,
                                         feed_dict={
                                             self.P:
                                             self.test_grid.reshape(-1, 1)
                                         }).flatten()
            else:
                saver.save(sess, "./tmp/model_final.ckpt")

        if self._log_summary:
            writer.close()
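
Example 2 relies on a different LoopIterator interface: it is built over row
indices with a batch size and a random flag, and minibatches are drawn with
get_next(). Below is a minimal sketch consistent with that usage
(hypothetical; the actual class, including the exact bootstrap semantics of
random sampling, may differ):

import numpy as np


class LoopIterator(object):
    """Yields fixed-size batches of `items`, looping forever.

    With random=True each call returns a bootstrap-style random sample
    (with replacement); otherwise batches are taken in order, wrapping
    around at the end of the array.
    """

    def __init__(self, items, batch_size, random=False):
        self.items = np.asarray(items)
        self.batch_size = batch_size
        self.random = random
        self.pos = 0

    def get_next(self):
        if self.random:
            picks = np.random.choice(len(self.items), self.batch_size)
            return self.items[picks]
        idx = np.arange(self.pos, self.pos + self.batch_size)
        self.pos = (self.pos + self.batch_size) % len(self.items)
        return np.take(self.items, idx, mode='wrap')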