Example #1
    def reward(self, action):
        # A non-zero action accepts the request and routes it along the chosen
        # path: the reward grows with bandwidth and service time and decays
        # with the number of edges on the path.
        if action != 0:
            num_edge_on_path = len(
                self.path_dict[self.get_source_sink_pair(self.current_request)][action])
            base_reward = (self.current_request.bandwidth
                           * self.current_request.service_time
                           * math.pow(self.decrease_with_edge, num_edge_on_path))
            if self.current_request.type.isStatic:
                return base_reward * self.base_rate * self.static_rate
            else:
                return base_reward * self.base_rate * self.elastic_rate
        else:
            # Action 0 rejects the request; only rejected scaling requests of
            # elastic services are penalized.
            if not self.current_request.isScale:
                return 0
            else:
                base_reward = (self.current_request.bandwidth
                               * self.current_request.service_time
                               * self.base_rate * self.elastic_rate)
                # The rejection penalty is largest when the reported traffic
                # distribution closely matches the historical one (small KL).
                hist_dist = self.elastic_hist_dist[self.current_request.type.id]
                report_dist = self.current_request.type.distribution_list
                KL = kl_divergence(hist_dist, report_dist)
                return -1.0 * math.exp(-KL) * base_reward
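
The kl_divergence helper used above is not shown in this example. Assuming hist_dist and report_dist are discrete probability vectors of equal length, a minimal sketch of such a helper (the implementation below is an assumption, not the project's actual code) could look like this:

import numpy as np

def kl_divergence(p, q, eps=1e-12):
    """KL(p || q) for two discrete probability vectors of equal length."""
    p = np.clip(np.asarray(p, dtype=float), eps, None)
    q = np.clip(np.asarray(q, dtype=float), eps, None)
    return float(np.sum(p * np.log(p / q)))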
Example #2
    def _importance(self, feature):
        # Prediction from the bias feature alone (all other slots empty).
        prior_prediction = self.predict(
            [util.bias_feature()] +
            [pb.Feature()] * (self._config.num_features - 1))
        # Prediction with the feature of interest filled into the last slot.
        with_weight_prediction = self.predict(
            [util.bias_feature()] +
            [pb.Feature()] * (self._config.num_features - 2) + [feature])

        # Importance = how much the feature shifts the predictive distribution.
        return util.kl_divergence(with_weight_prediction, prior_prediction)
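
In other words, a feature's importance here is how far it moves the model's predictive distribution away from the bias-only baseline. A hypothetical numeric illustration (the two prediction vectors below are made up, not taken from the example):

import numpy as np

prior_prediction = np.array([0.7, 0.3])        # bias feature only
with_weight_prediction = np.array([0.4, 0.6])  # bias + the feature of interest
importance = np.sum(with_weight_prediction *
                    np.log(with_weight_prediction / prior_prediction))
print(importance)   # ~0.19 nats: the feature noticeably shifts the prediction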
Example #3
def ITCC(p, k, l, n_iters, convergeThresh, cX, cY):
    """Information-theoretic co-clustering of the joint distribution p into
    k row clusters and l column clusters, starting from the initial
    assignments cX and cY."""
    import numpy as np
    from util import (prob_clust, calc_q, kl_divergence,
                      next_cX, next_cY, sorting)

    m = np.shape(p)[0]
    n = np.shape(p)[1]

    converged = False

    # Initial approximation q and its KL divergence from p.
    q = calc_q(p, range(0, m), cX, range(0, n), cY)
    kl_curr = kl_divergence(p.ravel(), q.ravel())
    kl_prev = 0.0
    Error = [kl_curr]

    for i in range(0, n_iters):
        print("iteration:", i, "Error:", kl_curr)
        kl_prev = kl_curr

        # Update the row clustering cX, then recompute q.
        cX = np.matrix(next_cX(p, q, cX, k))
        q = calc_q(p, range(0, m), cX, range(0, n), cY)

        # Update the column clustering cY, then recompute q.
        cY = np.matrix(next_cY(p, q, cY, l))
        q = calc_q(p, range(0, m), cX, range(0, n), cY)

        kl_curr = kl_divergence(p.ravel(), q.ravel())
        Error.append(kl_curr)

        # Stop once the improvement in KL divergence falls below the threshold.
        if abs(kl_prev - kl_curr) < convergeThresh:
            converged = True
            break

    M1 = sorting(p, k, l, cX, cY)
    clustered = prob_clust(M1, range(0, k), cX, range(0, l), cY)
    return (M1, q, cX, cY, clustered, Error)
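
A hypothetical invocation of ITCC is sketched below. It assumes that p is a joint probability matrix and that cX and cY are initial cluster-assignment vectors with one cluster index per row and per column; the exact format expected by the util helpers is not shown in this example, so treat this purely as an illustration of the call signature.

import numpy as np

# Random 20x30 joint distribution, 4 row clusters, 5 column clusters.
p = np.random.rand(20, 30)
p = p / p.sum()
cX0 = np.matrix(np.random.randint(0, 4, size=20))
cY0 = np.matrix(np.random.randint(0, 5, size=30))

M1, q, cX, cY, clustered, err = ITCC(p, 4, 5, n_iters=100,
                                     convergeThresh=1e-6, cX=cX0, cY=cY0)
print("final KL(p || q):", err[-1])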
Example #4
    def _importance(self, feature):
        prior_prediction = self.predict([util.bias_feature()] +
                                        [pb.Feature()] *
                                        (self._config.num_features - 1))
        with_weight_prediction = self.predict([util.bias_feature()] +
                                              [pb.Feature()] *
                                              (self._config.num_features - 2) +
                                              [feature])

        return util.kl_divergence(with_weight_prediction, prior_prediction)
Example #5
# sys.argv[1]: input_model_file
# sys.argv[2]: average_feature_num_per_sample - 1
# sys.argv[3]: output_feature_importance_file

# assume every feature_value is 1

import sys

from scipy.stats import norm

# kl_divergence(p, q) is assumed to be defined earlier in this file or
# imported from a project-local module.

importances = []
extra_feature_num = int(sys.argv[2])

line_index = 0
for line in open(sys.argv[1]):
    line = line.strip()
    fields = line.split()
    if line_index == 0:
        # The first line of the model file carries the global beta parameter.
        beta = float(fields[1])
    else:
        # Remaining lines: feature index, posterior mean, posterior variance.
        feature_index = int(fields[0])
        mean = float(fields[1])
        variance = float(fields[2])
        prob = norm.cdf(mean / (variance + extra_feature_num + beta ** 2))
        # Importance: Bernoulli KL divergence between prob and the neutral 0.5.
        impor = kl_divergence(prob, 0.5)
        importances.append((feature_index, impor))
    line_index += 1

importances_list = sorted(importances, key=lambda x:x[1], reverse=True)
output_handle = open(sys.argv[3], 'w')
for fea_index, impor in importances_list:
    line = str(fea_index) + " " + str(impor) + "\n"
    output_handle.write(line)
output_handle.close()
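
Here kl_divergence is called with two scalar probabilities (prob versus the neutral value 0.5), so it reads most naturally as a KL divergence between two Bernoulli distributions. A minimal sketch under that assumption (the script's real helper is not shown):

import math

def kl_divergence(p, q, eps=1e-12):
    """KL divergence between Bernoulli(p) and Bernoulli(q), in nats."""
    p = min(max(p, eps), 1.0 - eps)
    q = min(max(q, eps), 1.0 - eps)
    return p * math.log(p / q) + (1.0 - p) * math.log((1.0 - p) / (1.0 - q))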
