Code example #1
    def __init__(self):

        filePath = "F:/test/workload.xlsx"
        wrFile = WRFile()
        self.workload = wrFile.readDataFromExcel(filePath=filePath,
                                                 sheet_name="1")
        data = self.evaluateBurst()
        data = data / np.max(data)
        wrFile.writeDataIntoExcel(data=data, filePath="F:/test/avgsampEn.xlsx")
Code example #2
def analyzePredictionPrecision():
    objFileName = "svr_rbf"
    precision = []  # headers: ratio, max, mean
    wrFile = WRFile()
    objFilePath = "F:\\FIFA\\predict\\" + objFileName + "/precision_over.xlsx"
    predict = wrFile.readDataFromExcel(objFilePath, min_cols=1, max_cols=3)
    predict = predict.reshape(3, 8)
    result = [np.average(predict[0]), np.average(predict[1]), np.average(predict[2])]
    wrFile.writeDataIntoExcel(data=result,
                              filePath="F:\\FIFA\\predict\\" + objFileName +
                              "/precision_over_evaluate.xlsx")
Code example #3
def analyzeStats():
    qList = [2, 3, 4, 5, 6]
    wrFile = WRFile()
    result = []
    for q in qList:
        fileName = "F:\\data\\experiment/Delay_SQ_q" + str(q) + ".xlsx"
        data = wrFile.readDataFromExcel(filePath=fileName,
                                        sheet_name="1",
                                        min_cols=4,
                                        max_cols=4)
        r = getStatisticAttribute(data)
        result.append(r)
    #print("ATBM is",result)
    wrFile.writeDataIntoExcel(
        result, filePath="F:\\data\\experiment/Delay_SQ_stats.xlsx")
Code example #4
def compareModel():
    fileStart = "F:\\one\\final\\inmin/workload"
    fileEnd = "inmin.xlsx"
    wrFile = WRFile()

    #fft = FFTPredict()
    #noBurst_pre = fft.FFTofNoBurst(data51 , data52 )
    #WFD_pre = fft.FFTofWFD(data51 , data52 )
    periods = 10
    for i in range(53, 61):
        result = TestGreyModel(periods, filePath=fileStart + str(i) + fileEnd)
        predict = result[1]
        print(i)
        wrFile.writeDataIntoExcel(data=predict,
                                  filePath="F:/one/predict/GM/wc" + str(i) +
                                  ".xlsx")
Code example #5
def analyzeResult(q, better, worse):
    axeRange = q
    record = []
    print(axeRange)
    for x in range(axeRange):
        for y in range(axeRange):
            for z in range(axeRange):
                if better[x][y][z] > 0 or worse[x][y][z] > 0:
                    print()
                    #print("better:"+str(better[x][y][z]))
                    #print("worse:"+str(worse[x][y][z]))
                    record.append([
                        str(x) + "<c1<=" + str(x + 1) + "," + str(y) +
                        "<c2<=" + str(y + 1) + "," + str(z) + "<c3<=" +
                        str(z + 1), better[x][y][z], worse[x][y][z]
                    ])
                    #print("=======================")
    wrFile = WRFile()
    filePath = "F:\\data\\experiment/Delay_distribute_q" + str(q) + ".xlsx"
    wrFile.writeDataIntoExcel(data=record, filePath=filePath)
Code example #6
def splitOnlineData():
    shopName = ["京东商城", "天猫商城", "亚马逊商城", "淘宝商城"]
    # read out all the data, then store everything into the per-shop lists
    # 1. generate the date data

    wrFile = WRFile()
    data = wrFile.readDataFromExcel(filePath="F:\\data\\orginal\\online.xlsx",
                                    sheet_name="online",
                                    cols=3)
    # 2. slice the data
    jingdong = []
    tmall = []
    amazon = []
    taobao = []
    for i in range(len(data)):
        count = i % 4
        if count == 0:
            if data[i] > 0:
                jingdong.append(data[i])
            else:
                jingdong.append(data[i - 4])
        elif count == 1:
            if data[i] > 0:
                tmall.append(data[i])
            else:
                tmall.append(data[i - 4])
        elif count == 2:
            if data[i] > 0:
                amazon.append(data[i])
            else:
                amazon.append(data[i - 4])
        elif count == 3:
            if data[i] > 0:
                taobao.append(data[i])
            else:
                taobao.append(data[i - 4])
    # 3. write the data into Excel
    fileRoot = "F:\\data\\online/"
    filesuffix = ".xlsx"
    wrFile.writeDataIntoExcel(data=jingdong,
                              filePath=fileRoot + "jingdong" + filesuffix)
    wrFile.writeDataIntoExcel(data=tmall,
                              filePath=fileRoot + "tmall" + filesuffix)
    wrFile.writeDataIntoExcel(data=amazon,
                              filePath=fileRoot + "amazon" + filesuffix)
    wrFile.writeDataIntoExcel(data=taobao,
                              filePath=fileRoot + "taobao" + filesuffix)
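The loop above de-interleaves the single column read from online.xlsx: consecutive rows cycle through the four shops, and a non-positive reading is replaced by that shop's value from the previous cycle. A minimal sketch of the same de-interleaving with plain list slicing, assuming data is the flat list returned by readDataFromExcel above (the non-positive fallback is omitted):

# every 4th value, starting at offsets 0..3, belongs to the same shop
jingdong, tmall, amazon, taobao = (data[offset::4] for offset in range(4))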
Code example #7
        c = moveDown(c)
        result = a[i]
        if result == 0:
            equal[c[0]][c[1]][c[2]] += 1
        elif result == 1:
            better[c[0]][c[1]][c[2]] += 1
        else:
            worse[c[0]][c[1]][c[2]] += 1

    #print("better is",better)
    #print("worse is",worse)
    #print("equal is",equal)
    analyzeResult(q, better, worse)


#divideSpace(q)
#getStatisticAttribute(q)
#analyzeStats()

wrFile = WRFile()
data = wrFile.readDataFromExcel(
    filePath="D:\\cloudsim\\log\\workload1/taobao.xlsx",
    min_cols=1,
    max_cols=1,
    sheet_name="1")
data = np.floor(np.array(data) / 100)
#print(data)
wrFile.writeDataIntoExcel(
    data=data, filePath="D:\\cloudsim\\log\\workload1/deplete_taobao.xlsx")
print(np.percentile(np.array(data), 80))
Code example #8
class ClusterData:
    def __init__(self):
        self.wrFile = WRFile()

    '''
    timestamp                      second,new_timestamp
    ["2016-11-17 06:08:42.692"]===>[42,2016-11-17 06:08:42]
    ["2016-11-17 06:08:42"]===>[42,2016-11-17 06:08:42]
    '''

    def getSecond_timestamp(self, timestamp):
        loc = timestamp.rindex(":")
        try:
            dot_loc = timestamp.rindex(".")
            new_item = timestamp[:dot_loc]
            second = timestamp[loc + 1:dot_loc]
        except ValueError:
            new_item = timestamp
            second = timestamp[loc + 1:]
        return {"second": second, "timestamp": new_item}

    def transformQtoXYZ(self, q_x, q_y, q_z, q_w):
        x = round(2 * q_x * q_z + 2 * q_y * q_w, 2)
        y = round(2 * q_y * q_z - 2 * q_x * q_w, 2)
        z = round(1 - 2 * q_x * q_x - 2 * q_y * q_y, 2)
        return [x, y, z]
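    # Note: the three expressions above form the third column of the rotation matrix of
    # the unit quaternion (q_x, q_y, q_z, q_w), i.e. the unit z-axis rotated by that
    # quaternion, which presumably gives the viewing direction. For example, the identity
    # quaternion maps to the z-axis: transformQtoXYZ(0.0, 0.0, 0.0, 1.0) returns [0.0, 0.0, 1.0].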

    def mergeDataInSeconds(self, num, video_num):
        path = "F:\\project\\dataset\\vr\\Formated_Data\\Experiment_1/" + str(
            num) + "/VD_1.xlsx"
        #readDataFromExcel(self,filePath,cols=1,sheet_name = "1",min_col =1,max_col = 1)
        #readDataFromExcel2(self,filePath,rows = 1,sheet_name = "1",min_row =1,max_row = 1):
        result = self.wrFile.readDataFromExcel2(path,
                                                rows=2,
                                                sheet_name="Sheet",
                                                max_row=0)
        #Timestamp	PlaybackTime	UnitQuaternion.x	UnitQuaternion.y	UnitQuaternion.z	UnitQuaternion.w	HmdPosition.x	HmdPosition.y	HmdPosition.z
        max_rows = len(result)  # max row - 1
        min_rows = 1
        count = 1
        sum_q_x = float(result[1][2])
        sum_q_y = float(result[1][3])
        sum_q_z = float(result[1][4])
        sum_q_w = float(result[1][5])

        time_xyz = []

        for r in range(min_rows + 1, max_rows):
            item = result[r]
            item = numpy.array(item[2:], dtype="float64")
            current_time = self.getSecond_timestamp(result[r][0])
            last_time = self.getSecond_timestamp(result[r - 1][0])
            if (current_time["second"] == last_time["second"]):
                #print("same")
                sum_q_x = sum_q_x + item[0]
                sum_q_y = sum_q_y + item[1]
                sum_q_z = sum_q_z + item[2]
                sum_q_w = sum_q_w + item[3]
                count = count + 1
            else:

                average_q_x = round(sum_q_x / count, 2)
                average_q_y = round(sum_q_y / count, 2)
                average_q_z = round(sum_q_z / count, 2)
                average_q_w = round(sum_q_w / count, 2)
                xyz = self.transformQtoXYZ(average_q_x, average_q_y,
                                           average_q_z, average_q_w)
                #print(xyz)
                count = 1
                sum_q_x = item[0]
                sum_q_y = item[1]
                sum_q_z = item[2]
                sum_q_w = item[3]
                temp = [current_time["timestamp"]]
                temp.extend(xyz)
                time_xyz.append(temp)

        # finalize the last second
        average_q_x = round(sum_q_x / count, 2)
        average_q_y = round(sum_q_y / count, 2)
        average_q_z = round(sum_q_z / count, 2)
        average_q_w = round(sum_q_w / count, 2)
        xyz = self.transformQtoXYZ(average_q_x, average_q_y, average_q_z,
                                   average_q_w)
        timestamp = [
            self.getSecond_timestamp(result[max_rows - 1][0])["timestamp"]
        ]
        timestamp.extend(xyz)
        time_xyz.append(timestamp)
        #(self,data,filePath,describe ="no description",cols = 0,sheet_name = "1")#
        rows = self.wrFile.writeDataIntoExcel(
            data=time_xyz,
            filePath="F:\\project\\dataset\\vr\\Formated_Data\\Experiment_1/" +
            str(num) + "/u" + str(num) + "_v" + str(video_num) +
            "_second.xlsx")
        return rows

    '''
    data has shape 48 x 204 x 3:
      48  -- the total number of users
      204 -- the number of seconds in this video
      3   -- the three dimensions recorded for each second
    '''

    def mergeDataFromUsers(self, video):
        data = numpy.zeros((48, 204, 3))
        #print(data)
        '''=====store the data of users in a multi-dimension matrix Data====='''
        for user in range(1, 48 + 1):
            filePath = "F:\\project\\dataset\\vr\\Formated_Data\\Experiment_1/" + str(
                user) + "/u" + str(user) + "_v" + str(
                    video) + "_second.xlsx"  #u1_v1_second.xlsx

            user_data = numpy.array(self.wrFile.readDataFromExcel2(filePath))
            #print(user_data.shape)
            user_rows = len(user_data)
            row_standard = 204
            if (user_rows >= row_standard):
                data[user - 1, 0:row_standard] = user_data[:row_standard, 1:]
            else:  # user_rows < row_standard: pad the missing rows with the last row
                data[user - 1, :user_rows] = user_data[:, 1:]
                for i in range(1, row_standard - user_rows + 1):
                    data[user - 1,
                         user_rows - 1 + i, :] = user_data[user_rows - 1, 1:]
        '''====================================='''
        #print(data[18,:,:])
        second_data = data[:, 0, :]
        for r in range(1, 204):
            #if r==18:
            #print(data[:,r,:])
            second_data = numpy.hstack((second_data, data[:, r, :]))
        print(second_data.shape)
        self.wrFile.writeDataIntoExcel(
            data=second_data,
            filePath=
            "F:\\project\\dataset\\vr\\Formated_Data\\Experiment_1/second.xlsx"
        )

    def predictMotion(self):
        pass

    def getClassifer(self, Bmax):
        # 1. randomly select 43 of the 48 samples, and
        # 2. use those samples to train the classifier
        filePath = "F:\\project\\dataset\\vr\\Formated_Data\\Experiment_1/cluster_data.xlsx"
        data = numpy.array(
            self.wrFile.readDataFromExcel2(filePath=filePath, max_row=48))
        accuracy = numpy.zeros(203).reshape((203, 1))
        for col in range(0, 203):  # should be selected from back to front
            X = data[:, col * 4:(col + 1) * 4]
            if col >= 199:
                label = numpy.array(data[:, col * 4 + 3 + 1 * 4]).reshape(
                    (48, 1))
            else:
                label = numpy.array(data[:, col * 4 + 3 + Bmax * 4]).reshape(
                    (48, 1))
            # get samples
            whole_index = range(0, 48)
            sample_index = ran.sample(whole_index, 43)
            test_index = list(set(whole_index) - set(sample_index))
            sample_data = X[sample_index][:, :3]
            sample_label = label[sample_index]

            test_data = X[test_index][:, :3]
            test_label = label[test_index]

            #carry out training
            try:
                clf = SVC(gamma='auto')
                clf.fit(sample_data, sample_label)
                accuracy_temp = clf.score(test_data, test_label)
                accuracy[col] = accuracy_temp
            except ValueError:
                accuracy[col] = 1
        #print(accuracy.shape)
        filePath = "F:\\project\\dataset\\vr\\Formated_Data\\Experiment_1/prediction_accuracy.xlsx"
        self.wrFile.writeDataIntoExcel(data=accuracy, filePath=filePath)

    def plotScatter(self, x, y, z, slot):
        plt.scatter(x=x, y=y, c=z)
        plt.xlabel("x", fontsize=20)
        plt.ylabel("y", fontsize=20)
        plt.legend("z", fontsize=20)
        # save before show(); saving after show() would write a blank figure
        plt.savefig(
            "F:\\project\\dataset\\vr\\Formated_Data\\Experiment_1\\image/xyz"
            + str(slot) + ".png")
        plt.show()

    def plotPie(self, data):
        #3 11 34
        labels = ["class-1", "class-2", "class-3"]
        sizes = data
        plt.pie(sizes, labels=labels)
        plt.show()

    def plotPlot(self, x, y):
        plt.plot(x, y, "-.")
        plt.xlabel("time/second")
        plt.ylabel("prediction precision")
        plt.show()

    def clusterData(self, filePath):
        data = numpy.array(
            self.wrFile.readDataFromExcel2(filePath=filePath, max_row=48))
        #X = data[:,:3]
        #cluster_result = numpy.zeros((204,3))
        label_result = None
        for col in range(0, 204):
            X = data[:, col * 3:(col + 1) * 3]
            #samples = numpy.sample(range(0,48),43)
            #test = set(range(0,48))-set(samples)

            # compute the Euclidean distances between the vectors
            #print(" col ",col)
            #dist =  DistanceMetric.get_metric('euclidean')
            #result = dist.pairwise(X)
            #print(result)

            # run the clustering
            eps = 0.6
            clustering = DBSCAN(eps=eps, min_samples=5).fit(X)
            labels = numpy.array(clustering.labels_).reshape((48, 1))
            #print(clustering.labels_)

            # record the clustering result
            X = numpy.hstack((X, labels))
            if col == 0:
                label_result = X
            else:
                label_result = numpy.hstack((label_result, X))
        filePath = "F:\\project\\dataset\\vr\\Formated_Data\\Experiment_1/cluster_statistic.xlsx"
Code example #9
 def writeAndNormalize(self):
     data = self.m / (np.max(self.m))
     wrFile = WRFile()
     wrFile.writeDataIntoExcel(data=data,
                               filePath="F:/test/ATBM_" + str(self.q) +
                               "burst.xlsx")
Code example #10
                end = (J + 1) * timeIncrement
                if end > num:
                    end = num
                day_begin = max(end - 60 * 60, 0)
                m = numpy.mean(data[begin:end])
                v = numpy.var(data[begin:end])
                Y.append(v / m)
                tol = abs(1 - Y[len(Y) - 1] / Y[len(Y) - 2])
                J += 1
            idc_data[start:end] = Y[len(Y) - 1]
            #print("converged IDC", Y[len(Y) - 1], "end", end)
            tol = 1
            start = end
        '''
        plt.plot(numpy.arange(0,len(self.data)),self.data/400,"g-",label = "workload/400")
        plt.plot(numpy.arange(0,len(idc_data)),idc_data,"m",label = "IDC")
        plt.legend(loc = "upper right")
        plt.title("Bursty Evaluation of IDC")
        plt.xlabel("Time(seconds)")
        plt.ylabel("Bursty Intensity")
        plt.grid(True)
        '''
        return idc_data


filePath = "F:/test/workload.xlsx"
burstResult = "F:/test/idc_burst.xlsx"
data = AnalyseIDC(filePath).caculIDC()
wrFile = WRFile()
wrFile.writeDataIntoExcel(data=data, filePath=burstResult)
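For reference, the quantity that converges in the loop of code example #10 is the variance-to-mean ratio of the workload counts in a growing window, which is what the IDC (Index of Dispersion for Counts) burstiness measure is built from. A minimal sketch of that ratio on its own, assuming a plain list or array window (the helper name is hypothetical):

import numpy

def dispersion_ratio(window):
    # variance-to-mean ratio of the counts in one window
    # (the quantity appended to Y in the snippet above)
    window = numpy.array(window, dtype=float)
    return numpy.var(window) / numpy.mean(window)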