Example #1
def plotTrends(data,
               keys,
               outputDir,
               vector,
               plotTitle,
               fileName,
               moduleName=None):
    print("Plotting Service Times for {}...".format(plotTitle))

    plotData = []
    for renTime in keys["renegingTime"]:
        times, values, _ = filterData(data, vector, renTime, moduleName)

        assert len(times) == len(values)

        seedsData = []
        for seed in keys["seed"]:
            #print("ren time: {}, seed: {}".format(renTime, seed))
            seedsData.append((times[seed], values[seed]))

        qTimes, qValues = quantizeData(getTupleValues(0, seedsData),
                                       getTupleValues(1, seedsData),
                                       step=100.0)
        plotData.append((qTimes, runningAvg(qValues)))

    titles = {
        "title": plotTitle,
        "x": "Simulation Time [s]",
        "y": "Service Time [s]"
    }
    plotGraph(getTupleValues(0, plotData),
              getTupleValues(1, plotData),
              titles,
              savePath=os.path.join(outputDir, fileName))
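
quantizeData and runningAvg are project helpers that are not shown here. A minimal sketch of runningAvg, assuming it returns the cumulative mean of the quantized values (an assumption, not the project's actual code):

import numpy as np

def runningAvg(values):
    # Assumed behavior: out[i] is the mean of values[:i+1],
    # which smooths the quantized series into a trend line.
    values = np.asarray(values, dtype=float)
    return np.cumsum(values) / np.arange(1, len(values) + 1)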
Example #2
def train_top_model():
    print("Top model training started...")

    train_per_class = s.nb_train_samples // s.num_classes
    valid_per_class = s.nb_validation_samples // s.num_classes

    # Load saved features from bottlenecks
    train_data = np.load(bottleneck_train_datapath)
    # Labels run 0..num_classes-1, one contiguous block per class
    train_labels = np.repeat(np.arange(s.num_classes), train_per_class)

    validation_data = np.load(bottleneck_valid_datapath)
    validation_labels = np.repeat(np.arange(s.num_classes), valid_per_class)

    train_labels = to_categorical(train_labels, num_classes=s.num_classes)
    validation_labels = to_categorical(validation_labels,
                                       num_classes=s.num_classes)

    # Create new top layers
    model = u.obtainNewTopLayers(train_data.shape[1:], s.num_classes)

    # Compile the model using Stochastic Gradient Descent and a low learning rate
    optimizer = SGD(lr=1e-3, momentum=0.9)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Start training...
    print("Fitting...")
    history = model.fit(train_data,
                        train_labels,
                        epochs=s.botEpochs,
                        batch_size=s.batch_size,
                        validation_data=(validation_data, validation_labels),
                        verbose=1)

    model.save_weights(s.top_model_weights_path)
    model.save(s.top_model_model_path)

    print("Model and weights saved...")

    # Create graphs
    legend = ['Training', 'Validation']
    accData = [history.history['acc'], history.history['val_acc']]
    lossData = [history.history['loss'], history.history['val_loss']]
    u.plotGraph(accData, "Feature Extraction Accuracy", "Epoch", "Accuracy",
                legend, bottleneck_accuracy_plot_path)
    u.plotGraph(lossData, "Feature Extraction Loss", "Epoch", "Loss", legend,
                bottleneck_loss_plot_path)
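
The function pulls every hyperparameter from a settings module s and relies on u.obtainNewTopLayers and u.plotGraph from a utilities module. A sketch of the settings fields the snippet assumes (all values illustrative, not from the project):

# settings.py -- illustrative values only
nb_train_samples = 2400        # must be divisible by num_classes
nb_validation_samples = 600
num_classes = 12               # the label blocks run 0..11
batch_size = 32
botEpochs = 50
top_model_weights_path = 'top_model_weights.h5'
top_model_model_path = 'top_model.h5'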
Example #3
def computeMeanEnergyConsumption(data, keys, outputDir):
	print("Plotting Mean Energy Consumption... ")
	
	wifiPowerCoefficient = 0.7
	cellularPowerCoefficient = 2.5
	energyData = copy.deepcopy(data)
	
	mecData = []
	confidenceValues = {}
	
	for renTime in keys["renegingTime"]:
		wTimes, wValues, _ = filterData(data, "jobServiceTime:vector", renTime, "FullOffloadingNetwork.wifiQueue")
		cTimes, cValues, _ = filterData(data, "jobServiceTime:vector", renTime, "FullOffloadingNetwork.cellularQueue")
		
		confidenceValues[renTime] = {}
		mecs = []
		for seed in keys["seed"]:
			if seed not in wValues or seed not in cValues:
				continue
			wst = np.sum(wValues[seed]) * wifiPowerCoefficient
			cst = np.sum(cValues[seed]) * cellularPowerCoefficient
			mec = ((wst + cst) / (len(wValues[seed]) + len(cValues[seed]))) / 60.0
			
			mecs.append(mec)
			confidenceValues[renTime][seed] = mec
			
		mec = np.mean(mecs)
		#print("ren time: {}, MEC: {:.4f}".format(renTime, mec))
		
		mecData.append([int(renTime), mec])
		energyData[renTime] = mec
	
	mecData = np.array(mecData)
	plotData = []
		
	mecData = mecData[mecData[:,0].argsort()]
	xs = mecData[:,0] / 60.0
	ys = mecData[:,1]
	coeff = np.polyfit(xs.flatten(), ys.flatten(), 5)
	p = np.poly1d(coeff)
	newys = p(xs).tolist()
	plotData.append((xs.tolist(), newys, "trend"))
	#plotData.append((xs.tolist(), ys.tolist(), "values"))	
	
	titles = {
		"title": "Full Offloading Model",
		"x": "Deadline [min]",
		"y": "Mean Energy Consumption [J]"
	}
	filePath = os.path.join(outputDir, "FullOffloading_Energy_Deadline.png")
	plotGraph(getTupleValues(0, plotData), getTupleValues(1, plotData), titles, legends=getTupleValues(2, plotData), savePath=filePath)
	
	return energyData, confidenceValues
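
The per-seed metric weights each queue's total service time by its radio power coefficient, averages over all jobs, and converts to per-minute units. A worked sketch with made-up numbers:

import numpy as np

wifi_times = np.array([12.0, 8.0, 15.0])   # illustrative jobServiceTime samples [s]
cell_times = np.array([20.0, 25.0])

wst = wifi_times.sum() * 0.7               # WiFi share
cst = cell_times.sum() * 2.5               # cellular share
mec = (wst + cst) / (len(wifi_times) + len(cell_times)) / 60.0
print(mec)                                 # ~0.4567, the per-seed MEC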
Example #4
def computeMeanResponseTime(data, keys, outputDir):
	print("Plotting Mean Response Time...")
	
	responseData = copy.deepcopy(data)
	mrtData = []
	confidenceValues = {}
	
	for renTime in keys["renegingTime"]:
		times, values, _ = filterData(data, "totalResponseTime:vector", renTime)
		confidenceValues[renTime] = {}
			
		respTimes = []
		for seed in keys["seed"]:
			meanRespTimePerSeed = np.mean(values[seed]) / 60.0
			respTimes.append(meanRespTimePerSeed)
			confidenceValues[renTime][seed] = meanRespTimePerSeed
			
		meanRespTimePerDeadline = np.mean(respTimes)
		#print("ren time: {}, MRT: {:.4f}".format(renTime, meanRespTimePerDeadline))
			
		mrtData.append([int(renTime), meanRespTimePerDeadline])
		responseData[renTime] = meanRespTimePerDeadline
	
	mrtData = np.array(mrtData)
	plotData = []
	
	mrtData = mrtData[mrtData[:,0].argsort()]
	xs = mrtData[:,0] / 60.0
	ys = mrtData[:,1]
	coeff = np.polyfit(xs.flatten(), ys.flatten(), 7)
	p = np.poly1d(coeff)
	newys = p(xs).tolist()
	plotData.append((xs.tolist(), newys, "trend"))
	#plotData.append((xs.tolist(), ys.tolist(), "values"))
	
	titles = {
		"title": "Full Offloading Model",
		"x": "Deadline [min]",
		"y": "Mean Response Time [min]"
	}
	filePath = os.path.join(outputDir, "FullOffloading_Response_Deadline.png")
	plotGraph(getTupleValues(0, plotData), getTupleValues(1, plotData), titles, legends=getTupleValues(2, plotData), savePath=filePath)

	return responseData, confidenceValues
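
Both plotting functions smooth the sorted per-deadline means with a least-squares polynomial before plotting (degree 5 for energy, degree 7 for response time). The pattern in isolation, with illustrative data:

import numpy as np

xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0])    # deadlines [min], illustrative
ys = np.array([3.2, 2.9, 2.7, 2.6, 2.55])   # mean response times, illustrative

coeff = np.polyfit(xs, ys, 3)    # low degree here; beware overfitting with few points
p = np.poly1d(coeff)
trend = p(xs).tolist()           # the smoothed series plotted as "trend"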
Example #5
def computeERWP(responseData, energyData, keys, outputDir, w=None):
	print("Plotting ERWP... ")
	
	if not w:
		w = [0.5]
	
	erwpData = []
	for exp in w:
		for renTime in keys["renegingTime"]:
			meanRespTime = responseData[renTime]
			meanEnergyCons = energyData[renTime]
			erwp = np.multiply(np.power(meanEnergyCons, exp), np.power(meanRespTime, 1-exp))
			erwpData.append([int(renTime)/60.0, erwp, exp])
	
	erwpData = np.array(erwpData)
	plotData = []
	for exp in w:
		mask = (erwpData[:,2] == exp)
		subMatrix = erwpData[mask]
		subMatrix[:,0] = 1.0 / subMatrix[:,0]
		subMatrix = subMatrix[subMatrix[:,0].argsort()]
		xs = subMatrix[:,0]
		ys = subMatrix[:,1]
		coeff = np.polyfit(xs.flatten(), ys.flatten(), 5)
		p = np.poly1d(coeff)
		newys = p(xs).tolist()
		plotData.append((xs.tolist(), newys, "w: {}".format(exp)))
		#plotData.append((xs.tolist(), ys, "w: {}".format(exp)))
	
	titles = {
		"title": "Full Offloading Model",
		"x": "Reneging Rate r",
		"y": "ERWP"
	}
	filePath = os.path.join(outputDir, "FullOffloading_ERWP_RenegingRate.png")
	plotGraph(getTupleValues(0, plotData), getTupleValues(1, plotData), titles, getTupleValues(2, plotData), savePath=filePath)
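
ERWP trades energy against response time as E^w * T^(1-w); with w = 0.5 it is the geometric mean of the two metrics. A quick check with illustrative numbers:

import numpy as np

E, T, w = 4.0, 9.0, 0.5                    # illustrative energy, response time, weight
erwp = np.power(E, w) * np.power(T, 1 - w)
print(erwp)                                # 6.0 == sqrt(4 * 9) when w == 0.5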
Example #6
from utils import readGraphs, plotGraph
from GraphCollection import GraphCollection

graphs = readGraphs('mico.outx')

if __name__ == "__main__":
    graphDB = GraphCollection(graphs, 0.8)
    print(graphDB.freqEdges)
    # exit(0)
    print("End frequent", len(graphs))
    for graph in graphs:
        plotGraph(graph, isShowedID=False)
        # plotGraph(graph)
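
GraphCollection is not shown; judging from freqEdges and the 0.8 argument, it appears to keep edges whose support across the graph database meets a minimum threshold. A minimal sketch of that idea (an assumption, not the project's implementation):

from collections import Counter

def frequentEdges(graphs, min_support):
    # Count the graphs in which each (hashable) edge key occurs,
    # then keep edges present in at least min_support * len(graphs) graphs.
    counts = Counter()
    for g in graphs:
        counts.update(set(g))
    threshold = min_support * len(graphs)
    return {e: c for e, c in counts.items() if c >= threshold}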
Example #7
    validation_steps=s.nb_validation_samples // s.batch_size,
    verbose=1,
    callbacks=[lrSched])

model.save_weights(s.fine_tuned_weights_path)
model.save(s.fine_tuned_model_path)

print("Model and weights saved...")
print("Trying to evaluate...")

# Evaluate fine tuned model
metrics = model.evaluate_generator(validation_generator,
                                   steps=s.nb_validation_samples //
                                   s.batch_size)
statsDict = dict(zip(model.metrics_names, metrics))
print(statsDict)

# Generate and save fine tuning plots
print("Saving plots...")
legend = ['Training', 'Validation']
accData = [history.history['acc'], history.history['val_acc']]
lossData = [history.history['loss'], history.history['val_loss']]
u.plotGraph(accData, "Fine Tuning Accuracy", "Epoch", "Accuracy", legend,
            fine_tuning_acc_plot_path)
u.plotGraph(lossData, "Fine Tuning Loss", "Epoch", "Loss", legend,
            fine_tuning_loss_plot_path)
u.plotGraph([lrs], "Learning Rates", "Epoch", "Learning Rate", None,
            fine_tuning_alr_plot_path)

print("Done!")
Example #8
File: spin.py Project: vinhsuhi/HUY_SPIN

if __name__ == "__main__":
    # frequents = getFrequentEdges(graphs,0.8)
    # print(frequents)
    # frequentGraph(graphs,frequents)
    
    graphDB = GraphCollection(graphs,1.0)
    print("Frequent edges",len(graphDB.freqEdges.items()))
    # plotGraph(graphDB.graphs[0])
    
    freqGraphs = graphDB.frequentGraph()
    print("freqGraphs",freqGraphs)
    # with open('result-{}.json'.format(datasets), 'w') as fp:
        # json.dump(freqGraphs, fp)
    # df = pd.DataFrame({"MaxSubgraph" : [np.array2string(g) for g in freqGraphs]})
    # df.to_csv("result-{}.csv".format(datasets),index=False)
    # exit()
    # freqGraphs = extractResultGraph(freqGraphs)
    # print("freqGraph",freqGraph)
    for freqGraph in freqGraphs:
        plotGraph(freqGraph,False)
   

    # for k,v in freqGraphs:
    #     for kk,vv in v.items():
    #         print("graph",kk)
    #         plotGraph(vv[0],False)
    # print(freqGraphs)
    
Example #9
        # model_demonstrator = DQN(in_size, out_size, save_model_name="")
        agent_demonstrator = DQNAgent(model_dqn,
                                      env,
                                      replay_start=50,
                                      target_update_freq=0.005)
        agent_demonstrator.memory.reset()
        agent_demonstrator.run(update_memory=True, num_step=12e3)

        model_dqfd = DQN(in_size, out_size, save_model_name="dqfd")
        # model_dqfd = TestDQN(in_size, out_size, save_model_name="dqfd")
        agent_dqfd = DQfDAgent(model_dqfd,
                               env,
                               agent_demonstrator.memory,
                               double_DQN=True,
                               replay_start=200,
                               target_update_freq=0.005,
                               eps_start=0.9,
                               eps_decay=1000,
                               n_step=1,
                               demo_percent=0.3,
                               lambda_1=0,
                               lambda_2=0.05,
                               expert_margin=0.5)

        agent_dqfd.pre_train(6000)
        agent_dqfd.train_test(num_step=steps, test_period=200, test_step=500)

        filename = str(i) + ".png"
        plotGraph("dqn", "dqfd", "DQN", "DQfD", filename)