def GetFeature(self, request, context):
    """Serve one round of the synchronous distributed-SGD protocol.

    NOTE(review): this block was recovered from a whitespace-mangled
    source; the statement order is unchanged but the nesting shown here
    is a reconstruction -- confirm block boundaries against the VCS
    history before relying on them.

    `request.poids` is an overloaded string channel:
      * "pret"  -> the client asks for the training data,
      * "getw0" -> the client asks for the initial weight vector,
      * otherwise it carries "<gradient dict><bytes><transfer size>".

    Returns a route_guide_pb2.Vector whose `poids` field holds the
    serialized reply, the next parameter vector, or the literal 'stop'
    once the convergence criterion fires.
    """
    ######################################################################
    # Section 1 : wait for all the clients -> get their vectors and
    # appoint one of them as the printer.
    self.iterator += 1
    if (request.poids == "pret" or request.poids == "getw0"):
        self.vectors.append(request.poids)
    else:
        # Payload carries a serialized gradient plus the number of bytes
        # transferred; tally the bytes per epoch in self.bytesTab.
        entry = request.poids.split("<bytes>")
        b = int(entry[1])
        if (self.epoch in self.bytesTab):
            self.bytesTab[self.epoch] += b
        else:
            self.bytesTab[self.epoch] = b
        v = std.str2dict(entry[0])
        self.vectors.append(v)
    # Entry barrier: block until all nbClients threads have checked in.
    # The last thread to pass becomes the "printer" for this round.
    self.enter_condition = (self.iterator == nbClients)
    waiting.wait(lambda: self.enter_condition)
    self.printerThreadName = threading.current_thread().name
    ######################################################################
    ######################################################################
    # Section 2 : compute the new vector -> send the data, a merge of
    # all the vectors we got from the clients or the message 'stop' the
    # signal to the client that we converged.
    normDiff = 0
    normGradW = 0
    normPrecW = 0
    if (request.poids == 'pret'):
        vector = std.datadict2Sstr(trainingSet) + "<samples>" + str(
            numSamples) + "<#compo>" + str(nbCompo)
    elif (request.poids == 'getw0'):
        vector = std.dict2str(w0) + "<<||>>" + str(self.step)
    else:
        # Modification of the vector of parameters
        gradParam = std.mergeSGD(self.vectors)
        vector = std.sparse_vsous(self.oldParam, gradParam)
        # Normalization of the vector of parameters
        normW = math.sqrt(std.sparse_dot(vector, vector))
        vector = std.sparse_mult(1 / normW, vector)
        # Checking of the stoping criterion
        diff = std.sparse_vsous(self.oldParam, vector)
        normDiff = math.sqrt(std.sparse_dot(diff, diff))
        normGradW = math.sqrt(std.sparse_dot(gradParam, gradParam))
        normPrecW = math.sqrt(std.sparse_dot(self.oldParam, self.oldParam))
        # Stop when the update is small relative to the previous weights,
        # the epoch budget is exhausted, or the merged gradient has shrunk
        # below a fraction of the initial gradient norm.
        if ((normDiff <= c1 * normPrecW) or (self.epoch > nbMaxCall) or
                (normGradW <= c2 * self.normGW0)):
            self.paramVector = vector
            vector = 'stop'
        else:
            vector = std.dict2str(vector) + "<<||>>" + str(self.step)
    ######################################################################
    ######################################################################
    # Section 3 : wait that all the threads pass the computation area, and
    # store the new computed vector.
    realComputation = (request.poids != 'pret') and (
        request.poids != 'getw0') and (vector != 'stop')
    self.iterator -= 1
    # Exit barrier: wait until every thread has left the compute section.
    self.exit_condition = (self.iterator == 0)
    waiting.wait(lambda: self.exit_condition)
    if (realComputation):
        self.oldParam = std.str2dict(vector.split("<<||>>")[0])
    ######################################################################
    ###################### PRINT OF THE CURRENT STATE ######################
    ##################### AND DO CRITICAL MODIFICATIONS ####################
    if (threading.current_thread().name == self.printerThreadName):
        # NOTE(review): the stop test above reads self.normGW0 while this
        # call passes the non-attribute name normGW0 -- presumably the
        # same quantity; confirm they cannot diverge.
        std.printTraceRecData(self.epoch, vector, self.paramVector,
                              self.testingErrors, self.trainingErrors,
                              normDiff, normGradW, normPrecW, normGW0,
                              realComputation, self.oldParam, trainingSet,
                              testingSet, nbTestingData, nbExamples, c1,
                              c2, l, nbCompo, filePath)
        self.merged.append(self.oldParam)
        self.epoch += 1
        # Geometric step-size decay, performed once per round by the
        # printer thread only.
        self.step *= 0.9  #std.stepSize(nbExamples, self.epoch, nbDesc, nbCompo)
    ############################### END OF PRINT ###########################
    # Sanity probe: score one fixed training example with the current
    # model and print the label next to the SVM output.
    dataTest = trainingSet[9]
    label = dataTest.get(-1, 0)
    example = std.take_out_label(dataTest)
    print("label = " + str(label))
    print("SVM says = " + str(std.sparse_dot(self.oldParam, example)))
    ######################################################################
    # Section 4 : empty the storage list of the vectors, and wait for all
    # the threads.
    self.vectors = []
    waiting.wait(lambda: (self.vectors == []))
    ######################################################################
    #time.sleep(1)
    return route_guide_pb2.Vector(poids=vector)
def GetFeature(self, request, context):
    """Serve one round of the distributed-SGD protocol (sync or async).

    NOTE(review): recovered from a whitespace-mangled source; statement
    order is unchanged but the nesting is a reconstruction -- verify
    block boundaries against the VCS history.

    `request.poids` is an overloaded string channel:
      * "pret"  -> the client asks for the training data,
      * "getw0" -> the client asks for the initial weight vector,
      * otherwise it carries "<gradient dict><delay><stale weights>".

    Returns a route_guide_pb2.Vector whose `poids` field holds the
    serialized reply, the next parameter vector, or the literal 'stop'
    once the convergence criterion fires.
    """
    ######################################################################
    # Section 1 : wait for all the clients -> get their vectors and
    # appoint one of them as the printer.  Entry barrier is only needed
    # in synchronous mode.
    if (way2work == "sync"):
        self.iterator += 1
        if (request.poids == "pret" or request.poids == "getw0"):
            self.vectors.append(request.poids)
        else:
            self.vectors.append(
                std.str2dict(request.poids.split("<delay>")[0]))
        self.enter_condition = (self.iterator == nbClients)
        waiting.wait(lambda: self.enter_condition)
        self.printerThreadName = threading.current_thread().name
    ######################################################################
    ######################################################################
    # Section 2 : compute the new vector -> send the data, a merge of
    # all the vectors we got from the clients or the message 'stop' the
    # signal to the client that we converged.
    normDiff = 0
    normGradW = 0
    normPrecW = 0
    if (request.poids == 'pret'):
        vector = std.datadict2Sstr(trainingSet) + "<depre>" + str(
            l) + "<samples>" + str(numSamples)
    elif (request.poids == 'getw0'):
        vector = std.dict2str(w0)
    else:
        if (way2work == "sync"):
            gradParam = std.mergeSGD(self.vectors)
            # BUGFIX: measure the gradient norm from the merged gradient
            # (before step scaling), as the other GetFeature variants do.
            # The previous code measured the freshly unit-normalized
            # parameter vector, so normGradW was always 1.0 and the
            # gradient-based stopping test below was degenerate.
            normGradW = math.sqrt(std.sparse_dot(gradParam, gradParam))
            gradParam = std.sparse_mult(self.step, gradParam)
            vector = std.sparse_vsous(self.oldParam, gradParam)
        else:
            info = request.poids.split("<delay>")
            grad_vector = std.str2dict(info[0])
            # BUGFIX (same issue, async branch): use the client gradient.
            normGradW = math.sqrt(std.sparse_dot(grad_vector, grad_vector))
            wt = std.str2dict(info[1])
            vector = std.asynchronousUpdate(self.oldParam, grad_vector,
                                            wt, l, self.step)
        ######## NORMALIZATION OF THE VECTOR OF PARAMETERS #########
        normW = math.sqrt(std.sparse_dot(vector, vector))
        vector = std.sparse_mult(1. / normW, vector)
        ############################################################
        diff = std.sparse_vsous(self.oldParam, vector)
        normDiff = math.sqrt(std.sparse_dot(diff, diff))
        normPrecW = math.sqrt(std.sparse_dot(self.oldParam, self.oldParam))
        # Stop when the update is small relative to the previous weights,
        # the epoch budget is exhausted, or the gradient has shrunk below
        # a fraction of the initial gradient norm.
        if ((normDiff <= c1 * normPrecW) or (self.epoch > nbMaxCall) or
                (normGradW <= c2 * self.normgW0)):
            self.paramVector = vector
            vector = 'stop'
        else:
            vector = std.dict2str(vector)
    ######################################################################
    ######################################################################
    # Section 3 : wait that all the threads pass the computation area, and
    # store the new computed vector.
    realComputation = (request.poids != 'pret') and (
        request.poids != 'getw0') and (vector != 'stop')
    if (way2work == "sync"):
        self.iterator -= 1
        self.exit_condition = (self.iterator == 0)
        waiting.wait(lambda: self.exit_condition)
    if (realComputation):
        self.oldParam = std.str2dict(vector)
    ######################################################################
    ###################### PRINT OF THE CURRENT STATE ######################
    ##################### AND DO CRITICAL MODIFICATIONS ####################
    # Idiom fix: use boolean 'and' with explicit grouping instead of the
    # bitwise '&'; the grouping ((printer and sync) or async) is the one
    # the original precedence produced, now spelled out.  Printer thread
    # reports in sync mode; every thread reports in async mode.
    if (((threading.current_thread().name == self.printerThreadName) and
            (way2work == "sync")) or (way2work == "async")):
        std.printTraceGenData(self.epoch, vector, self.paramVector,
                              self.testingErrors, self.trainingErrors,
                              trainaA, trainaB, trainoA, trainoB, hypPlace,
                              normDiff, normGradW, normPrecW, self.normgW0,
                              w0, realComputation, self.oldParam,
                              trainingSet, testingSet, nbTestingData,
                              nbExamples, nbMaxCall, self.merged, "", c1,
                              c2, l)
        self.merged.append(self.oldParam)
        self.epoch += 1
        # Geometric step-size decay, once per reported round.
        self.step *= 0.9
        # Sanity probe: score one fixed training example.
        dataTest = trainingSet[9]
        label = dataTest.get(-1, 0)
        example = std.take_out_label(dataTest)
        print("label = " + str(label))
        print("SVM says = " + str(std.sparse_dot(self.oldParam, example)))
    ############################### END OF PRINT ###########################
    ######################################################################
    # Section 4 : empty the storage list of the vectors, and wait for all
    # the threads.
    self.vectors = []
    waiting.wait(lambda: (self.vectors == []))
    ######################################################################
    #time.sleep(1)
    return route_guide_pb2.Vector(poids=vector)
def GetFeature(self, request, context):
    """Serve one round of the chunked distributed-SGD protocol (sync/async).

    NOTE(review): recovered from a whitespace-mangled source; statement
    order is unchanged but the nesting is a reconstruction -- verify
    block boundaries against the VCS history.

    `request.poids` is an overloaded string channel:
      * "pret"       -> client asks for the chunking/meta parameters,
      * "chunk<nb>N" -> client asks for the N-th slice of the training set,
      * "getw0"      -> client asks for the initial weight vector,
      * otherwise it carries "<gradient dict><delay><stale weights>".
    """
    ######################################################################
    # Section 1 : wait for all the clients -> get their vectors and
    # appoint one of them as the printer.
    print(self.epoch)
    self.printerThreadName = threading.current_thread().name
    # Entry barrier for the data-distribution requests (pret / getw0 /
    # chunk fetches).
    if (request.poids == "pret" or request.poids == "getw0" or
            request.poids[:5] == "chunk"):
        self.iterator += 1
        self.vectors.append(request.poids)
        self.enter_condition = (self.iterator == nbClients)
        waiting.wait(lambda: self.enter_condition)
    # Entry barrier for gradient payloads, synchronous mode only.
    if ((way2work == "sync") and (request.poids != "pret") and
            (request.poids != "getw0") and (request.poids[:5] != "chunk")):
        self.iterator += 1
        self.vectors.append(std.str2dict(
            request.poids.split("<delay>")[0]))
        self.enter_condition = (self.iterator == nbClients)
        waiting.wait(lambda: self.enter_condition)
    if ((threading.current_thread().name == self.printerThreadName) and
            (self.epoch == 1)):
        ############ Starting of the timer to time the run ############
        self.startTime = time.time()
    ######################################################################
    ######################################################################
    # Section 2 : compute the new vector -> send the data, a merge of
    # all the vectors we got from the clients or the message 'stop' the
    # signal to the client that we converged.
    normDiff = 0
    normGradW = 0
    normPrecW = 0
    if (request.poids == 'pret'):
        vector = str(nbChunks) + "<depre>" + str(l) + "<samples>" + str(
            numSamples)
    elif (request.poids[:5] == 'chunk'):
        # "chunk<nb>N": serialize the N-th chunkSize-wide slice of the
        # training set (1-based chunk index).
        chunk = request.poids.split("<nb>")
        chunk = int(chunk[1])
        vector = std.datadict2Sstr(
            trainingSet[(chunk - 1) * chunkSize:chunk * chunkSize])
    elif (request.poids == 'getw0'):
        vector = std.dict2str(w0)
    else:
        if (way2work == "sync"):
            gradParam = std.mergeSGD(self.vectors)
            # Epoch 2 carries the first real gradient: remember its norm
            # as the reference for the stopping criterion.
            if (self.epoch == 2):
                self.normGradW0 = math.sqrt(
                    std.sparse_dot(gradParam, gradParam))
            normGradW = math.sqrt(std.sparse_dot(gradParam, gradParam))
            gradParam = std.sparse_mult(self.step, gradParam)
            vector = std.sparse_vsous(self.oldParam, gradParam)
        else:
            info = request.poids.split("<delay>")
            grad_vector = std.str2dict(info[0])
            if (self.epoch == 2):
                self.normGradW0 = math.sqrt(
                    std.sparse_dot(grad_vector, grad_vector))
            normGradW = math.sqrt(std.sparse_dot(grad_vector, grad_vector))
            wt = std.str2dict(info[1])
            vector = std.asynchronousUpdate(self.oldParam, grad_vector,
                                            wt, l, self.step)
        ######## NORMALIZATION OF THE VECTOR OF PARAMETERS #########
        normW = math.sqrt(std.sparse_dot(vector, vector))
        vector = std.sparse_mult(1. / normW, vector)
        ############################################################
        diff = std.sparse_vsous(self.oldParam, vector)
        normDiff = math.sqrt(std.sparse_dot(diff, diff))
        normPrecW = math.sqrt(std.sparse_dot(self.oldParam, self.oldParam))
        # NOTE(review): the reference stored above is self.normGradW0,
        # but the test below reads the non-attribute name normGW0 --
        # confirm these are meant to be the same value.
        if ((normDiff <= c1 * normPrecW) or (self.epoch > nbMaxCall) or
                (normGradW <= c2 * normGW0)):
            self.paramVector = vector
            # Trace which of the three stopping clauses fired.
            print("1 : " + str((normDiff <= c1 * normPrecW)))
            print("2 : " + str((self.epoch > nbMaxCall)))
            print("3 : " + str((normGradW <= c2 * normGW0)))
            vector = 'stop'
        else:
            vector = std.dict2str(vector)
    ######################################################################
    ######################################################################
    # Section 3 : wait that all the threads pass the computation area, and
    # store the new computed vector.
    realComputation = (request.poids != 'pret') and (
        request.poids != 'getw0') and (vector != 'stop') and (
        request.poids[:5] != 'chunk')
    if (realComputation):
        self.oldParam = std.str2dict(vector)
    ######################################################################
    ###################### PRINT OF THE CURRENT STATE ######################
    ##################### AND DO CRITICAL MODIFICATIONS ####################
    # Printer thread reports in sync mode; every thread reports in async.
    if (((threading.current_thread().name == self.printerThreadName) and
            (way2work == "sync")) or (way2work == "async")):
        print("oooooooo")
        endTime = time.time()
        duration = endTime - self.startTime
        if (vector == 'stop'):
            print("The server ran during : " + str(duration))
        std.printTraceRecData(self.epoch, vector, self.testingErrors,
                              self.trainingErrors, normDiff, normGradW,
                              normPrecW, normGW0, realComputation,
                              self.oldParam, trainingSet, testingSet,
                              nbTestingData, nbExamples, c1, c2, l,
                              duration, filePath)
        self.merged.append(self.oldParam)
        if (realComputation):
            self.epoch += 1
            # Geometric step-size decay once per effective update.
            self.step *= 0.9
    ############################### END OF PRINT ###########################
    ######################################################################
    # Section 4 : empty the storage list of the vectors, and wait for all
    # the threads.
    self.vectors = []
    ######################################################################
    ######################################################################
    # Section 5 : synchronize all clients at the end of a server iteration
    if (way2work == "sync"):
        self.iterator -= 1
        self.exit_condition = (self.iterator == 0)
        waiting.wait(lambda: self.exit_condition)
    #time.sleep(1)
    return route_guide_pb2.Vector(poids=vector)
# Smoke-test the sparse-vector helpers in `std` against the sample
# vectors spV1 and spV2 defined earlier in the file, echoing each result.

def opp(x):
    """Unary negation; the function mapped over spV1 below."""
    return (-x)

print("############### Test of the sparse map. ###############")
print('')
spdmap = std.sparse_map(opp, spV1)
print(f"spdmap = {spdmap}")
print('')
print("############ Test of the sparse soustraction. #########")
print('')
spdsous = std.sparse_vsous(spV1, spV2)
print(f"spdsous = {spdsous}")
print('')
print("############ Test of the sparse division. ############")
print('')
spddiv = std.sparse_vdiv(spV1, spV2)
print(f"spddiv = {spddiv}")
print('')
print("############ Test of the elementwise multiplication. ############")
print('')
spdmult = std.sparse_mult(2, spV1)
print(f"spdmult = {spdmult}")