def process_chunk(chunk):
    line = chunk[0]
    commands = []
    if "vlan" in line:
        vlan = VLAN(chunk, iface_map)
        commands = vlan.generate_junos()
    elif "interface ve" in line:
        ve = VE(chunk)
    elif "interface ethernet" in line:
        iface = INTERFACE(chunk)
        # print(iface)
    elif "lag" in line:
        lag = LAG(chunk, iface_map)
        commands = lag.generate_junos()
        for c in commands:
            print(c)
    elif "snmp-server" in line:
        snmp = SNMP(chunk)
    elif "router ospf" in line:
        ospf = OSPF(chunk, debug=0)
        commands = ospf.generate_junos()
    elif "router bgp" in line:
        bgp = BGP(chunk)
        commands = bgp.generate_junos()
    return commands
def LAG_train_unit(x_1, x_2, x_3, Y):
    input_list = [x_1, x_2, x_3]
    the_list = []
    lag_list = []
    for i in range(3):
        print("Currently doing LAG for hop {}".format(i + 1))
        lag = LAG(encode='distance',
                  num_clusters=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
                  alpha=10,
                  learner=myLLSR(onehot=False))
        lag.fit(input_list[i], Y)
        X_train_trans = lag.transform(input_list[i])
        the_list.append(X_train_trans)
        lag_list.append(lag)
    return the_list, lag_list
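# The fitted units in lag_list above are what a test pass would reuse. A minimal
# sketch of that counterpart, assuming hypothetical per-hop test features
# t_1, t_2, t_3 shaped like the training inputs (names are illustrative only):
def LAG_test_unit(t_1, t_2, t_3, lag_list):
    test_list = [t_1, t_2, t_3]
    transformed = []
    for i in range(3):
        # reuse the LAG unit fitted on the matching training hop
        transformed.append(lag_list[i].transform(test_list[i]))
    return transformed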
def lag_module(params, use_filters):
    all_train_labels = load_data(
        os.path.join(params.save_data, "all_train_labels"))
    for ratio in params.ratios:
        time_start = time.time()
        if use_filters is True:
            filters = load_data(
                os.path.join(params.save_data, "filters_" + str(ratio)))
        subset_label = all_train_labels[:int(all_train_labels.shape[0] *
                                             ratio)].copy()
        for i in range(2, params.num_layers):  ## ATTENTION
            data = []
            for j in range(0, 50000, 5000):
                data.append(
                    load_data(
                        os.path.join(params.save_data,
                                     'out_{}_{}'.format(j, i))))
            data = np.concatenate(data, axis=0)
            subset_data = data[:int(data.shape[0] * ratio)].copy()
            del data
            subset_data = np.reshape(subset_data,
                                     newshape=(subset_data.shape[0], -1))
            print("subset data shape:", subset_data.shape)
            if use_filters is True:
                print("Number of selected channels:", np.sum(filters[i]))
                subset_data = subset_data[:, filters[i]]
            lag = LAG(
                num_clusters=[10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
                alpha=10,
                learner=myLLSR(onehot=False))
            lag.fit(subset_data, subset_label)
            subset_predprob = lag.predict_proba(subset_data)
            save_data(
                lag,
                os.path.join(params.save_data, 'LAG_{}_{}'.format(i, ratio)))
            save_data(
                subset_predprob,
                os.path.join(params.save_data,
                             'lag_predict_{}_{}'.format(i, ratio)))
            print("RATIO=", ratio, " LAYER=", i, " DONE")
        print("Time cost - LAG:", time.time() - time_start, "ratio=", ratio)
        save_data(
            subset_label,
            os.path.join(params.save_data,
                         'lag_labels_{}_{}'.format(i, ratio)))
def lag_oper(feature_ce, test_ce, train_label):
    my_LAG = LAG(encode='distance',
                 num_clusters=[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
                 alpha=10,
                 learner=myLLSR(onehot=False))
    my_LAG.fit(feature_ce, train_label)
    X_train_trans = my_LAG.transform(feature_ce)
    X_test_trans = my_LAG.transform(test_ce)
    # X_train_pred_prob = my_LAG.predict_proba(feature_ce)
    # print(" --> train acc: %s" % str(my_LAG.score(feature_ce, train_label)))
    return X_train_trans, X_test_trans
def get_ops(dataset):
    ckpt = LAG.load(FLAGS.ckpt).eval_mode(dataset=dataset)

    def eval(batch):
        v = ckpt.sess.run(ckpt.ops.eval_op, feed_dict={ckpt.ops.x: batch})
        return v

    def sres(batch, noise):
        v = ckpt.sess.run(ckpt.ops.sres_op,
                          feed_dict={
                              ckpt.ops.y: batch,
                              ckpt.ops.noise: noise
                          })
        return v

    def lores(batch):
        return ckpt.sess.run(ckpt.ops.downscale_op,
                             feed_dict={ckpt.ops.x: batch})

    def hires(batch):
        return ckpt.sess.run(ckpt.ops.upscale_op,
                             feed_dict={ckpt.ops.y: batch})

    return EasyDict(eval=eval, sres=sres, lores=lores, hires=hires)
f = open('feature_set_avg_max_single.pkl', 'wb')
pickle.dump(feature_set, f)
f.close()
print()
print('feature_set saved!')
print()
# # f = open('feature_set.pkl', 'rb')
# # feature_set = pickle.load(f)
# # f.close()

lag_rep = [
    LAG(encode='distance', num_clusters=[7] * 10, alpha=10,
        learner=LLSR(onehot=False)),
    LAG(encode='distance', num_clusters=[6] * 10, alpha=10,
        learner=LLSR(onehot=False)),
    LAG(encode='distance', num_clusters=[5] * 10, alpha=10,
        learner=LLSR(onehot=False)),
    # LAG(encode='distance', num_clusters=[7] * 10, alpha=10, learner=LLSR(onehot=False)),
    # LAG(encode='distance', num_clusters=[6] * 10, alpha=10, learner=LLSR(onehot=False)),
    # LAG(encode='distance', num_clusters=[5] * 10, alpha=10, learner=LLSR(onehot=False)),
]
x_train_trans = []
# Feature Selection
timerObj.tic()
#selectedFeaturesIndeces_unit1_train, reducedFeatureImgs_unit1_train, numOfNs_unit1_train = featureSelector(pooled_module1Out1_train, subYTrain4)
selectedFeaturesIndeces_unit1_train, reducedFeatureImgs_unit1_train, numOfNs_unit1_train = featureSelector_binMethod(
    pooled_module1Out1_train, subYTrain4)
reducedFeatureImgs_unit1_test = featureSelectorWithoutCrossEnt(
    pooled_module1Out1_test, selectedFeaturesIndeces_unit1_train,
    numOfNs_unit1_train)
print("feature selection is done for train and test::")  # Elapsed time is 99.167789 seconds.
timerObj.toc()

# LAG Unit
lag_unit1 = LAG(encode='distance',
                num_clusters=[7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
                alpha=10,
                learner=myLLSR(onehot=False))
lag_unit1.fit(reducedFeatureImgs_unit1_train, subYTrain4)

timerObj.tic()
finalFeature_unit1_train = lag_unit1.transform(reducedFeatureImgs_unit1_train)
print("LAG is done for train set:")  # Elapsed time is 0.321958 seconds.
timerObj.toc()

timerObj.tic()
finalFeature_unit1_test = lag_unit1.transform(reducedFeatureImgs_unit1_test)
print("LAG is done for test set:")  # Elapsed time is 0.173667 seconds.
timerObj.toc()

# In[ ]:

## 2nd unit (depth 2)
output[i] = skimage.measure.block_reduce(output[i], (1, 2, 2, 1), np.max)

# reshaping
for i in range(0, 3):
    output[i] = output[i].reshape(
        10, output[i].shape[1] * output[i].shape[2] * output[i].shape[3])

# ------------------ cross entropy ------------------ #
ce = Cross_Entropy(num_class=10, num_bin=5)
entropy = np.zeros(output[2].shape[1])
rank = []
for j in range(0, output[2].shape[1]):
    entropy[j] = ce.KMeans_Cross_Entropy(output[2][:, j].reshape(-1, 1),
                                         y_train[0:10])
rank = np.argsort(-entropy)
output[2] = output[2][:, rank[0:15]]

# ------------------ LAG ------------------ #
lag = LAG(encode='distance',
          num_clusters=[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
          alpha=10,
          learner=myLLSR(onehot=False))

# ------------------ Module 3 ------------------ #
print('Time - %2d sec' % (time.time() - start_time))
print("------- TRAINING FINISHED -------\n")
def LAG_Fit(slctd_Features_Train, trainLabel):
    num1 = 10
    num2 = 10
    num3 = 10
    num4 = 10
    num5 = 10
    numCluster1 = [num1, num1, num1, num1, num1, num1, num1, num1, num1, num1]
    numCluster2 = [num2, num2, num2, num2, num2, num2, num2, num2, num2, num2]
    numCluster3 = [num3, num3, num3, num3, num3, num3, num3, num3, num3, num3]
    numCluster4 = [num4, num4, num4, num4, num4, num4, num4, num4, num4, num4]
    numCluster5 = [num5, num5, num5, num5, num5, num5, num5, num5, num5, num5]
    lag1 = LAG(encode='distance', num_clusters=numCluster1, alpha=10,
               learner=myLLSR(onehot=False))
    lag2 = LAG(encode='distance', num_clusters=numCluster2, alpha=10,
               learner=myLLSR(onehot=False))
    lag3 = LAG(encode='distance', num_clusters=numCluster3, alpha=10,
               learner=myLLSR(onehot=False))
    lag4 = LAG(encode='distance', num_clusters=numCluster4, alpha=10,
               learner=myLLSR(onehot=False))
    lag5 = LAG(encode='distance', num_clusters=numCluster5, alpha=10,
               learner=myLLSR(onehot=False))

    # Extract Training Features
    slctd_Features_Train1 = slctd_Features_Train[0]
    slctd_Features_Train2 = slctd_Features_Train[1]
    slctd_Features_Train3 = slctd_Features_Train[2]
    slctd_Features_Train4 = slctd_Features_Train[3]
    slctd_Features_Train5 = slctd_Features_Train[4]

    # Fit the five LAG units
    lag1.fit(slctd_Features_Train1, trainLabel)
    lag2.fit(slctd_Features_Train2, trainLabel)
    lag3.fit(slctd_Features_Train3, trainLabel)
    lag4.fit(slctd_Features_Train4, trainLabel)
    lag5.fit(slctd_Features_Train5, trainLabel)

    # Get the Transformed Training Features
    features_Train_Trans1 = lag1.transform(slctd_Features_Train1)
    features_Train_Trans2 = lag2.transform(slctd_Features_Train2)
    features_Train_Trans3 = lag3.transform(slctd_Features_Train3)
    features_Train_Trans4 = lag4.transform(slctd_Features_Train4)
    features_Train_Trans5 = lag5.transform(slctd_Features_Train5)

    # Cascade all the LAG features
    features_Train_LAG = np.concatenate(
        (features_Train_Trans1, features_Train_Trans2, features_Train_Trans3,
         features_Train_Trans4, features_Train_Trans5),
        axis=1)
    return features_Train_LAG, lag1, lag2, lag3, lag4, lag5
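# The five fitted LAG units are returned by LAG_Fit so the same transform can be
# replayed on held-out data. A minimal test-side sketch, assuming a hypothetical
# slctd_Features_Test list indexed the same way as slctd_Features_Train:
def LAG_Transform_Test(slctd_Features_Test, lag1, lag2, lag3, lag4, lag5):
    lags = [lag1, lag2, lag3, lag4, lag5]
    # apply each fitted unit to its matching test feature block
    transformed = [lags[k].transform(slctd_Features_Test[k]) for k in range(5)]
    # cascade the per-unit outputs exactly as LAG_Fit does for training
    return np.concatenate(transformed, axis=1)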
def LAG_Transform(slctd_Features_Train, slctd_Features_Test, trainLabel,
                  trainNum, testNum):
    lag1 = LAG(encode='distance',
               num_clusters=[12, 12, 12, 12, 12, 12, 12, 12, 12, 12],
               alpha=10, learner=myLLSR(onehot=False))
    lag2 = LAG(encode='distance',
               num_clusters=[12, 12, 12, 12, 12, 12, 12, 12, 12, 12],
               alpha=10, learner=myLLSR(onehot=False))
    lag3 = LAG(encode='distance',
               num_clusters=[12, 12, 12, 12, 12, 12, 12, 12, 12, 12],
               alpha=10, learner=myLLSR(onehot=False))
    #lag1 = LAG(encode='distance', num_clusters=[5,5,5,5,5,5,5,5,5,5], alpha=10, learner=myLLSR(onehot=False))
    #lag2 = LAG(encode='distance', num_clusters=[5,5,5,5,5,5,5,5,5,5], alpha=10, learner=myLLSR(onehot=False))
    #lag3 = LAG(encode='distance', num_clusters=[5,5,5,5,5,5,5,5,5,5], alpha=10, learner=myLLSR(onehot=False))

    # Extract Training Features
    slctd_Features_Train1 = slctd_Features_Train[0].reshape(trainNum, -1)
    slctd_Features_Train2 = slctd_Features_Train[1].reshape(trainNum, -1)
    slctd_Features_Train3 = slctd_Features_Train[2].reshape(trainNum, -1)

    # Extract Testing Features
    slctd_Features_Test1 = slctd_Features_Test[0].reshape(testNum, -1)
    slctd_Features_Test2 = slctd_Features_Test[1].reshape(testNum, -1)
    slctd_Features_Test3 = slctd_Features_Test[2].reshape(testNum, -1)

    # Fit the three LAG units
    lag1.fit(slctd_Features_Train1, trainLabel)
    lag2.fit(slctd_Features_Train2, trainLabel)
    lag3.fit(slctd_Features_Train3, trainLabel)

    # Get the Transformed Training Features
    features_Train_Trans1 = lag1.transform(slctd_Features_Train1)
    features_Train_Trans2 = lag2.transform(slctd_Features_Train2)
    features_Train_Trans3 = lag3.transform(slctd_Features_Train3)

    # Get the Transformed Testing Features
    features_Test_Trans1 = lag1.transform(slctd_Features_Test1)
    features_Test_Trans2 = lag2.transform(slctd_Features_Test2)
    features_Test_Trans3 = lag3.transform(slctd_Features_Test3)

    # Cascade all the LAG features
    features_Test_LAG = np.concatenate(
        (features_Test_Trans1, features_Test_Trans2, features_Test_Trans3),
        axis=1)
    features_Train_LAG = np.concatenate(
        (features_Train_Trans1, features_Train_Trans2, features_Train_Trans3),
        axis=1)
    return features_Test_LAG, features_Train_LAG
fd0_ns = fd0_sorted[0:round(len(fd0_sorted) / 2)]
fd1_ns = fd1_sorted[0:round(len(fd1_sorted) / 2)]
fd2_ns = fd2_sorted[0:round(len(fd2_sorted) / 2)]
f_number0 = [i[0] for i in fd0_ns]
f_number1 = [i[0] for i in fd1_ns]
f_number2 = [i[0] for i in fd2_ns]

from lag import LAG
from llsr import LLSR as myLLSR

lag = LAG(encode='distance', num_clusters=[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
          alpha=10, learner=myLLSR(onehot=False))
lag.fit(X_reshape0[:, :, f_number0].reshape(X_reshape0.shape[0], -1),
        y_train[0:nti])
lag1 = LAG(encode='distance', num_clusters=[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
           alpha=10, learner=myLLSR(onehot=False))
lag1.fit(X_reshape1[:, :, f_number1].reshape(X_reshape1.shape[0], -1),
         y_train[0:nti])
lag2 = LAG(encode='distance', num_clusters=[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
           alpha=10, learner=myLLSR(onehot=False))
lag2.fit(X_reshape2[:, :, f_number2].reshape(X_reshape2.shape[0], -1),
         y_train[0:nti])

end = time.time()
print('Module 2:', (end - start))

"""Module 3"""
# 1st LAG Layer
from sklearn.model_selection import train_test_split
from llsr import LLSR as myLLSR
from lag import LAG

idx1 = np.argsort(output1Feature)
slice1 = idx1[:4018]
output_1 = output_1.reshape(len(output_1), -1)
t1 = (np.transpose(output_1))
in_lag1 = t1[slice1]
in_lag1 = in_lag1.reshape(len(in_lag1), -1)
in_lag_1 = np.transpose(in_lag1)
print(" input feature shape before reshape: %s" % str(in_lag_1.shape))
print(" > This is a test example: ")
lag1 = LAG(encode='distance',
           num_clusters=[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
           alpha=10,
           learner=myLLSR(onehot=False))
lag1.fit(in_lag_1, label_train)
X_train_trans1 = lag1.transform(in_lag_1)
X_train_predprob1 = lag1.predict_proba(in_lag_1)
print(X_train_trans1.shape)
print("------- DONE -------\n")

# 2nd LAG Layer
from sklearn.model_selection import train_test_split
from llsr import LLSR as myLLSR
from lag import LAG

idx2 = np.argsort(output2Feature)
slice2 = idx2[:2763]
output_2 = output_2.reshape(len(output_2), -1)
t2 = (np.transpose(output_2))
def LAG_Fit(slctd_Features_Train, trainLabel):
    #lag1 = LAG(encode='distance', num_clusters=[12,12,12,12,12,12,12,12,12,12], alpha=10, learner=myLLSR(onehot=False))
    #lag2 = LAG(encode='distance', num_clusters=[12,12,12,12,12,12,12,12,12,12], alpha=10, learner=myLLSR(onehot=False))
    #lag3 = LAG(encode='distance', num_clusters=[12,12,12,12,12,12,12,12,12,12], alpha=10, learner=myLLSR(onehot=False))
    lag1 = LAG(encode='distance', num_clusters=[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
               alpha=10, learner=myLLSR(onehot=False))
    lag2 = LAG(encode='distance', num_clusters=[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
               alpha=10, learner=myLLSR(onehot=False))
    lag3 = LAG(encode='distance', num_clusters=[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
               alpha=10, learner=myLLSR(onehot=False))

    # Extract Training Features
    slctd_Features_Train1 = slctd_Features_Train[0]
    slctd_Features_Train2 = slctd_Features_Train[1]
    slctd_Features_Train3 = slctd_Features_Train[2]

    # Fit the three LAG units
    lag1.fit(slctd_Features_Train1, trainLabel)
    lag2.fit(slctd_Features_Train2, trainLabel)
    lag3.fit(slctd_Features_Train3, trainLabel)

    # Get the Transformed Training Features
    features_Train_Trans1 = lag1.transform(slctd_Features_Train1)
    features_Train_Trans2 = lag2.transform(slctd_Features_Train2)
    features_Train_Trans3 = lag3.transform(slctd_Features_Train3)

    # Cascade all the LAG features
    features_Train_LAG = np.concatenate(
        (features_Train_Trans1, features_Train_Trans2, features_Train_Trans3),
        axis=1)
    return features_Train_LAG, lag1, lag2, lag3
entropy = np.zeros(output[2].shape[1])
rank_3 = []
for j in range(0, output[2].shape[1]):
    entropy[j] = ce.compute(output[2][:, j].reshape(-1, 1),
                            y_train[0:train_num])
rank_3 = np.argsort(-entropy)
output[2] = output[2][:, rank_3[0:Ns_3]]

# ------------------ LAG ------------------ #
print('NoC - %d' % NoC)
lag_1 = LAG(
    encode='distance',
    num_clusters=[NoC, NoC, NoC, NoC, NoC, NoC, NoC, NoC, NoC, NoC],
    alpha=10,
    learner=myLLSR(onehot=False))
lag_2 = LAG(
    encode='distance',
    num_clusters=[NoC, NoC, NoC, NoC, NoC, NoC, NoC, NoC, NoC, NoC],
    alpha=10,
    learner=myLLSR(onehot=False))
lag_3 = LAG(
    encode='distance',
    num_clusters=[NoC, NoC, NoC, NoC, NoC, NoC, NoC, NoC, NoC, NoC],
    alpha=10,
    learner=myLLSR(onehot=False))

lag_1.fit(output[0], y_train[0:train_num])
output[0] = lag_1.transform(output[0])