def predict(self, num): src_jpg_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',4) src_dep_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',3) src_label_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',9) jpg_path = src_jpg_path[2*num] dep_path = src_dep_path[2*num] label_path = src_label_path[2*num] print "making test data" print "inputting",jpg_path jpg_img = cv2.imread(jpg_path) label_img = cv2.imread(label_path,0) size = jpg_img.shape data = ImgToVec.get_vec_list( jpg_img, label_img, self.cut_size) jpg_data = data.vec_list print "test data Complete" output = self.forest.predict( jpg_data ) self.output_file(output , size , num) return output
def create_forest(self):
    """Train one layer of the MultiLayerForest (MLF).

    For every superpixel of every training image, builds a feature vector by
    concatenating the previous layer's class probabilities of the superpixel
    itself and of its four neighbours, then fits a balanced
    RandomForestClassifier and saves it with joblib.

    Returns the fitted forest, or -1 when self.end_num exceeds the number of
    available images.
    """
    start_time= time.time()
    forest = RandomForestClassifier(n_estimators = 10, class_weight = 'balanced')
    # Column 0: per-image superpixel feature CSV paths; column 2: neighbour CSV paths.
    sp_vec_path_list = CSVReader.get_path_list2("./output/csvpath/spdata_path_m"+self.m+".csv",0)
    sp_neighbors_list = CSVReader.get_path_list2("./output/csvpath/spdata_path_m"+self.m+".csv",2)
    if (len(sp_vec_path_list)<self.end_num):
        print "end_num is larger than data length"
        return -1
    trainingdata = []
    traininglabel = []
    print "making training data MLF : Layer",self.layer_num
    for i in range(self.start_num ,self.end_num):
        print "inputting",i,sp_vec_path_list[i]
        # Superpixel feature rows for this image.
        data = CSVReader.read_csv_as_float(sp_vec_path_list[i])
        # Neighbour superpixel indices for this image.
        neighbors = CSVReader.read_csv_as_int(sp_neighbors_list[i])
        probs = self.load_probs(sp_vec_path_list, sp_neighbors_list, self.layer_num, i)
        label_col = len(data[0])-1  # last CSV column holds the ground-truth label
        for j in range(len(data)):
            # Feature = own probabilities + those of the four neighbours.
            vector = []
            vector.extend( probs[j] )
            vector.extend( probs[neighbors[j][0]] )
            vector.extend( probs[neighbors[j][1]] )
            vector.extend( probs[neighbors[j][2]] )
            vector.extend( probs[neighbors[j][3]] )
            #print "training vector",len(vector)
            trainingdata += [ vector ]
            traininglabel+= [ data[j][label_col] ]
    print "training data Complete"
    # sklearn errors out when this forest's class set differs from another
    # forest's, so append one zero-filled training row per label (38 labels)
    # to force all classes to be present.
    for i in range(38):
        trainingdata += [ np.zeros_like(trainingdata[0])]
        traininglabel+= [i]
    forest.fit(trainingdata , traininglabel )
    # Save
    print "save to", self.forest_path
    if not os.path.exists(self.forest_path):
        os.makedirs(self.forest_path)
    joblib.dump(forest, self.forest_path+'/forest.bin')
    print 'MultiLayerForest Layer',self.layer_num,'Complete', time.time() - start_time
    return forest
def create_forest(self):
    """Train one InterLabelRandomForest layer for a single target label.

    Builds per-superpixel vectors from the previous layer's probabilities of
    the superpixel and its four neighbours, and fits a RandomForestRegressor
    to a binary target (1 where the ground-truth label equals self.label,
    else 0).  Saves the model with joblib.

    Returns the fitted forest, or -1 when self.end_num exceeds the number of
    available images.
    """
    start_time = time.time()
    forest = RandomForestRegressor(n_estimators = 10)
    # Column 0: superpixel feature CSV paths; column 2: neighbour CSV paths.
    sp_vec_path_list = CSVReader.get_path_list2("./output/csvpath/spdata_path_m"+self.m+".csv",0)
    sp_neighbors_list = CSVReader.get_path_list2("./output/csvpath/spdata_path_m"+self.m+".csv",2)
    if (len(sp_vec_path_list)<self.end_num):
        print "end_num is larger than data length"
        return -1
    trainingdata = []
    traininglabel = []
    print "ILRF making training data Layer",self.layer,'Label',self.label
    for i in range(self.start_num ,self.end_num):
        #print "inputting",i,sp_vec_path_list[i]
        data = CSVReader.read_csv_as_float(sp_vec_path_list[i])
        neighbors = CSVReader.read_csv_as_int(sp_neighbors_list[i])
        probs = self.load_probs(data, i )
        #print len(data),len(probs)
        label_col = len(data[0])-1  # last CSV column holds the ground-truth label
        for j in range(len(data)):
            vector = []
            #print j,neighbors[j]
            # Feature = own probabilities + those of the four neighbours.
            vector.extend( probs[j] )
            vector.extend( probs[neighbors[j][0]] )
            vector.extend( probs[neighbors[j][1]] )
            vector.extend( probs[neighbors[j][2]] )
            vector.extend( probs[neighbors[j][3]] )
            #print "training vector",len(vector)
            trainingdata += [ vector ]
            # Binary target for this forest's label.
            if data[j][label_col] == self.label:
                traininglabel +=[1]
            else:
                traininglabel +=[0]
    print "training data Complete"
    forest.fit(trainingdata , traininglabel )
    # Save
    print self.layer,"save to", self.forest_path
    if not os.path.exists(self.forest_path):
        os.makedirs(self.forest_path)
    joblib.dump(forest, self.forest_path+'/forest.bin')
    print 'InterLabelRondomForest layer',self.layer,'Label', self.label,'Complete' ,time.time() - start_time
    return forest
def predict(self, num): src_jpg_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',4) src_dep_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',3) src_label_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',9) jpg_path = src_jpg_path[2*num] dep_path = src_dep_path[2*num] label_path = src_label_path[2*num] if os.path.exists(jpg_path+"_m"+self.m+"spdata.csv"): print "exist",jpg_path else: cmd = u'"slic\SLICOSuperpixel.exe"' os.system(cmd+" "+ self.m + " "+jpg_path+" "+dep_path+" "+label_path+" "+ self.s_weight); vec_data_path = jpg_path+"_m"+self.m+"spdata.csv" sp_map_path = jpg_path+"_m"+self.m+"spmap.csv" sp_neighbors_path = jpg_path+"_m"+self.m+"neighbors.csv" data = CSVReader.read_csv_as_float(vec_data_path) sp_map = CSVReader.read_csv_as_int(sp_map_path) neighbors = CSVReader.read_csv_as_int(sp_neighbors_path) test_data = [] probs = self.load_probs_for_predict(data, neighbors, self.layer_num , num) label_col = len(data[0])-1 for j in range(len(data)): vector = [] vector.extend( probs[j] ) vector.extend( probs[neighbors[j][0]] ) vector.extend( probs[neighbors[j][1]] ) vector.extend( probs[neighbors[j][2]] ) vector.extend( probs[neighbors[j][3]] ) test_data += [ vector ] #print len(vector) #if len(vector) != 190: # raw_input(">>") print "test data Complete" #print len(test_data), len(test_data[0]) #self.show_detail() output = self.forest.predict( test_data ) self.output_file(output, sp_map, num) img_a = cv2.imread(label_path,0) img_b = cv2.imread('output_MLF'+str(self.start_num)+'-'+str(self.end_num-1)+'_layer'+str(self.layer_num)+'data'+str(num)+'.png',0) return [jpg_path, label_path ,Assessment.AssessmentByMIOU(img_a,img_b)]
def predict(self, num):
    """Predict labels for test image *num* using neighbourhood probability features.

    Ensures the SLIC superpixel CSVs exist (running the external tool if
    not), builds per-superpixel vectors from self.load_probs plus the four
    neighbours' probabilities, predicts, writes the output image and returns
    [label_path, mIoU score].
    """
    # Column 4 = RGB, 3 = depth, 9 = label image paths; rows come in pairs.
    src_jpg_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',4)
    src_dep_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',3)
    src_label_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',9)
    jpg_path = src_jpg_path[2*num]
    dep_path = src_dep_path[2*num]
    label_path = src_label_path[2*num]
    # Run the external SLIC tool only when its CSV output is missing.
    if os.path.exists(jpg_path+"_m"+self.m+"spdata.csv"):
        print "exist",jpg_path
    else:
        cmd = u'"slic\SLICOSuperpixel.exe"'
        os.system(cmd+" "+ self.m + " "+jpg_path+" "+dep_path+" "+label_path+" "+ self.s_weight);
    vec_data_path = jpg_path+"_m"+self.m+"spdata.csv"
    sp_map_path = jpg_path+"_m"+self.m+"spmap.csv"
    sp_neighbors_path = jpg_path+"_m"+self.m+"neighbors.csv"
    data = CSVReader.read_csv_as_float(vec_data_path)
    sp_map = CSVReader.read_csv_as_int(sp_map_path)
    neighbors = CSVReader.read_csv_as_int(sp_neighbors_path)
    test_data = []
    probs = self.load_probs(data)
    label_col = len(data[0])-1  # last column is the ground-truth label (unused here)
    for j in range(len(data)):
        # Feature = own probabilities + those of the four neighbours.
        vector = []
        vector.extend( probs[j] )
        vector.extend( probs[neighbors[j][0]] )
        vector.extend( probs[neighbors[j][1]] )
        vector.extend( probs[neighbors[j][2]] )
        vector.extend( probs[neighbors[j][3]] )
        test_data += [ vector ]
    # Delete the temp file presumably written by load_probs.
    # NOTE(review): placement after the loop was reconstructed from the
    # flattened source — confirm it is not meant to run per iteration.
    os.remove('./output/probs.csv')
    print "test data Complete"
    output = self.forest.predict( test_data )
    self.output_file(output, sp_map, num)
    img_a = cv2.imread(label_path,0)
    img_b = cv2.imread('output_CLRFdata'+str(num)+'.png',0)
    return [label_path ,Assessment.AssessmentByMIOU(img_a,img_b)]
def create_forest(self):
    """Train a RandomForestClassifier on neighbourhood probability vectors.

    Creates 38 per-label forests (LRF), then for every superpixel of every
    training image builds a vector from its own class probabilities
    (self.load_probs) plus those of its four neighbours; the last CSV column
    supplies the training label.  Fits the classifier and saves it with
    joblib.  Returns the forest, or -1 when self.end_num exceeds the number
    of available images.
    """
    forest = RandomForestClassifier()
    # One binary label forest per class (38 classes).
    for i in range(38):
        self.lrforests += [ LRF(self.start_num, self.end_num-1, i) ]
    sp_vec_path_list = CSVReader.get_path_list2("./output/csvpath/spdata_path_m"+self.m+".csv",0)
    sp_neighbors_list = CSVReader.get_path_list2("./output/csvpath/spdata_path_m"+self.m+".csv",2)
    if (len(sp_vec_path_list)<self.end_num):
        print "end_num is larger than data length"
        return -1
    trainingdata = []
    traininglabel = []
    print "making training data"
    # NOTE(review): iterates indices 0..(end_num-start_num-1), unlike the
    # sibling create_forest methods that use range(start_num, end_num); for
    # start_num != 0 these read different files — confirm which is intended.
    for i in range(self.end_num - self.start_num):
        print "inputting",i,sp_vec_path_list[i]
        data = CSVReader.read_csv_as_float(sp_vec_path_list[i])
        neighbors = CSVReader.read_csv_as_int(sp_neighbors_list[i])
        probs = self.load_probs(data)
        label_col = len(data[0])-1  # last CSV column holds the label
        for j in range(len(data)):
            # Feature = own probabilities + those of the four neighbours.
            vector = []
            vector.extend( probs[j] )
            vector.extend( probs[neighbors[j][0]] )
            vector.extend( probs[neighbors[j][1]] )
            vector.extend( probs[neighbors[j][2]] )
            vector.extend( probs[neighbors[j][3]] )
            #print "training vector",len(vector)
            trainingdata += [ vector ]
            traininglabel+= [ data[j][label_col] ]
        # Delete the per-image probability temp file written by load_probs.
        # NOTE(review): placement inside the outer loop was reconstructed
        # from the flattened source — confirm.
        os.remove('./output/probs.csv')
    print "training data Complete"
    forest.fit(trainingdata , traininglabel )
    # Save
    print "save to", self.forest_path
    if not os.path.exists(self.forest_path):
        os.makedirs(self.forest_path)
    joblib.dump(forest, self.forest_path+'/forest.bin')
    return forest
def predict(self, num): src_jpg_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',4) src_dep_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',3) src_label_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',9) jpg_path = src_jpg_path[2*num] dep_path = src_dep_path[2*num] label_path = src_label_path[2*num] if os.path.exists(jpg_path+"_m"+self.m+"spdata.csv"): print "exist",jpg_path else: cmd = u'"slic\SLICOSuperpixel.exe"' os.system(cmd+" "+ self.m + " "+jpg_path+" "+dep_path+" "+label_path+" "+ self.s_weight); vec_data_path = jpg_path+"_m"+self.m+"spdata.csv" sp_map_path = jpg_path+"_m"+self.m+"spmap.csv" data = CSVReader.read_csv_as_float(vec_data_path) sp_map = CSVReader.read_csv_as_int(sp_map_path) test_data = [] label_col = len(data[0])-1 print "making test data" print "inputting",vec_data_path data = CSVReader.read_csv_as_float(vec_data_path) for line in data: prob = [] for forest in self.lrforests: prob.extend( forest.get_prob (line[0:label_col]) ) test_data += [ prob ] print "test data Complete" output = self.forest.predict( test_data ) self.output_file(output, sp_map, num) img_a = cv2.imread(label_path,0) img_b = cv2.imread('output_MLRFdata'+str(num)+'.png',0) return Assessment.AssessmentByMIOU(img_a,img_b)
def predict(self, num): src_jpg_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',4) src_dep_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',3) src_label_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',9) jpg_path = src_jpg_path[2*num] dep_path = src_dep_path[2*num] label_path = src_label_path[2*num] print "making test data" print "inputting",jpg_path print "inputting",label_path if os.path.exists(jpg_path+"_m"+self.m+"spdata.csv"): print "exist",jpg_path else: cmd = u'"slic\SLICOSuperpixel.exe"' os.system(cmd+" "+self.m+" "+jpg_path+" "+dep_path+" "+label_path+" "+ self.s_weight); vec_data_path = jpg_path+"_m"+self.m+"spdata.csv" sp_map_path = jpg_path+"_m"+self.m+"spmap.csv" data = CSVReader.read_csv_as_float(vec_data_path) sp_map = CSVReader.read_csv_as_int(sp_map_path) test_data = [] print "making test data" #print "inputting",jpg_path label_col = len(data[0])-1 for line in data: test_data += [ line[0:label_col] ] print "test data Complete" output = self.forest.predict( test_data ) #self.output_file(output, num) return output
def create_forest(self): print "save to", self.forest_path trainingdata = [] traininglabel = [] src_jpg_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',4) src_dep_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',3) src_label_path = CSVReader.get_path_list2('SUNRGBDMeta_reduced.csv',9) jpg_path = [] dep_path = [] label_path = [] for i in range(self.start_num, self.end_num): jpg_path.append(src_jpg_path[2*i-1]) dep_path.append(src_dep_path[2*i-1]) label_path.append(src_label_path[2*i-1]) print "making training data" for i in range(self.end_num - self.start_num): print "inputting",i,jpg_path[i] jpg_img = cv2.imread(jpg_path[i]) label_img = cv2.imread(label_path[i],0) depth_img = cv2.imread(dep_path[i],0) data = ImgToVec.GetRGBDVecList( jpg_img, depth_img, label_img, self.cut_size) trainingdata += data.vec_list traininglabel += data.label_list print "training data Complete" self.forest.fit(trainingdata , traininglabel ) # Save print "save to", self.forest_path if not os.path.exists(self.forest_path): os.makedirs(self.forest_path) joblib.dump(self.forest, self.forest_path+'/forest.bin') return self.forest
def create_forest(self): forest = RandomForestClassifier() for i in range(38): self.lrforests += [ LRF(self.start_num, self.end_num-1, i) ] sp_vec_path_list = CSVReader.get_path_list2("./output/csvpath/spdata_path_m"+self.m+".csv",0) sp_neighbors_list = CSVReader.get_path_list2("./output/csvpath/spdata_path_m"+self.m+".csv",2) if (len(sp_vec_path_list)<self.end_num): print "end_num is larger than data length" return -1 trainingdata = [] traininglabel = [] print "making training data" for i in range(self.end_num - self.start_num): print "inputting",i,sp_vec_path_list[i] data = CSVReader.read_csv_as_float(sp_vec_path_list[i]) label_col = len(data[0])-1 for line in data: prob = [] for lrf in self.lrforests: prob.extend( lrf.get_prob (line[0:label_col]) ) #print "prob",prob trainingdata += [ prob ] traininglabel+= [ line[label_col] ] print "training data Complete" forest.fit(trainingdata , traininglabel ) # Save print "save to", self.forest_path if not os.path.exists(self.forest_path): os.makedirs(self.forest_path) joblib.dump(forest, self.forest_path+'/forest.bin') return forest
def data_statistics(self): if not os.path.exists('./output/statistics'): os.makedirs('./output/statistics') sp_vec_path_list = CSVReader.get_path_list2("./output/csvpath/spdata_path_m1000.csv",0) for i in range(5000): f = open('./output/statistics/data'+str(i)+'.csv', 'w') writer = csv.writer(f, lineterminator='\n') output = [0 for x in range(38)] data = CSVReader.read_csv_as_float(sp_vec_path_list[i]) label_col = len(data[0])-1 for line in data: output[int(line[label_col])] += 1 print sp_vec_path_list[i] for j in range( len(output) ): print j,output[j] writer.writerow([j,output[j]]) f.close()
def create_forest(self): start_time= time.time() forest = RandomForestRegressor(n_estimators = 10) #sp_vec_path_list = CSVReader.get_path_list2("./output/csvpath/spdata_path_m"+self.m+"cs"+self.cs+"w"+self.s_weight+".csv",0) sp_vec_path_list = CSVReader.get_path_list2("./output/csvpath/spdata_path_m"+self.m+".csv",0) if (len(sp_vec_path_list)<self.end_num): print "end_num is larger than data length" return -1 trainingdata = [] traininglabel = [] count = 0 n_count = 0 print "making training data" for i in range(self.start_num ,self.end_num): #print i,"inputting",i,sp_vec_path_list[i] data = CSVReader.read_csv_as_float(sp_vec_path_list[i]) #self.data_deteil(sp_vec_path_list[i],i) label_col = len(data[0])-1 for line in data: trainingdata += [ line[0:label_col] ] if line[label_col] == self.label: traininglabel +=[1] else: traininglabel +=[0] print "training data Complete" forest.fit(trainingdata , traininglabel ) # Save print "save to", self.forest_path if not os.path.exists(self.forest_path): os.makedirs(self.forest_path) joblib.dump(forest, self.forest_path+'/forest.bin') print 'LabelRandomForest',self.label,'Complete',time.time() - start_time return forest
def create_forest(self): print "save to", self.forest_path trainingdata = [] traininglabel = [] sp_vec_path_list = CSVReader.get_path_list2("./output/csvpath/spdata_path_m"+self.m+"cs"+self.cs+"w"+self.s_weight+".csv",0) if (len(sp_vec_path_list)<self.end_num): print "end_num is larger than data length" return -1 print "making training data" for i in range(self.end_num - self.start_num): print "inputting",i,sp_vec_path_list[i] data = CSVReader.read_csv_as_float(sp_vec_path_list[i]) label_col = len(data[0]) -1 for line in data: #print line[0:256/int(self.cs)*4] trainingdata += [ line[0:label_col] ] #print int(line[label_col]) traininglabel+= [int(line[label_col])] print "training data Complete" #print traininglabel self.forest.fit(trainingdata , traininglabel ) # Save print "save to", self.forest_path if not os.path.exists(self.forest_path): os.makedirs(self.forest_path) joblib.dump(self.forest, self.forest_path+'/forest.bin') return self.forest