def predict(self, data_set):
    """Assign a label to each item in *data_set* using the trained model.

    Writes the data set to a LibSVM-format validation file, invokes the
    external predictor binary, then reads the predicted labels back from
    the predictor's output file.

    Returns a Prediction object mapping each item to its estimated label.
    """
    test_file_name = self._test_file_name + '_' + str(LibSVMModel._file_suffix)
    # Record the order items were written in, so output lines can be
    # matched back to their items.
    item_order = self.create_validation_set_file_from_dataset(
        data_set, test_file_name)

    args = [
        self._predictor_package_path,
        test_file_name,
        self._model_file,
        self._output_filename,
    ]
    # Suppress the predictor's stdout (accuracy banner etc.); `with`
    # guarantees the devnull handle is closed even if the call raises.
    with open(os.devnull, 'w') as fnull:
        subprocess.call(args, stdout=fnull)

    predicted_values = Prediction(data_set)
    # The output file holds one predicted label per line, in the same
    # order as the items written above.
    with open(self._output_filename, 'r') as fin:
        for i, line in enumerate(fin):
            item = item_order[i]
            predicted_values.set_est_label(item, line.split()[0])
            logging.debug('setting label for ' + item)
    return (predicted_values)
def myAPI(url_path):
    """JSON API endpoint: classify the image at *url_path*.

    Downloads the image, runs the core classification, parses the
    textual result into Prediction objects, and returns them as JSON.
    Renders the error page if the download fails.
    """
    # 2- download the image; downloadImage signals failure by returning
    # a string starting with "Non".
    path_of_image = downloadImage(url_path)
    if path_of_image.startswith("Non"):
        return render_template('error.html')

    # 3- the path of the image is path_of_image (done in step 2)
    # 4- run the core classification on the downloaded file
    result = mainWeb(path_of_image)

    # 5- the classifier output is line-oriented: skip the "Prediction"
    # header and blank lines; each remaining line is "<pct> ... <value>".
    predictions = []
    for part in result.split('\n'):
        part = part.strip()
        if part.startswith("Prediction") or part == "":
            continue
        myParts = part.split(' ')
        # Was a stray `print` debug statement; keep the trace but route
        # it through logging so the endpoint's stdout stays clean.
        logging.debug('classifier line parts: %s', myParts)
        percentage = myParts[0]  # confidence percentage
        value = myParts[2]       # predicted class value
        predictions.append(Prediction(percentage, value))

    # 6- wrap everything in a FinalResult and jsonify it
    finalResult = FinalResult(url_path, predictions)
    return jsonify(Result=finalResult.serialize)
def classifyImage():
    """Web form endpoint: classify the image URL submitted via POST.

    Downloads the image, runs classification, parses the predictions,
    retrieves visually similar images, and renders the result page.
    Non-POST requests get "REJECTED"; a failed download renders the
    error page.  (A POST with an empty 'url' field falls through and
    returns None, matching the original control flow.)
    """
    if request.method == 'POST':
        # 1- get the url from the submitted form
        if request.form['url']:
            url_path = request.form['url']

            # 2- download the image; failure is signalled by a path
            # starting with "Non".
            path_of_image = downloadImage(url_path)
            if path_of_image.startswith("Non"):
                return render_template('error.html')

            # 3- the image path is path_of_image (done in step 2)
            # 4- run the core classification
            result = mainWeb(path_of_image)

            # 5- parse the classifier's line-oriented output into
            # Prediction objects (skip header / blank lines).
            predictions = []
            for part in result.split('\n'):
                part = part.strip()
                if part.startswith("Prediction") or part == "":
                    continue
                myParts = part.split(' ')
                percentage = myParts[0]  # confidence percentage
                value = myParts[2]       # predicted class value
                predictions.append(Prediction(percentage, value))

            # Trim the absolute on-disk path down to a server-relative
            # one.  NOTE(review): assumes a fixed directory depth of at
            # least 8 components — confirm against downloadImage.
            path_parts = path_of_image.split('/')
            betterPath = (path_parts[5] + "/" + path_parts[6] + "/"
                          + path_parts[7])

            # --- retrieval: wait for the precomputed feature array and
            # look up similar images (was stray `print` debugging).
            script_start_time = time.time()
            features_array = async_result.get()
            logging.debug('async_result took %f',
                          time.time() - script_start_time)

            list_of_paths = findSimilar([path_of_image], features_array)
            logging.debug('similar image paths: %s', list_of_paths)

            # Map on-disk locations to server-visible URLs for the
            # template.
            server_similar_images_parsed = parseImageSimilarPath(list_of_paths)

            return render_template('result.html', result=predictions,
                                   path_of_image=betterPath,
                                   similar=server_similar_images_parsed)
    else:
        return "REJECTED"
def predict(self, data_set):
    """Label every item in *data_set* with the fitted GBM model.

    Builds a validation feature matrix from the data set, asks the GBM
    package for response-type predictions, converts each raw prediction
    to a label, and records it on a Prediction object, which is
    returned.
    """
    result = Prediction(data_set)
    features, _labels, items = self.create_validation_set(data_set)
    # Raw per-item predictions from the underlying GBM package.
    raw = self._gbm_package.predict_gbm(
        self._gbm_model_object,
        features,
        n_trees=self._n_trees,
        type='response',
        verbose=False)
    for idx, item in enumerate(items):
        result.set_est_label(item, self.convert_label(raw[idx]))
    return result
def predict(self, data_set):
    """Use the trained GBM model to assign a label to each data-set item.

    Returns a Prediction object holding one estimated label per item of
    *data_set*.
    """
    predictions = Prediction(data_set)
    (features, labels, items) = self.create_validation_set(data_set)
    # Ask the GBM package for response-type estimates over the
    # validation features.
    estimates = self._gbm_package.predict_gbm(
        self._gbm_model_object, features, n_trees=self._n_trees,
        type='response', verbose=False)
    position = 0
    while position < len(items):
        predictions.set_est_label(
            items[position], self.convert_label(estimates[position]))
        position += 1
    return predictions