def matchPers(p1, rgdP, conf, score=None):
    global svmModel, mt_tmp, SVMfeatures
    if not svmModel:
        if 'featureSet' in conf:
            SVMfeatures = getattr(importlib.import_module('featureSet'), conf['featureSet'])
            svmModel = svm_load_model('conf/person_' + conf['featureSet'] + '.model')
        else:  # default
            SVMfeatures = getattr(importlib.import_module('featureSet'), 'personDefault')
            svmModel = svm_load_model('conf/personDefault.model')
    nodeScore = nodeSim(p1, rgdP)
    #pFam = conf['families'].find_one({'children': p1['_id']})  # find fam if p in 'children'
    pFam = getFamilyFromChild(p1['_id'], conf['families'], conf['relations'])
    #rgdFam = conf['match_families'].find_one({'children': rgdP['_id']})
    rgdFam = getFamilyFromChild(rgdP['_id'], conf['match_families'], conf['match_relations'])
    famScore = familySim(pFam, conf['persons'], rgdFam, conf['match_persons'])
    cand_matchtxt = mt_tmp.matchtextPerson(rgdP, conf['match_persons'], conf['match_families'], conf['match_relations'])
    matchtxt = mt_tmp.matchtextPerson(p1, conf['persons'], conf['families'], conf['relations'])
    cosScore = cos(matchtxt, cand_matchtxt)
    if score is None and 'featureSet' in conf:  # score not used by default
        try:  # Lucene FIX
            #from luceneUtils import search
            import traceback
            candidates = search(matchtxt, p1['sex'], ant=100, config=conf)  # Lucene search
            score = 0.0
            for (kid, sc) in candidates:
                if str(kid) == str(rgdP['_id']):
                    score = sc
                    break
        #except:  # use cos instead ?? if problems running Java in Bottle
        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback)
def cons_train_sample_for_cla(filename, indexes, dic_path, glo_aff_path, result_save_path,
                              model_path, LSA_path, LSA_model_path, decom_meas, delete):
    dic_list = read_dic(dic_path, dtype=str)
    glo_aff_list = read_list(glo_aff_path)
    f = open(filename, 'r')
    fs = open(result_save_path, 'w')
    fd = open(dust_save_path, 'w')
    m = svm_load_model(model_path)
    lsa_m = svm_load_model(LSA_model_path)
    U = load_lsa_model(LSA_path, "U")
    for line in f.readlines():
        text = line.strip().split(tc_splitTag)
        if len(text) != line_length:
            fd.write(line)
            continue
        text_temp = ""
        for i in indexes:
            text_temp += str_splitTag + text[i]
        vec = cons_vec_for_cla(text_temp.strip().split(str_splitTag), dic_list, glo_aff_list)
        y, x = cons_svm_problem(text[0], vec)
        p_lab, p_acc, p_sc = svm_predict(y, x, m)
        if decom_meas == 1:
            weight = cal_weight(p_sc[0][0])
            #vec = [value * weight for value in vec]
            vec = [0] * len(vec)
            for key in x[0].keys():
                vec[int(key) - 1] = weight * float(x[0][key])
            vec = pre_doc_svds(vec, U)
            y, x = cons_svm_problem(text[0], vec)
            lsa_lab, lsa_acc, lsa_sc = svm_predict(y, x, lsa_m)
            fs.write(text[0] + "\t" + str(p_sc[0][0]) + "\t" + str(lsa_sc[0][0]) + "\t" + text[1] + "\t" + text[2] + "\n")
        else:
            fs.write(text[0] + "\t" + str(p_sc[0][0]) + "\t" + text[1] + "\t" + text[2] + "\n")
    f.close()
    fs.close()
def __setstate__(self, state):
    self.svm_model_fp = state['svm_model_fp']
    self.svm_label_map_fp = state['svm_label_map_fp']
    self.train_params = state['train_params']
    self.normalize = state['normalize']
    # C libraries/pointers don't survive across processes.
    if '__LOCAL__' in state:
        fd, fp = tempfile.mkstemp()
        try:
            os.close(fd)
            self.svm_label_map = state['__LOCAL_LABELS__']
            # write model binary to file, then load via libSVM
            with open(fp, 'wb') as model_f:
                model_f.write(state['__LOCAL_MODEL__'])
            self.svm_model = svmutil.svm_load_model(fp)
        finally:
            os.remove(fp)
    else:
        self.svm_model = None
        self._reload_model()
def predict_all(request):
    '''Predicts points in an array'''
    width = float(request.POST.get("width", "None"))
    height = float(request.POST.get("height", "None"))
    model = svmutil.svm_load_model('libsvm.model')
    # Get grid of points to query
    points = []
    for counterY in [1.0 / 15.0 * y for y in range(0, 15)]:
        for counterX in [1.0 / 15.0 * x for x in range(0, 15)]:
            points.append([counterX, counterY])
    #for counterY in [1.0 / 10.0 * x for x in range(0, 10)]:
    #    for counterX in [1.0 / 10.0 * y for y in range(0, 10)]:
    #        label, acc, val = svmutil.svm_predict([0], [[counterX, counterY]], model)
    #        results[i] = [counterX, counterY, label]
    #        i = i + 1
    #results["length"] = i
    # Get labels
    labels, acc, val = svmutil.svm_predict([0] * len(points), points, model)
    results = {}
    for index, value in enumerate(points):
        results[index] = {
            "x": points[index][0],
            "y": points[index][1],
            "label": labels[index]
        }
    results["length"] = len(points)
    return json(results)
def predict(request):
    predictX = float(request.POST.get("x", -1))
    predictY = float(request.POST.get("y", -1))
    predictLabel = int(request.POST.get("label", -1))
    if predictX == -1 or predictY == -1 or predictLabel == -1:
        return django.http.HttpResponse("Missing Params")

    points = models.Point2d.objects.all()
    # Storing the information to be presented to SVM
    labels = []
    inputs = []
    # For each point, store the information into arrays
    #for p in points:
    #    labels.append(p.label)
    #    inputs.append([p.x, p.y])
    #prob = svm.svm_problem(labels, inputs)
    #param = svm.svm_parameter('-t 2 -c 100')
    #model = svmutil.svm_train(prob, param)
    #svmutil.svm_save_model('libsvm.model', model)
    model = svmutil.svm_load_model('libsvm.model')
    p_label, acc, val = svmutil.svm_predict([0], [[predictX, predictY]], model)
    data = {'x': predictX, 'y': predictY, 'label': int(p_label[0])}
    return json(data)
def __setstate__(self, state):
    self.__dict__.update(state)
    self.svm_model_elem = \
        self.svm_model_uri and from_uri(self.svm_model_uri)
    self.svm_label_map_elem = \
        self.svm_label_map_uri and from_uri(self.svm_label_map_uri)
    # C libraries/pointers don't survive across processes.
    if '__LOCAL__' in state:
        # These would have gotten copied into dict during the update.
        # The instance doesn't need to keep them around after this.
        del self.__dict__['__LOCAL__']
        del self.__dict__['__LOCAL_LABELS__']
        del self.__dict__['__LOCAL_MODEL__']
        fd, fp = tempfile.mkstemp()
        try:
            os.close(fd)
            self.svm_label_map = state['__LOCAL_LABELS__']
            # write model to file, then load via libSVM
            with open(fp, 'wb') as model_f:
                model_f.write(state['__LOCAL_MODEL__'])
            fp_bytes = fp.encode('utf8')
            self.svm_model = svmutil.svm_load_model(fp_bytes)
        finally:
            os.remove(fp)
    else:
        self.svm_model = None
        self._reload_model()
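# The two __setstate__ variants above imply a matching __getstate__ that
# serializes the in-memory libSVM model into the '__LOCAL_*' state keys.
# This is a minimal hypothetical sketch of that counterpart (the key names
# come from the snippets; the temp-file round trip is an assumption), since
# a libSVM model can only be serialized through svm_save_model:
def __getstate__(self):
    state = self.__dict__.copy()
    if self.svm_model is not None:
        import os
        import tempfile
        import svmutil
        fd, fp = tempfile.mkstemp()
        try:
            os.close(fd)
            # svm_save_model writes the model in libSVM's text format
            svmutil.svm_save_model(fp, self.svm_model)
            with open(fp, 'rb') as model_f:
                state['__LOCAL_MODEL__'] = model_f.read()
            state['__LOCAL_LABELS__'] = self.svm_label_map
            state['__LOCAL__'] = True
        finally:
            os.remove(fp)
    # the raw C-level model pointer itself cannot be pickled
    state.pop('svm_model', None)
    return state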
def __init__(self, init_data_dir, logger=None):
    # load model
    self.model = svmutil.svm_load_model(
        os.path.join(init_data_dir, "allmodel"))
    self.logger = logger
    if self.logger is not None:
        self.logger.debug("BRISQUE image metric has been loaded")
def load_model(model_path):
    """Load a libsvm model from a path on disk.

    This currently supports:
      * C-SVC
      * NU-SVC
      * Epsilon-SVR
      * NU-SVR

    Parameters
    ----------
    model_path: str
        Path on disk where the libsvm model representation is.

    Returns
    -------
    model: libsvm_model
        A model of the libsvm format.
    """
    if not HAS_LIBSVM:
        raise RuntimeError(
            'libsvm not found. libsvm conversion API is disabled.')

    from svmutil import svm_load_model  # From libsvm
    import os

    if not os.path.exists(model_path):
        raise IOError("Expected a valid file path. %s does not exist" % model_path)
    return svm_load_model(model_path)
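# Minimal usage sketch for load_model above. "example.model" is a
# hypothetical path; any file written by svmutil.svm_save_model (or the
# svm-train command-line tool) will load.
def _load_model_example():
    from svmutil import svm_predict
    model = load_model("example.model")
    # svm_predict takes labels, a list of instances (dense lists or sparse
    # {index: value} dicts), and the model; dummy labels suffice when only
    # predicting.
    labels, acc, vals = svm_predict([0], [{1: 0.5, 2: -1.2}], model)
    return labels[0]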
def _get_classifier(svm_name=None):
    """
    If need be, initializes, and then returns a classifier trained to
    differentiate between different ions and water. Also returns options
    for gathering features.

    To use the classifier, you will need to pass it to
    svm.libsvm.svm_predict_probability. Ion prediction is already
    encapsulated by predict_ion, so most users should just call that.

    Parameters
    ----------
    svm_name : str, optional
        The SVM to use for prediction. By default, the SVM trained on heavy
        atoms and calcium in the presence of anomalous data is used. See
        chem_data/classifiers for a full list of SVMs available.

    Returns
    -------
    svm.svm_model
        The libsvm classifier used to predict the identities of ion sites.
    dict of str, bool
        Options to pass to ion_vector when collecting features about these
        sites.
    tuple of ((tuple of numpy.array of float, numpy.array of float),
              tuple of float)
        The scaling parameters passed to scale_to.
    numpy.array of bool
        The features of the vector that were selected as important for
        classification. Useful both for asserting that ion_vector is
        returning something of the correct size and for selecting only the
        features that actually affect classification.

    See Also
    --------
    svm.libsvm.svm_predict_probability
    mmtbx.ions.svm.predict_ion
    phenix_dev.ion_identification.nader_ml.ions_train_svms
    """
    assert (svmutil is not None)
    global _CLASSIFIER, _CLASSIFIER_OPTIONS
    if not svm_name or str(svm_name) == "Auto":
        svm_name = _DEFAULT_SVM_NAME
    if svm_name not in _CLASSIFIER:
        svm_path = os.path.join(CLASSIFIERS_PATH, "{}.model".format(svm_name))
        options_path = os.path.join(CLASSIFIERS_PATH,
                                    "{}_options.pkl".format(svm_name))
        try:
            _CLASSIFIER[svm_name] = svmutil.svm_load_model(svm_path)
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise err
            else:
                _CLASSIFIER[svm_name] = None
                _CLASSIFIER_OPTIONS[svm_name] = (None, None, None)
        _CLASSIFIER_OPTIONS[svm_name] = load(options_path)
    vector_options, scaling, features = _CLASSIFIER_OPTIONS[svm_name]
    return _CLASSIFIER[svm_name], vector_options, scaling, features
def classify(filename, classLabel=0):
    # renamed from `str` to avoid shadowing the builtin
    fpath = "/Thu_Life/CS/SVM/data/trainData/Test_SVMFile/singleSVM_TestFile"
    f = open(fpath, "wb")
    t = VSM.TextToVector2(filename)
    slabel = ("%d ") % classLabel
    if len(t) > 0:
        f.write(slabel)
        for k in range(len(t)):
            str1 = ("%d:%d ") % (t[k][0], t[k][1])
            f.write(str1)
        f.write("\r\n")
    else:
        print "The text can't be classified to the Four Labels!"
        return "Can't be classified ! "
    f.close()
    y, x = svmutil.svm_read_problem(fpath)
    model = svmutil.svm_load_model("../SVMTrainFile250.model")
    label, b, c = svmutil.svm_predict(y, x, model)
    print "label", label
    if label[0] == 1:
        print "类别:财经"  # category: finance
        return "财经"
    elif label[0] == 2:
        print "类别:IT"  # category: IT
        return "IT"
    elif label[0] == 3:
        print "类别:旅游"  # category: travel
        return "旅游"
    elif label[0] == 4:
        print "类别:体育"  # category: sports
        return "体育"
def __on_click_train__(self, instance):
    if len(self.first_folder.selection) > 0:
        if self.first_folder.selection[0].endswith('.model'):
            self.svm = svmutil.svm_load_model(
                self.first_folder.selection[0])
            #self.remove_widget(self.first_folder_label)
            self.remove_widget(self.first_folder)
            self.remove_widget(self.second_folder_label)
            self.remove_widget(self.second_folder)
            self.remove_widget(self.start_training)
            self.first_folder_label.text = "Carpeta de pruebas"  # "Test folder"
            #self.add_widget(self.test_folder_label)
            self.add_widget(self.test_folder)
            self.add_widget(self.read_images)
            return
    if len(self.first_folder.selection) < 1 or len(
            self.second_folder.selection) < 1:
        # "You need to select a folder to start."
        popup = Popup(
            title='Error',
            content=Label(
                text='Necesitas seleccionar una carpeta para iniciar.'),
            size=(400, 100),
            size_hint=(None, None))
        popup.open()
    else:
        # "Training" / "Running training..."
        popup = Popup(
            title='Entrenando',
            content=Label(text='Realizando entrenamiento...'),
            size=(400, 150),
            size_hint=(None, None),
            auto_dismiss=False,
        )
        popup.open()
        self.__start_train__(popup)
def trainSVMAndSave(modelLoc, kernel, labels):
    if os.path.exists(modelLoc):
        return svm_load_model(modelLoc)
    else:
        model = trainSVM(kernel, labels)
        svm_save_model(modelLoc, model)
        return model
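# trainSVMAndSave above is a load-or-train cache keyed on the model file.
# The same pattern written generically, as a sketch (train_fn is any
# callable returning a libsvm model; both names here are illustrative):
def load_or_train(model_loc, train_fn):
    import os
    from svmutil import svm_load_model, svm_save_model
    if os.path.exists(model_loc):
        return svm_load_model(model_loc)
    model = train_fn()
    svm_save_model(model_loc, model)
    return model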
def get_fearure_weights(yprob, years=range(2004, 2017), normalize=False,
                        binary_features=False, top10_type='sum', reg=1e-3):
    from svmutil import svm_load_model
    d = (102, 103)[binary_features]
    W = np.array([]).reshape(0, d)
    periods = []
    for y in years:
        periods.append(get_year_str(y))
        # alpha, SV = get_alpha_and_SV('/home/arya/out/model.' + periods[-1])
        model = svm_load_model('/home/arya/out/model.' + periods[-1])
        alpha = np.array(map(lambda x: abs(x[0]), model.get_sv_coef()))
        SV = model.get_SV()
        X = np.zeros((len(SV), d))
        for i in range(len(SV)):
            for k, v in SV[i].items():
                if k > 0:
                    X[i, k - 1] = v
        W = np.append(W, alpha.dot(X)[None, :], axis=0)
    np.set_printoptions(linewidth='1000', precision=3, edgeitems=55, suppress=True)
    W = W + reg
    if normalize:
        W = W / W.sum(1)[:, None]
    print ('UnNormalized', 'Normalized')[normalize], 'Feature Weights:'
    print W
    if top10_type == 'sum':
        sumW = W.sum(0)
        indices = range(len(sumW))
        indices.sort(lambda x, y: -cmp(sumW[x], sumW[y]))
        top10 = indices[:10]
    elif top10_type == 'genbank':
        yprob = yprob / yprob.sum(0)
        err0 = abs(W[:11, :] - yprob[:, 0][:, None]).sum(0)
        indices = range(len(err0))
        indices.sort(lambda x, y: -cmp(err0[x], err0[y]))
        top10 = indices[:10]
    elif top10_type == 'geo':
        yprob = yprob / yprob.sum(0)
        err1 = abs(W[:11, :] - yprob[:, 1][:, None]).sum(0)
        indices = range(len(err1))
        indices.sort(lambda x, y: -cmp(err1[x], err1[y]))
        top10 = indices[:10]
    elif top10_type == 'all':
        top10 = range(W.shape[1])
    else:
        print top10_type, 'not found'
        exit(1)
    top10 = sorted(top10)
    print top10, top10_type
    # exit(1)
    info = """
    W: t x d matrix of weights; each row contains the weights corresponding to time t (periods[t])
    periods: t x 1 string list; each element contains the period, e.g. 2004-2008
    top10: top 10 features which have a larger sum over all the periods
    """
    Data = {'W': W, 'periods': periods, 'top10': top10, 'info': info}
    save_data_pkl(Data, '/home/arya/out/trends{}{}.pkl'.format(
        ('_unnormalized', '_normalized')[normalize],
        ('_integer', '_binary')[binary_features]))
    return W, periods, top10
def libSVM_converter(args):
    import svmutil
    source_model = svmutil.svm_load_model(args.source)
    from onnxmltools.convert.common.data_types import FloatTensorType
    onnx_model = winmltools.convert_libsvm(source_model, get_opset(args.ONNXVersion),
                                           initial_types=[('input', FloatTensorType([1, 'None']))])
    return onnx_model
def __init__(self, train_feature_file=TRAIN_FEATURE_FILE):
    if os.path.exists(SAVED_MODEL):
        self.model = svmutil.svm_load_model(SAVED_MODEL)
    else:
        y, x = svmutil.svm_read_problem(train_feature_file)
        self.model = svmutil.svm_train(y, x, '-c 4')
        svmutil.svm_save_model(SAVED_MODEL, self.model)
def cracker(imageRaw, tempFolder, modelFilePath, mode="default"):
    '''
    imageRaw is the object returned by PIL.Image.open()
    tempFolder is the temporary folder to store some temp files
    modelFilePath is the path of the trained model
    if mode == "quiet", then this function produces no output
    This function returns the result of cracking
    '''
    imageGrey = imageRaw.convert("L")
    imageBin = imageGrey.point(lambda x: x > 140)
    CollectAndPreprocess.ridNoise(imageBin)
    imagePaths = CollectAndPreprocess.cutAndSaveImage(imageBin, i, tempFolder)
    result = ""
    for eachPath in imagePaths:
        featureList = GetFeatureAndTrain.getFeature(eachPath)
        os.remove(eachPath)
        #print(featureList)
        featureDict = {}
        for (index, value) in enumerate(featureList):
            featureDict[index + 1] = value
        xt = []
        xt.append(featureDict)
        yt = [0]
        model = svmutil.svm_load_model(modelFilePath)
        p_label, p_acc, p_val = svmutil.svm_predict(yt, xt, model, "-q")
        for item in p_label:
            result += chr(int(item))
    if mode == "default":
        print(result)
    return result
def do_predicting(classifier_name, test_x, test_y):
    model_save_file = './models/' + classifier_name + '.model'
    if classifier_name == 'LIBSVM':
        model = svm_load_model('./models/{}.model'.format(classifier_name))
        p_labels, p_acc, p_vals = svm_predict(test_y, np.array(test_x).tolist(), model)
        return p_labels
    classifiers = {
        'NB': naive_bayes_classifier,
        'KNN': knn_classifier,
        'LR': logistic_regression_classifier,
        'RF': random_forest_classifier,
        'DT': decision_tree_classifier,
        'SVM': svm_classifier,
        'SVMCV': svm_cross_validation,
        'GBDT': gradient_boosting_classifier,
        'ADA': ada_boosting_classifier,
        'MLP': mlp_classifier
    }
    model = jl.load(model_save_file)[classifier_name]
    predict = model.predict(test_x)
    #accuracy = metrics.accuracy_score(test_y, predict)
    #print 'accuracy: %.2f%%' % (100 * accuracy)
    return predict
def predict(V, yy):
    m = svmutil.svm_load_model("sample.model")
    x = [list(map(lambda z: z * 10, list(t))) for t in V]
    y = [0 if t < 0 else 1 for t in yy]
    p_label, p_acc, p_val = svmutil.svm_predict(y, x, m)
    print(y)
    print(p_label)
    print(x[10])
def doRecognize():
    yt, xt = svm_read_problem(testTxt)
    model = svm_load_model(cdir + "model")
    p_label, p_acc, p_val = svm_predict(yt, xt, model)
    # p_label is the recognition result
    code = ''
    for item in p_label:
        code = code + str(int(item))
    print code
def _reload_model(self):
    """
    Reload SVM model from configured file path.
    """
    if self.svm_model_fp and os.path.isfile(self.svm_model_fp):
        self.svm_model = svmutil.svm_load_model(self.svm_model_fp)
    if self.svm_label_map_fp and os.path.isfile(self.svm_label_map_fp):
        with open(self.svm_label_map_fp, "rb") as f:
            self.svm_label_map = cPickle.load(f)
def think(self, text):
    from twss.twss import twss_lite
    import pickle
    from svmutil import svm_load_model
    vocab_file = open(self.vocab)  # renamed from `input` to avoid shadowing the builtin
    vocabList = pickle.load(vocab_file)
    vocab_file.close()
    model = svm_load_model(self.model)
    return "That's what she said!" if twss_lite(text, vocabList, model) == 1 else ""
def get_BRISQUE_score(cv_image):
    # convert to gray scale
    dis = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)

    # compute feature vectors of the image
    features = compute_features(dis)

    # rescale the brisqueFeatures vector from -1 to 1
    x = [0]

    # pre loaded lists from C++ Module to rescale brisque features vector to [-1, 1]
    min_ = [
        0.336999, 0.019667, 0.230000, -0.125959, 0.000167, 0.000616, 0.231000,
        -0.125873, 0.000165, 0.000600, 0.241000, -0.128814, 0.000179, 0.000386,
        0.243000, -0.133080, 0.000182, 0.000421, 0.436998, 0.016929, 0.247000,
        -0.200231, 0.000104, 0.000834, 0.257000, -0.200017, 0.000112, 0.000876,
        0.257000, -0.155072, 0.000112, 0.000356, 0.258000, -0.154374, 0.000117,
        0.000351
    ]

    max_ = [
        9.999411, 0.807472, 1.644021, 0.202917, 0.712384, 0.468672, 1.644021,
        0.169548, 0.713132, 0.467896, 1.553016, 0.101368, 0.687324, 0.533087,
        1.554016, 0.101000, 0.689177, 0.533133, 3.639918, 0.800955, 1.096995,
        0.175286, 0.755547, 0.399270, 1.095995, 0.155928, 0.751488, 0.402398,
        1.041992, 0.093209, 0.623516, 0.532925, 1.042992, 0.093714, 0.621958,
        0.534484
    ]

    # append the rescaled vector to x
    for i in range(0, 36):
        min = min_[i]
        max = max_[i]
        x.append(-1 + (2.0 / (max - min) * (features[i] - min)))

    # load model
    model = svmutil.svm_load_model("allmodel")

    # create svm node array from python list
    x, idx = gen_svm_nodearray(
        x[1:], isKernel=(model.param.kernel_type == PRECOMPUTED))
    x[36].index = -1  # set last index to -1 to indicate the end.

    # get important parameters from model
    svm_type = model.get_svm_type()
    is_prob_model = model.is_probability_model()
    nr_class = model.get_nr_class()

    if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVR):
        # here svm_type is EPSILON_SVR as it's a regression problem
        nr_classifier = 1
    dec_values = (c_double * nr_classifier)()

    # calculate the quality score of the image using the model and svm_node_array
    qualityscore = svmutil.libsvm.svm_predict_probability(model, x, dec_values)

    return qualityscore
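# Hedged usage sketch for get_BRISQUE_score above. "photo.jpg" is a
# placeholder path; compute_features and the "allmodel" file must be
# available exactly as the snippet assumes.
def _brisque_score_example():
    import cv2
    img = cv2.imread("photo.jpg", 1)
    if img is None:
        raise IOError("could not read image")
    # lower BRISQUE scores generally indicate better perceptual quality
    return get_BRISQUE_score(img)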
def predict(self, document):
    """
    tag the new data based on a given model

    :param document: document object
    :returns: list of predictions
    """
    logger.info('Start to predict')
    y, x = self.fit(document)
    if Svm._auto_config:
        file_name = self._auto_load('file', self.model_file)
        m = svmutil.svm_load_model(file_name)
        pkg_resources.cleanup_resources()
    else:
        m = svmutil.svm_load_model(self.model_file)
    y_pred, p_acc, p_val = svmutil.svm_predict(y, x, m, "-q")
    return y_pred
def main():
    try:
        lModel = svmutil.svm_load_model(sys.argv[1])
        lRanges = read_ranges(sys.argv[2])
        lFile = sys.argv[3]
        lBlockSize = int(sys.argv[4])
    except IndexError:
        print("Usage: " + sys.argv[0] + " <model-file> <range-file> "
              "<problem-file> <block-size>")
        sys.exit(-1)
def _load_model(self) -> svmutil.svm_model:
    """
    Load the model.
    :return: acc_model, gyro_model
    """
    model_full_path = os.path.join(MODEL_DIR, self.model_name)
    if not os.path.exists(model_full_path):
        logger.info("Model {0} does not exist; starting training".format(model_full_path))
        self._train()
    return svmutil.svm_load_model(model_full_path)
def solve(im):
    im = denoise(im)
    im_list = crop(im)
    tmp = str(round(time.time() * 1000))
    with open(tmp, "w") as f:
        for im in im_list:
            f.write("0" + feature(im))
    y, x = svmutil.svm_read_problem(tmp)
    model = svmutil.svm_load_model(MODEL_FILE)
    p_label, p_acc, p_val = svmutil.svm_predict(y, x, model)
    os.remove(tmp)
    return "".join([chr(round(x)) for x in p_label])
def _reload_model(self):
    """
    Reload SVM model from configured file path.
    """
    if self.svm_model_elem and not self.svm_model_elem.is_empty():
        svm_model_tmp_fp = self.svm_model_elem.write_temp()
        self.svm_model = svmutil.svm_load_model(svm_model_tmp_fp)
        self.svm_model_elem.clean_temp()
    if self.svm_label_map_elem and not self.svm_label_map_elem.is_empty():
        self.svm_label_map = \
            cPickle.loads(self.svm_label_map_elem.get_bytes())
def train_grasp(grasp_type, side):
    """
    train_grasp(grasp_type): train a linear svm classifier for a specific grasp type\n
    grasp_type: hand grasping type\n
    side: left hand or right hand\n
    """
    # train
    datafile = "model/traindata_grasp_" + grasp_type + "_" + side
    if not os.path.isfile(datafile):
        srcfile = "data/feature_grasp_train.csv"
        write_svmdata_grasp(srcfile, datafile, grasp_type, side, 0)
    label_train, data_train = svmutil.svm_read_problem(datafile)
    modelfile = "model/model_grasp_" + grasp_type + "_" + side
    m = []
    if not os.path.isfile(modelfile):
        print("train model: " + grasp_type + "_" + side)
        label_weight = {}
        for v in label_train:
            if v in label_weight:  # was label_weight.has_key(v), removed in Python 3
                label_weight[v] += 1
            else:
                label_weight[v] = 1
        sorted_label = sorted(label_weight)
        param_weight = ' '
        for v in sorted_label:
            label_weight[v] = float(
                len(label_train)) / len(sorted_label) / label_weight[v]
            param_weight += '-w%d %f ' % (v, label_weight[v])
        prob = svmutil.svm_problem(label_train, data_train)
        param = svmutil.svm_parameter('-t 0 -b 1 -q' + param_weight)
        print('-t 0 -b 1 -q' + param_weight)
        # param = svmutil.svm_parameter('-t 0 -c 4 -b 1 -q')
        m = svmutil.svm_train(prob, param)
        svmutil.svm_save_model(modelfile, m)
    else:
        print("load model: " + grasp_type + "_" + side)
        m = svmutil.svm_load_model(modelfile)
    # test
    grasp_info = read_info("data/feature_grasp_test.csv", side)
    datafile = "model/testdata_grasp_" + grasp_type + "_" + side
    if not os.path.isfile(datafile):
        srcfile = "data/feature_grasp_test.csv"
        write_svmdata_grasp(srcfile, datafile, grasp_type, side, 1)
    label_test, data_test = svmutil.svm_read_problem(datafile)
    p_label, p_acc, p_val = svmutil.svm_predict(label_test, data_test, m, '-b 1')
    f_result = open("result/grasp_" + grasp_type + "_" + side + ".csv", "w")
    for i in range(len(p_label)):
        f_result.write(grasp_info[i] + ", " + str(int(label_test[i])) + ", " +
                       str(int(p_label[i])) + ", ")
        f_result.write("[%.4f]\n" % p_val[i][0])
    f_result.close()
def calculate_image_quality_score(brisque_features):
    model = svmutil.svm_load_model('brisque_svm.txt')
    scaled_brisque_features = scale_features(brisque_features)

    x, idx = svmutil.gen_svm_nodearray(
        scaled_brisque_features,
        isKernel=(model.param.kernel_type == svmutil.PRECOMPUTED))

    nr_classifier = 1
    prob_estimates = (svmutil.c_double * nr_classifier)()

    return svmutil.libsvm.svm_predict_probability(model, x, prob_estimates)
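# Design note on calculate_image_quality_score above: it bypasses
# svmutil.svm_predict and calls the C-level libsvm.svm_predict_probability
# on a hand-built svm_node array, which avoids constructing dummy labels
# and returns the regression model's single output in one call. The
# isKernel flag only matters for precomputed-kernel models; for the usual
# RBF BRISQUE model it evaluates to False.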
def GetCode_SKB_123(picpath, types='path'):
    model = svmutil.svm_load_model(modelpath_SKB_123)
    if types == 'path':
        img = Image.open(picpath).convert('L').point([0] * 165 + [1] * (256 - 165), '1')
    elif types == 'img':
        img = picpath
    pixel_cnt_list = GetFeature(img, 'img')
    tempath = os.path.join(os.getcwd(), 'temp.txt')
    with open(tempath, 'w') as f:
        f.writelines(GetFeatureStr(pixel_cnt_list, 0))
    y0, x0 = svmutil.svm_read_problem(tempath)
    os.remove(tempath)
    p_label, p_acc, p_val = svmutil.svm_predict(y0, x0, model, '-q')
    return chr(int(p_label[0]))
def load_model(self, model_file, check_size=True):
    """
    Taken from: https://github.com/Duke-GCB/Predict-TF-Binding

    Loads a svm model from a file and computes its size

    :param model_file: The file name of the model to load
    :return: A dictionary with keys model, file, and size
    """
    model = svmutil.svm_load_model(model_file)
    model_dict = {'model': model}
    if check_size:
        # sv includes a -1 term that is not present in the model file, so subtract 1
        model_dict['size'] = len(model.get_SV()[0]) - 1
    return model_dict
def _reload_model(self):
    """
    Reload SVM model from configured file path.
    """
    if self.svm_model_elem and not self.svm_model_elem.is_empty():
        svm_model_tmp_fp = self.svm_model_elem.write_temp()
        self.svm_model = svmutil.svm_load_model(svm_model_tmp_fp)
        self.svm_model_elem.clean_temp()
    if self.svm_label_map_elem and not self.svm_label_map_elem.is_empty():
        self.svm_label_map = \
            pickle.loads(self.svm_label_map_elem.get_bytes())
def predictSVMFromFiles(tp, datasetOutDir, fileIdentifier):
    jobs = []
    for testKernelFileLoc in getAllKernelFileLocs(datasetOutDir, fileIdentifier, False):
        testKernel = loadPickle(testKernelFileLoc)
        mfl = getModelFileLoc(getKernelDir(testKernelFileLoc), 'svm', fileIdentifier)
        model = svm_load_model(mfl)
        predictFileLoc = getPredictFileLoc(getModelDir(mfl), fileIdentifier)
        if not os.path.exists(predictFileLoc):
            # pass the callable and its arguments separately so the work runs
            # on the pool instead of being invoked inline
            jobs.append(tp.apply_async(predictSVMAndSave,
                                       (model, testKernel, predictFileLoc)))
    return jobs
def predict_main(file, model, out, now_path):
    file_path = os.path.join(now_path, file)
    out_path = os.path.join(now_path, out)
    y_p, x_p = svm_read_problem(file_path)
    model = svm_load_model(model)
    p_label, p_acc, p_val = svm_predict(y_p, x_p, model)
    result = 'True,Predict\n'
    for i in range(len(y_p)):
        result += str(y_p[i]) + ',' + str(p_label[i]) + '\n'
    # the with-statement closes the file; the explicit close() was redundant
    with open(out_path + '_predict.csv', 'w', encoding="utf-8") as f:
        f.write(result)
def load_pretrained_targetmodels():
    """Load the pre-trained target models (X, Y, S) for each hand."""
    # left target models
    m_target_l = []
    for axis in ("X", "Y", "S"):
        m_target_l.append(svmutil.svm_load_model("model/model_target%s_left" % axis))
    # right target models
    m_target_r = []
    for axis in ("X", "Y", "S"):
        m_target_r.append(svmutil.svm_load_model("model/model_target%s_right" % axis))
    return [m_target_l, m_target_r]
def __init__(self, name, model, labels, n_features, ftype, error):
    _p, _n = labels
    super(self.__class__, self).__init__(name, _p, _n, None, n_features,
                                         ftype, error)
    self.model = svm_load_model(model)

    def clf(x):
        if hasattr(x, 'tolist'):
            xx = x.tolist()
        else:
            xx = x
        p_label, _, _ = svm_predict([0], [xx], self.model, '-q')
        return p_label[0]

    self.clf1 = clf
def live_test():
    subdir = 'data/'
    img_kinds = ["happy", "anger", "neutral", "surprise"]
    models = {}
    # load all the models
    print "Loading Models"
    for img_kind in img_kinds:
        print 'loading for: ' + img_kind
        models[img_kind] = svmutil.svm_load_model(subdir + img_kind + '.model')
    print "---------------------"

    print "Loading cascade"
    face_cascade = "haarcascades/haarcascade_frontalface_alt.xml"
    hc = cv.Load(face_cascade)
    print "---------------------"

    capture = cv.CaptureFromCAM(0)
    while True:
        img = cv.QueryFrame(capture)
        cv.ShowImage("camera", img)
        key_pressed = cv.WaitKey(50)
        if key_pressed == 27:
            break
        elif key_pressed == 32:
            print '~> KEY PRESSED <~'
            # do face detection
            print 'detecting face'
            returned = face.handel_camera_image(img, hc)
            if returned is None:
                print "No face || more than one face"
            else:
                (img_o, img_face) = returned
                cv.ShowImage("face", img_face)
                # get features from the face
                results = {}
                for img_kind in img_kinds:
                    test_data = get_image_features(img_face, True, img_kind)
                    predict_input_data = []
                    predict_input_data.append(test_data)
                    # do svm query
                    (val, val_2, label) = svmutil.svm_predict([1], predict_input_data, models[img_kind])
                    results[img_kind] = label[0][0]
                    print img_kind + str(results[img_kind])
                sorted_results = sorted(results.iteritems(), key=operator.itemgetter(1))
                print sorted_results[len(sorted_results) - 1][0]
                print "---------------------"
def read_model(self, filename):
    """ Read a model and overwrite the old one """
    try:
        self.model = su.svm_load_model(filename)
    except IOError:
        print("Error in reading file: ", filename)
        return False
    m = re.search(r"(.*)\.(svm)", filename)
    # read the names from npy
    basename = m.group(1)
    try:
        self.names = np.load(basename + ".npy")
    except IOError:
        self.names = ['x1', 'x2', 'x3']
    return True
def test_model(img_kind):
    subdir = "data/"
    model = svmutil.svm_load_model(subdir + img_kind + '.model')
    print "Finished Loading Model"

    total_count = 0
    correct_count = 0
    wrong_count = 0

    the_ones = glob.glob(subdir + "f_" + img_kind + "*.jpg")
    all_of_them = glob.glob(subdir + "f_*_*.jpg")
    the_others = []
    for x in all_of_them:
        total_count += 1
        if the_ones.count(x) < 1:
            the_others.append(x)

    for x in the_ones:
        img = cv.LoadImageM(x)
        cv.ShowImage("img", img)
        cv.WaitKey(10)
        img_features = get_image_features(img, True, img_kind)
        predict_input_data = []
        predict_input_data.append(img_features)
        (val, val_2, val_3) = svmutil.svm_predict([1], predict_input_data, model)
        if int(val[0]) == 1:
            print 'correct'
            correct_count += 1
        else:
            wrong_count += 1

    for x in the_others:
        img = cv.LoadImageM(x)
        cv.ShowImage("img", img)
        cv.WaitKey(10)
        img_features = get_image_features(img, True, img_kind)
        predict_input_data = []
        predict_input_data.append(img_features)
        (val, val_2, val_3) = svmutil.svm_predict([1], predict_input_data, model)
        if int(val[0]) == -1:
            correct_count += 1
        else:
            wrong_count += 1

    print "Total Pictures: " + str(total_count)
    print "Correct: " + str(correct_count)
    print "Wrong: " + str(wrong_count)
    print "Accuracy: " + str(correct_count / float(total_count) * 100) + '%'
def readModels(self, Dir):
    print "Reading models..."
    if not os.path.isdir(Dir):
        print "[E]Cannot find models, please check the given directory!"
        return
    self.models = []
    for index in xrange(self.inputDim):
        filename = Dir + "/model_" + str(index)
        if not os.path.isfile(filename):
            print "[E]Wrong models!"
            self.models = []
            return
        model = svmutil.svm_load_model(filename)
        self.models.append(model)
    print "Reading models...Done."
def test_measure_BRISQUE(imgPath):
    # read image from given path
    dis = cv2.imread(imgPath, 1)
    if dis is None:
        print("Wrong image path given")
        print("Exiting...")
        sys.exit(0)

    # convert to gray scale
    dis = cv2.cvtColor(dis, cv2.COLOR_BGR2GRAY)

    # compute feature vectors of the image
    features = compute_features(dis)

    # rescale the brisqueFeatures vector from -1 to 1
    x = [0]

    # pre loaded lists from C++ Module to rescale brisque features vector to [-1, 1]
    min_ = [0.336999, 0.019667, 0.230000, -0.125959, 0.000167, 0.000616, 0.231000,
            -0.125873, 0.000165, 0.000600, 0.241000, -0.128814, 0.000179, 0.000386,
            0.243000, -0.133080, 0.000182, 0.000421, 0.436998, 0.016929, 0.247000,
            -0.200231, 0.000104, 0.000834, 0.257000, -0.200017, 0.000112, 0.000876,
            0.257000, -0.155072, 0.000112, 0.000356, 0.258000, -0.154374, 0.000117,
            0.000351]

    max_ = [9.999411, 0.807472, 1.644021, 0.202917, 0.712384, 0.468672, 1.644021,
            0.169548, 0.713132, 0.467896, 1.553016, 0.101368, 0.687324, 0.533087,
            1.554016, 0.101000, 0.689177, 0.533133, 3.639918, 0.800955, 1.096995,
            0.175286, 0.755547, 0.399270, 1.095995, 0.155928, 0.751488, 0.402398,
            1.041992, 0.093209, 0.623516, 0.532925, 1.042992, 0.093714, 0.621958,
            0.534484]

    # append the rescaled vector to x
    for i in range(0, 36):
        min = min_[i]
        max = max_[i]
        x.append(-1 + (2.0 / (max - min) * (features[i] - min)))

    # load model
    model = svmutil.svm_load_model("allmodel")

    # create svm node array from python list
    x, idx = gen_svm_nodearray(x[1:], isKernel=(model.param.kernel_type == PRECOMPUTED))
    x[36].index = -1  # set last index to -1 to indicate the end.

    # get important parameters from model
    svm_type = model.get_svm_type()
    is_prob_model = model.is_probability_model()
    nr_class = model.get_nr_class()

    if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVR):
        # here svm_type is EPSILON_SVR as it's a regression problem
        nr_classifier = 1
    dec_values = (c_double * nr_classifier)()

    # calculate the quality score of the image using the model and svm_node_array
    qualityscore = svmutil.libsvm.svm_predict_probability(model, x, dec_values)

    return qualityscore
def test(test_file, model_file, fold_num, mapping=None, multilabel=None, debug=False):
    '''
    Returns predicted labels and prediction values, as well as the test
    labels (potentially remapped).

    Requires a test instance file and a corresponding model file. Remaps the
    labels in the test file (optional), classifies the test instances
    against the model.
    '''
    if multilabel:
        temp_labels, instances = alt_read_problem(test_file)
        temp_labels = [[mapping[l] for l in label] for label in temp_labels]
        labels = []
        for temp_labs in temp_labels:
            if multilabel[1] in temp_labs:
                labels.append(multilabel[1])
            else:
                assert len(set(temp_labs)) == 1, "Something appears to be wrong with the intermediate mapping. There's still more than one label present for an instance: {0}".format(temp_labs)
                labels.append([l for l in temp_labs if l != multilabel[1]][0])
    else:
        labels, instances = svm_read_problem(test_file)
        labels = reMap(labels, mapping)

    # Exclude instances which have 0 as their label
    labels, instances = zip(*[(label, instance) for label, instance in zip(labels, instances) if label != 0])

    if debug:
        with open(os.path.basename(test_file) + '.remap', 'w') as fout:
            for label, instance in zip(labels, instances):
                output = '{0} '.format(str(label))
                for idx, val in instance.items():
                    output += '{0}:{1} '.format(str(idx), str(val))
                output = output.strip() + '\n'
                fout.write(output)

    model = svm_load_model(model_file)

    print '---testing'
    if classifier == 'liblinear':
        pred_labels, ACC, pred_values, label_order = svm_predict(labels, instances, model)
    elif classifier == 'libsvm':
        pred_labels, (ACC, MSC, SCC), pred_values = svm_predict(labels, instances, model, options='-b 1')
        label_order = model.get_labels()

    return pred_labels, pred_values, label_order, labels
def parse_compact_nonlinear_svm(file_compact_svm, flag_load_model=True):
    """
    Parse configurations and/or actual models, based on a config file
    written by write_compact_nonlinear_svm.
    """
    print "Loading (compact) nonlinear SVM configuration:\n%s..." % file_compact_svm

    model = dict()
    model["file_svm_model"] = None
    model["svm_model"] = None
    model["target_class"] = None
    model["file_SVs"] = None
    model["SVs"] = None
    model["str_kernel"] = None
    model["func_kernel"] = None
    model_keys = model.keys()

    with open(file_compact_svm) as fin:
        for line in fin:
            strs = line.strip().split("=")
            if len(strs) == 2:
                key = strs[0].strip()
                if key in model_keys:
                    model[key] = strs[1].strip()

    # string to integer
    model["target_class"] = int(model["target_class"])
    print model

    if flag_load_model:
        print "... finding kernel.."
        model["func_kernel"] = getattr(kernels, model["str_kernel"])

        dir_compact = os.path.dirname(file_compact_svm)
        print "... loading SVM model.."
        model["svm_model"] = svmutil.svm_load_model(os.path.join(dir_compact, model["file_svm_model"]))

        print "... loading SVs (may take some time).."
        tmp = os.path.join(dir_compact, model["file_SVs"])
        if not os.path.exists(tmp):
            tmp += ".npy"
        model["SVs"] = np.load(tmp)

    return model
def main(argv=None):
    inimg = sys.argv[1]  # input image file name

    # load in the k-means centroids of bar chart, pie chart, and scatter plot
    centroids = []
    centroids.extend(pickle.load(open(centroid_bar)))
    centroids.extend(pickle.load(open(centroid_pie)))
    centroids.extend(pickle.load(open(centroid_scatter_plot)))
    centroids = numpy.array(centroids)
    D, __ = centroids.shape

    img = Image.open(inimg).convert('L')
    # image resize
    img_resized = imageResize(img, img_size)
    # get the luma of the image
    luma_resized = imageToLuma(img_resized)
    # get all the patches of the image
    luma_patches = getAllPatches(luma_resized, patch_size)
    # standardize patches
    patches_standardized = map(patchStandardize, luma_patches)
    # flatten all patch matrices in the patches_pool list
    patches_vector = arrayLstFlatten(patches_standardized)
    # get the closest centroid index based on features
    idx, __ = vq(patches_vector, centroids)
    idx = idx.tolist()
    # based on index form the feature
    feature = map(idx.count, range(0, D))

    model = svm_load_model(inmodel)
    label, __, __ = svm_predict([0], [feature], model, '-q')
    if label[0] == 1:
        print '***bar chart***'
    elif label[0] == 2:
        print '***pie chart***'
    elif label[0] == 3:
        print '***scatter plot***'
def main():
    model = sv.svm_load_model("model")
    size = width, height = 400, 400
    black = 0, 0, 0
    white = 255, 255, 255
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption("HW_Recon | Ingrese un caracter.")  # "Enter a character."
    data_flag = False
    last_pos = []
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
        mg = pygame.mouse.get_pressed()
        mpos = pygame.mouse.get_pos()
        if mg == (0, 0, 1) and data_flag:
            a = predecir(screen, model)  # predecir = "predict"
            if DEBUG:
                print a
            pygame.display.set_caption("HW_Recon| Letra: {0}".format(a))  # "Letter"
        if mg == (1, 0, 0) and not last_pos:
            last_pos = mpos
            data_flag = True
        if mg == (1, 0, 0) and last_pos:
            if np.linalg.norm(np.array(mpos) - np.array(last_pos)) < 30:
                pygame.draw.line(screen, white, last_pos, mpos, 30)
            last_pos = mpos
        if mg == (0, 1, 0) and data_flag:
            data_flag = False
            last_pos = []
            screen.fill(black)
            pygame.display.set_caption("HW_Recon | Ingrese un caracter.")
        pygame.display.flip()
def __init__(self, c=None, gamma=None, filename=None, neighbours=3, verbose=0):
    self.neighbours = neighbours
    self.verbose = verbose
    if filename:
        # If a filename is given, load a model from the given filename
        if verbose:
            print 'Loading classifier from "%s"...' % filename
        self.model = svm_load_model(filename)
    elif c is None or gamma is None:
        raise Exception("Please specify both C and gamma.")
    else:
        self.param = svm_parameter()
        self.param.C = c  # Soft margin
        self.param.kernel_type = RBF  # Radial kernel type
        self.param.gamma = gamma  # Parameter for radial kernel
        self.model = None
def classify2(filename, classLabel=0):
    # renamed from `str` to avoid shadowing the builtin
    fpath = "/Thu_Life/CS/SVM/data/trainData/Test_SVMFile/singleSVM_TestFile"
    f = open(fpath, "wb")
    t = VSM.TextToVector2(filename)
    slabel = ("%d ") % classLabel
    if len(t) > 0:
        f.write(slabel)
        for k in range(len(t)):
            str1 = ("%d:%d ") % (t[k][0], t[k][1])
            f.write(str1)
        f.write("\r\n")
    else:
        return 0
    f.close()
    y, x = svmutil.svm_read_problem(fpath)
    model = svmutil.svm_load_model("../SVMTrainFile250.model")
    label, b, c = svmutil.svm_predict(y, x, model)
    return label[0]
def easy_predict(train_name, test_name):
    range_file = train_name + ".range"
    model_file = train_name + ".model"
    assert os.path.exists(test_name), "testing file not found"
    assert os.path.exists(model_file), "model file not found"
    assert os.path.exists(range_file), "range file not found"

    file_name = os.path.split(test_name)[1]
    scaled_test_file = file_name + ".scale"
    predict_test_file = file_name + ".predict"

    cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_name, scaled_test_file)
    print('Scaling testing data...')
    Popen(cmd, shell=True, stdout=PIPE).communicate()

    (prob_y, prob_x) = svmutil.svm_read_problem(scaled_test_file)
    model = svmutil.svm_load_model(model_file)
    pred_labels, (ACC, MSE, SCC), pred_values = svmutil.svm_predict(prob_y, prob_x, model, "-b 1")
    return pred_values, MSE, SCC
def iqr_model_test(filepath_model, matrix_kernel_test, clipids_test, target_class=1):
    """
    Apply an SVM model on test data

    @param filepath_model: a full path to load the learned SVM model
    @param matrix_kernel_test: n-by-m kernel matrix where n (row) is |SVs| and m (col) is |test data|
    @type matrix_kernel_test: 2D numpy.array
    @param clipids_test: list of clipids ordered
    @param target_class: positive class id. Default = 1.
    @return: dictionary with 'probs', 'clipids'
    @rtype: dictionary with 'probs' (np.array), 'clipids' (int list)
    """
    model = svmutil.svm_load_model(filepath_model)
    weights = svmtools.get_SV_weights_nonlinear_svm(model, target_class=target_class)

    # compute margins
    margins = weights[0] * matrix_kernel_test[0]
    for i in range(1, matrix_kernel_test.shape[0]):
        margins += weights[i] * matrix_kernel_test[i]
    if matrix_kernel_test.ndim == 1:
        # case where there was a single test datum:
        # make a single-number margin into an np array
        margins = np.array([margins])

    # compute probs, using Platt scaling
    rho = model.rho[0]
    probA = model.probA[0]
    probB = model.probB[0]
    probs = 1.0 / (1.0 + np.exp((margins - rho) * probA + probB))
    del margins

    # case when the label of positive data was 2nd in the SVM model symbol list;
    # since Platt scaling was parameterized for negative data, swap probs
    idx_target = svmtools.get_column_idx_for_class(model, target_class)
    if idx_target == 1:
        probs = 1.0 - probs

    output = dict()
    output['probs'] = probs
    output['clipids'] = clipids_test
    return output
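# The Platt-scaling step used in iqr_model_test above, factored out as a
# standalone sketch for clarity: libsvm's probA/probB parameterize a
# sigmoid over the decision value f = margin - rho, giving
# p = 1 / (1 + exp(f * probA + probB)).
def platt_probability(margins, rho, probA, probB):
    import numpy as np
    f = np.asarray(margins, dtype=float) - rho
    return 1.0 / (1.0 + np.exp(f * probA + probB))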