Example no. 1
def hash(features, num_train_samples=58000, L=8):
    bits = []
    for i in range(L):
        start = timeit.default_timer()
        m = liblinearutil.load_model(
            'models/tr{0:05d}-L{1:02d}-b{2:02d}.model'.format(
                num_train_samples, L, i))
        p_label, p_acc, p_val = liblinearutil.predict([0] * features.shape[0],
                                                      features.tolist(), m,
                                                      '-q')
        bits.append(p_label)
        end = timeit.default_timer()
        print('[HASH] {0:3d}th bit hashed. {1:.4f} seconds elapsed'.format(
            i, end - start))

    start = timeit.default_timer()
    bits = np.vstack(bits).transpose().astype(int)
    bits[np.nonzero(bits == 0)] = -1

    with open('hash/tr{0:05d}-L{1:02d}'.format(num_train_samples, L),
              'wb') as fo:
        cPickle.dump(bits, fo)
    end = timeit.default_timer()
    print('[HASH] Hash codes saved. {0:.4f} seconds elapsed'.format(end -
                                                                    start))
    return
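A minimal usage sketch for hash() above (the feature matrix is hypothetical; assumes numpy, timeit, cPickle, and liblinearutil are imported and the files under models/ exist):

import numpy as np

features = np.random.rand(100, 4096)  # hypothetical: one row of dense features per sample
hash(features, num_train_samples=58000, L=8)
# pickles a (100, 8) array of -1/+1 hash bits to 'hash/tr58000-L08'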
Example no. 2
    def load(self, model_path):
        """
		Loads the model from the directory at the given path

		@type model_path: str
		@param model_path: path to load the trained model
		"""

        # load feature_extractor object (used to extract features for the test set)
        ## if-else statement added on 06.02.2017

        # if (self.__feature_extractor.feature_template == "relational") and (self.__feature_extractor.parser_type == "spacy"):
        # 	print("Relational model with spaCy parser cannot be loaded")
        # else:
        # 	self.__feature_extractor = pickle.load(open(os.path.join(model_path, "feature_extractor"), "rb"))
        # 	# load trained model
        # 	self.__liblinear_model = liblinearutil.load_model(os.path.join(model_path, "liblinear_model"))
        try:
            print(self.model_path)
            self.__feature_extractor = pickle.load(
                open(os.path.join(self.model_path, "feature_extractor"), "rb"))
            # load trained model
            self.__liblinear_model = liblinearutil.load_model(
                os.path.join(self.model_path, "liblinear_model"))
        except:
            raise Exception("No files found in the model path " +
                            self.model_path)
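A hedged sketch of the save() counterpart this loader implies (method name and file layout assumed from the two paths read above):

    def save(self, model_path):
        """Persists the feature extractor and liblinear model to model_path."""
        # assumed inverse of load(): same two file names
        with open(os.path.join(model_path, "feature_extractor"), "wb") as fo:
            pickle.dump(self.__feature_extractor, fo)
        liblinearutil.save_model(
            os.path.join(model_path, "liblinear_model"), self.__liblinear_model)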
Example no. 3
	def load(path):
		obj = SVM()
		with open(path, 'rb') as f:
			obj._labels, obj._features = pickle.load(f)
		obj._model = liblinear.load_model(path + '-model')
		return obj
Example no. 4
    def predictTraitValue(self,
                          imagelist_file='image_list.txt',
                          class_label=1,
                          outfile='output',
                          norm=-1,
                          debug=False):
        """
        Calls external functions to obtain predicted value between 0 and 1 for given image, updates class values
        Args:
            imagelist_file: file that contains the list of image/images to be processed
            class_label: label given images as either positive or negative for SVM classification
            outfile: filename of output for Dense SIFT analysis
        Return: True if successful
        """
        with open(imagelist_file, 'w') as f:
            f.write('tmp.jpg\n')
        cv2.imwrite('tmp.jpg', self.norm_image)

        hog_histogram(imagelist_file, class_label, outfile, norm, debug)

        prob_y, prob_x = svm_read_problem(outfile)
        model_file = models[self.trait]
        model = load_model(model_file)
        self.p_val = predict(prob_y, prob_x, model)[2][0][0]

        return True
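The [2][0][0] chain above unpacks liblinear's three-part predict() return value; a short sketch of what each index selects:

p_labels, p_acc, p_vals = predict(prob_y, prob_x, model)
# p_labels: predicted label per instance
# p_acc:    (accuracy, mean squared error, squared correlation coefficient)
# p_vals:   per-instance decision values (probabilities when '-b 1' is passed)
first_value = p_vals[0][0]  # same as predict(prob_y, prob_x, model)[2][0][0]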
Example no. 5
 def __init__(self,
              model_filename,
              features_filename,
              output_probability=False):
     self.model = liblinearutil.load_model(model_filename)
     self.feature_indices = self._load_features(features_filename)
     self.output_probability = output_probability
Example no. 6
 def load(path):
     obj = SVM()
     with open(path, 'rb') as f:
         obj._labels, obj._features = pickle.load(f)
     obj._model = liblinear.load_model(path + '-model')
     return obj
Example no. 7
 def load_model(self , path) :
     logging.info("load model of %s" % self.name)
     self.model = liblinearutil.load_model(path)
     model_ext = self._load_svm_model(".".join([path , "ext"]))
     self.feature_dict = model_ext["feature_dict"]
     self.gram_n = model_ext["gram_n"]
     logging.info("done") 
Example no. 8
 def load_svmmodel(self, filepath):
     """
     load svm model
     """
     if(os.path.exists(filepath)):
         self._model = load_model(filepath)
     else:
         raise IOError(filepath + ': Model file not found.')
Example no. 9
 def load_model(self , path) :
     logging.info("load model of %s" % self.name)
     self.model = liblinearutil.load_model(path)
     model_ext = self._load_svm_model(".".join([path , "ext"]))
     self.words2idx = model_ext["words2idx"]
     self.words_idf = model_ext["words_idf"]
     self.gram_n = model_ext["gram_n"]
     logging.info("done.")
Example no. 10
def load(name):
    '''Loads the model and data.
    
    Columns is a dictionary used when the feature keys were converted to
    numbers, for liblinear.
    '''
    model = llb.load_model('models/{}.model'.format(name))
    columns = dts.Dataset.from_columns(name)._features
    return model, columns
Example no. 11
def batchProcess(imgPath, outPath, opts):
    # read eDN model(s)
    descFile = open(opts.descPath, 'r')
    desc = pickle.load(descFile)
    descFile.close()

    nFeatures = np.sum([
        d['desc'][-1][0][1]['initialize']['n_filters'] for d in desc
        if d != None
    ])

    # load SVM model and whitening parameters
    svm = load_model(opts.svmPath)
    f = open(opts.whitePath, 'r')
    whiteParams = np.asarray([map(float, line.split(' ')) for line in f]).T
    f.close()

    # assemble svm model
    svmModel = {}
    svmModel['svm'] = svm
    svmModel['whitenParams'] = whiteParams

    biasToCntr = (svm.get_nr_feature() - nFeatures) == 1

    model = EDNSaliencyModel(desc, svmModel, biasToCntr)

    puts(colored.yellow("Collecting image filenames..."))
    todo_list = [f for f in listdir(imgPath) if isfile(join(imgPath, f))]
    existing_list = [f for f in listdir(outPath) if isfile(join(outPath, f))]
    #image_list.sort()
    puts("Done")

    for f in todo_list:
        if f not in existing_list:
            print '-------------------------------------------------------------'
            print 'processing', f
            print

            try:
                # read image
                img = misc.imread(join(imgPath, f))
                # compute saliency map
                salMap = model.saliency(img, normalize=False)
                # normalize and save the saliency map to disk
                normSalMap = (255.0 / (salMap.max() - salMap.min()) *
                              (salMap - salMap.min())).astype(np.uint8)
                misc.imsave(join(outPath, f), normSalMap)

            except Exception as e:
                puts(colored.red('ERROR: {}'.format(e)))
                puts(colored.yellow('filename: {}'.format(f)))
        else:
            print '-------------------------------------------------------------'
            puts(colored.yellow('Saliency map already exists! ({})'.format(f)))
            print
Example no. 12
def liblinear_classifier(svm_input=None, y=None, x=None):
    """Runs the trained liblinear classifier for spam filtering.
    """
    y = y if y is not None else []
    x = x if x is not None else []
    svm_model = load_model(SVM_MODEL_FILE)

    if svm_input:
        y, x = svm_read_problem(svm_input)

    p_label, p_acc, p_val = predict(y, x, svm_model, "-q")

    return p_label
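When svm_input is omitted, y and x must already be in liblinear's Python format: a list of labels plus a list of sparse feature dicts keyed by 1-based feature index. A hedged sketch with hand-built inputs:

y = [1, -1]
x = [{1: 0.5, 3: 1.0}, {2: 0.25}]  # {feature_index: value}
labels = liblinear_classifier(y=y, x=x)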
Example no. 13
def liblinear_classifier(svm_input=None, y=None, x=None):
    """Runs the trained liblinear classifier for spam filtering.
    """
    y = y if y is not None else []
    x = x if x is not None else []
    svm_model = load_model(SVM_MODEL_FILE)

    if svm_input:
        y, x = svm_read_problem(svm_input)

    p_label, p_acc, p_val = predict(y, x, svm_model, "-q")

    return p_label
Example no. 14
 def __init__(self, featureSet, options):
     self.featureSet = featureSet
     self.params = '-b 1'
     self.lmw = options['lmw']
     modelName = options['modelName']
     sys.stderr.write('loading transition model...')
     self.transProbs = Bigram.getModelFromFile(options['bigramModelFile'])
     sys.stderr.write('done\nloading observation model...')
     self.model = load_model('{0}.model'.format(modelName))
     self.labelCounter = options['labelCounter']
     self.featCounter = options['featCounter']
     sys.stderr.write('done\n')
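The '-b 1' stored in self.params is liblinear's probability-estimates flag (meaningful for logistic-regression solvers); a sketch of how it would typically be forwarded to predict() (call site hypothetical):

p_labels, p_acc, p_vals = predict(y, x, self.model, self.params)
# with '-b 1', p_vals holds per-class probabilities instead of decision values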
Example no. 15
def batchProcess(imgPath, outPath, opts):
    # read eDN model(s)
    descFile = open(opts.descPath, 'r')
    desc = pickle.load(descFile)
    descFile.close()

    nFeatures = np.sum([d['desc'][-1][0][1]['initialize']['n_filters']
                    for d in desc if d != None])

    # load SVM model and whitening parameters
    svm = load_model(opts.svmPath)
    f = open(opts.whitePath, 'r')
    whiteParams = np.asarray([map(float, line.split(' ')) for line in f]).T
    f.close()

    # assemble svm model
    svmModel = {}
    svmModel['svm'] = svm
    svmModel['whitenParams'] = whiteParams

    biasToCntr = (svm.get_nr_feature()-nFeatures) == 1

    model = EDNSaliencyModel(desc, svmModel, biasToCntr)

    puts(colored.yellow("Collecting image filenames..."))
    todo_list = [ f for f in listdir(imgPath) if isfile(join(imgPath,f)) ]
    existing_list = [ f for f in listdir(outPath) if isfile(join(outPath,f)) ]
    #image_list.sort()
    puts("Done")

    for f in todo_list:
        if f not in existing_list:
            print '-------------------------------------------------------------'
            print 'processing', f
            print

            try:
                # read image
                img = misc.imread(join(imgPath,f))
                # compute saliency map
                salMap = model.saliency(img, normalize=False)
                # normalize and save the saliency map to disk
                normSalMap = (255.0 / (salMap.max()-salMap.min()) *
                         (salMap-salMap.min())).astype(np.uint8)
                misc.imsave(join(outPath,f), normSalMap)

            except Exception as e:
                puts(colored.red('ERROR: {}'.format(e)))
                puts(colored.yellow('filename: {}'.format(f)))
        else:
            print '-------------------------------------------------------------'
            puts(colored.yellow('Saliency map already exists! ({})'.format(f)))
            print
Example no. 16
 def __init__(self, featureSet, options):
     self.featureSet = featureSet
     self.params = '-b 1'
     self.lmw = options['lmw']
     modelName = options['modelName']
     sys.stderr.write('loading transition model...')
     self.transProbs = Bigram.getModelFromFile(options['bigramModelFile'])
     sys.stderr.write('done\nloading observation model...')
     self.model = load_model('{0}.model'.format(modelName))
     self.labelCounter = options['labelCounter']
     self.featCounter = options['featCounter']
     sys.stderr.write('done\n')
Example no. 17
def test(C, Y_test, X_test, x_lines):
    """
    This function takes in the test labels and features and prints out the accuracy
    :param C      : regularization parameter C used in the model filename
    :param X_test : test features
    :param Y_test : test labels
    :param x_lines: number of training lines encoded in the model filename
    :return predicted labels
    """
    model = lu.load_model("model/lmods2_tamper" + str(round(C, 2)) + "_" + str(x_lines) + "l.model")
    p_letters, p_acc, p_val = lu.predict(Y_test, X_test, model)
    return p_letters
Example no. 18
def eDNsaliency(imgPath, outPath, opts):
    # read image
    img = misc.imread(imgPath)

    # read eDN model(s)
    descFile = open(opts.descPath, 'r')
    desc = pickle.load(descFile)
    descFile.close()

    nFeatures = np.sum([
        d['desc'][-1][0][1]['initialize']['n_filters'] for d in desc
        if d != None
    ])

    # load SVM model and whitening parameters
    svm = load_model(opts.svmPath)
    f = open(opts.whitePath, 'r')
    whiteParams = np.asarray([map(float, line.split(' ')) for line in f]).T
    f.close()

    # assemble svm model
    svmModel = {}
    svmModel['svm'] = svm
    svmModel['whitenParams'] = whiteParams

    biasToCntr = (svm.get_nr_feature() - nFeatures) == 1

    # compute saliency map
    model = EDNSaliencyModel(desc, svmModel, biasToCntr)
    salMap = model.saliency(img, normalize=False)

    salMap = salMap.astype('f')
    if not opts.noBlur:
        salMap = ndimage.gaussian_filter(salMap, sigma=30)

    # read fixation map / empirical saliency map
    if opts.fixMap:
        fixMap = misc.imread(opts.fixMap)

        # compute AUC
        if opts.auc:
            auc = evaluate_sal_map(salMap, fixMap)
            logging.info("AUC = %f" % auc)

        # for fair visual comparison, perform histogram equalization with
        # empirical saliency map
        if opts.histeq:
            salMap = hist_equalize_maps(fixMap, salMap)

    # normalize and save the saliency map to disk
    normSalMap = (255.0 / (salMap.max() - salMap.min()) *
                  (salMap - salMap.min())).astype(np.uint8)
    misc.imsave(outPath, normSalMap)
Example no. 19
def load_model(lang):
    """
        Loads the model from file and returns it.
        Models are stored in the resources directory
    """
    print "Using", lang, "linear model.."
    if lang:
        return linu.load_model(os.path.join(
                                os.path.abspath(os.path.dirname(__file__)),
                                'resources',
                                lang + '_sentiment.model'))
    else:
        raise Exception("No language given for sentiment model")
Example no. 20
def eDNsaliency(imgPath, outPath, opts):
    # read image
    img = misc.imread(imgPath)

    # read eDN model(s)
    descFile = open(opts.descPath, 'r')
    desc = pickle.load(descFile)
    descFile.close()

    nFeatures = np.sum([d['desc'][-1][0][1]['initialize']['n_filters']
                    for d in desc if d != None])

    # load SVM model and whitening parameters
    svm = load_model(opts.svmPath)
    f = open(opts.whitePath, 'r')
    whiteParams = np.asarray([map(float, line.split(' ')) for line in f]).T
    f.close()

    # assemble svm model
    svmModel = {}
    svmModel['svm'] = svm
    svmModel['whitenParams'] = whiteParams

    biasToCntr = (svm.get_nr_feature()-nFeatures) == 1

    # compute saliency map
    model = EDNSaliencyModel(desc, svmModel, biasToCntr)
    salMap = model.saliency(img, normalize=False)

    salMap = salMap.astype('f')
    if not opts.noBlur:
        salMap = ndimage.gaussian_filter(salMap, sigma=30)

    # read fixation map / empirical saliency map
    if opts.fixMap:
        fixMap = misc.imread(opts.fixMap)

        # compute AUC
        if opts.auc:
            auc = evaluate_sal_map(salMap, fixMap)
            logging.info("AUC = %f" % auc)

        # for fair visual comparison, perform histogram equalization with
        # empirical saliency map
        if opts.histeq:
            salMap = hist_equalize_maps(fixMap, salMap)

    # normalize and save the saliency map to disk
    normSalMap = (255.0 / (salMap.max()-salMap.min()) *
                 (salMap-salMap.min())).astype(np.uint8)
    misc.imsave(outPath, normSalMap)
Example no. 21
def predict_liblinear(args):
    model_name, files = args[0], args[1:]
    model = load_model(model_name)
    file_objs = [open(fn) for fn in files]
    while True:
        lines = [f.readline() for f in file_objs]
        if '' in lines:
            assert len(set(lines)) == 1
            break
        vector = [float(line.strip()) for line in lines]
        xi, max_idx = gen_feature_nodearray(vector)
        label = liblinear.predict(model, xi)
        #p_label, p_acc, p_val = predict([1], [vector], model)
        print class_to_num(label)
Example no. 22
def predict_liblinear(args):
    model_name, files = args[0], args[1:]
    model = load_model(model_name)
    file_objs = [open(fn) for fn in files]
    while True:
        lines = [f.readline() for f in file_objs]
        if '' in lines:
            assert len(set(lines)) == 1
            break
        vector = [float(line.strip()) for line in lines]
        xi, max_idx = gen_feature_nodearray(vector)
        label = liblinear.predict(model, xi)
        #p_label, p_acc, p_val = predict([1], [vector], model)
        print class_to_num(label)
Example no. 23
 def LoadModel(self):
     if(self.classifierType == "SVM" and self.packageType == "liblinear"):
         from liblinearutil import load_model
         self.classifierModel = load_model(self.featuresSerializationFileName)
     elif(self.classifierType == "DecisionTree" and self.packageType == "nltk"):
         # Load the model
         serializationFile = open(self.featuresSerializationFileName, 'rb')
         
         self.classifierModel = pickle.load(serializationFile)
         
         serializationFile.close()
     else:
         print("Only SVM with liblinear is supported to LoadModel")
     
     '''
Example no. 24
 def LoadModel(self, model_dir):
     # load config
     input = codecs.open(os.path.join(model_dir, "config.json"), "r", "utf-8")
     config_json = json.load(input)
     input.close()
     self.attrs = config_json["attrs"]
     # load feature
     self.feature = attr_feature()
     self.feature.load_Lexicon(os.path.join(model_dir, config_json["feature_lexicon_file"]))
     if not self.feature.is_set:
         raise Exception("Fail to load feature module!")
     # load svm model
     for attr in self.attrs:
         self.models[attr] = load_model(os.path.join(model_dir, "%s.svm.m" % (attr)))
     self.is_set = True
Example no. 25
    def LoadModel(self):
        if (self.classifierType == "SVM" and self.packageType == "liblinear"):
            from liblinearutil import load_model
            self.classifierModel = load_model(
                self.featuresSerializationFileName)
        elif (self.classifierType == "DecisionTree"
              and self.packageType == "nltk"):
            # Load the model
            serializationFile = open(self.featuresSerializationFileName, 'rb')

            self.classifierModel = pickle.load(serializationFile)

            serializationFile.close()
        else:
            print("Only SVM with liblinear is supported to LoadModel")
Example no. 26
	def LoadModel(self, model_dir):
		# load config
		input = codecs.open(os.path.join(model_dir,'config.json'), 'r', 'utf-8')
		config_json = json.load(input)
		input.close()
		self.slots = config_json['slots']
		# load ontology
		self.ontology_file = os.path.join(model_dir,config_json['ontology_file'])
		self.tagsets = ontology_reader.OntologyReader(self.ontology_file).get_tagsets()
		# load feature
		self.feature = feature(self.tagsets)
		self.feature.load_Lexicon(os.path.join(model_dir,config_json['feature_lexicon_file']))
		if not self.feature.is_set:
			raise Exception('Fail to load feature module!')
		# load svm model
		for slot in self.slots:
			self.models[slot] = load_model(os.path.join(model_dir, '%s.svm.m' %(slot)))
		self.is_set = True
Example no. 27
    def predictTraitValue(self, imagelist_file='image_list.txt', class_label=1, outfile='output', norm=-1, debug=False):
        """
        Calls external functions to obtain predicted value between 0 and 1 for given image, updates class values
        Args:
            imagelist_file: file that contains the list of image/images to be processed
            class_label: label given images as either positive or negative for SVM classification
            outfile: filename of output for Dense SIFT analysis
        Return: True if successful
        """
        with open(imagelist_file, 'w') as f:
                f.write('tmp.jpg\n')
        cv2.imwrite('tmp.jpg', self.norm_image)

        hog_histogram(imagelist_file, class_label, outfile, norm, debug)

        prob_y, prob_x = svm_read_problem(outfile)
        model_file = models[self.trait]
        model = load_model(model_file)
        self.p_val = predict(prob_y, prob_x, model)[2][0][0]

        return True
Example no. 28
def liblinear_predict(problem_filepath, model_filepath):
    """
    Using LibLinear to predict result of a problem

    Returns
    -------
        (ids, labels)
    """

    # Reading a problem
    ids, x = liblinearutil.svm_read_problem(problem_filepath)

    print "len(x) = ", len(x)

    # Preparing a model
    model = liblinearutil.load_model(model_filepath)

    # Predicting
    y = [-2] * len(x)
    p_label, p_acc, p_val = liblinearutil.predict(y, x, model)

    return (ids, p_label)
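Because gold labels are unknown at prediction time, y is filled with the dummy value -2 purely to satisfy predict()'s signature; usage is then simply (paths hypothetical):

ids, labels = liblinear_predict("test.problem", "spam.model")
# ids:    values read from the first column of the problem file
# labels: predicted label per instance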
Example no. 29
 def load(self, modelname, featuresname):
     self._svm_model = svm.load_model(modelname)
     self._features.load(open(featuresname, 'rb'))
Example no. 30
 def load(self, model_path):
     """Loads the model from the directory at the given path."""
     self.__feature_extractor = pickle.load(
         open(os.path.join(model_path, "feature_extractor"), "rb"))
     self.__liblinear_model = liblinearutil.load_model(
         os.path.join(model_path, "liblinear_model"))
Example no. 31
def readScales(scalefile):
    scales = {}
    with open(scalefile) as f:
        for line in f:
            k, v = line.strip().split("\t")
            scales[int(k)] = float(v)
    return scales


brnclst = utils.readMetaOptimizeBrownCluster()
embeddings = utils.readMetaOptimizeEmbeddings()
brnspace = initBrnSpace()
scales_shallow = readScales(SHALLOWSCALEFILE)
scales_neuralbrn = readScales(NEURALBRNSCALEFILE)
model_shallow = ll.load_model(SHALLOWMODELFILE)
model_neuralbrn = ll.load_model(NEURALBRNMODELFILE)


def simpleScale(x, trainmaxes=None):
    maxes = trainmaxes if trainmaxes != None else {}
    if trainmaxes == None:
        for itemd in x:
            for k, v in itemd.items():
                if k not in maxes or maxes[k] < abs(v): maxes[k] = abs(v)
    newx = []
    for itemd in x:
        newd = dict.fromkeys(itemd)
        for k, v in itemd.items():
            if k in maxes and maxes[k] != 0: newd[k] = (v + 0.0) / maxes[k]
            else: newd[k] = 0.0
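readScales() above expects one tab-separated "index<TAB>value" pair per line; a minimal sketch of a matching scales file (path hypothetical):

with open("scales.tsv", "w") as f:
    f.write("1\t0.5\n2\t3.25\n")
assert readScales("scales.tsv") == {1: 0.5, 2: 3.25}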
Example no. 32
def predict(instance_file, model_file, param):
    y, x = ll.svm_read_problem(instance_file)
    m = ll.load_model(model_file)
    dist = ll.predict(y, x, m, param)[2]
    print dist[0]
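The instance_file consumed by svm_read_problem uses the sparse LIBSVM text format, one "label index:value ..." line per instance; a hedged sketch (file and model paths hypothetical):

with open("instances.txt", "w") as f:
    f.write("1 1:0.5 3:1.0\n")
    f.write("-1 2:0.25\n")
predict("instances.txt", "some.model", "-q")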
Example no. 33
File: pos.py Project: Adderty/Pyrus
	def load(self, modelname, featuresname):
		self._svm_model = svm.load_model(modelname)
		self._features.load(open(featuresname, 'rb'))
Example no. 34
 def load(self, model_path):
     """Loads the model from the directory at the given path."""
     self.__feature_extractor = pickle.load(
         open(os.path.join(model_path, "feature_extractor"), "rb"))
     self.__liblinear_model = liblinearutil.load_model(
         os.path.join(model_path, "liblinear_model"))
Example no. 35
def predict(input_file, model0_file, model1_file, mapping_file, output_file):
    global bos, eos
    out = open(output_file, 'w')
    m0 = ll.load_model(model0_file)
    m1 = ll.load_model(model1_file)
    mapping = Mapping(mapping_file)
    bos = mapping.map_pos('BOS')
    eos = mapping.map_pos('EOS')

    print '# of features:', len(mapping.feature_dict)

    # for easier mapping from neighbouring pos tags to features
    p_f = {}
    for i in mapping.pos_dict_rev: #[0, 1, 2, 3, ...] bos and eos are also included
        pi = mapping.map_pos_rev(i)
        p_f[(-1, i)] = mapping.map_features('POS_P1:%s' % pi)
        p_f[(-2, i)] = mapping.map_features('POS_P2:%s' % pi)
        p_f[(+1, i)] = mapping.map_features('POS_N1:%s' % pi)
        p_f[(+2, i)] = mapping.map_features('POS_N2:%s' % pi)


        for j in mapping.pos_dict_rev:
            pj = mapping.map_pos_rev(j)
            p_f[(-1, -2, i, j)] = mapping.map_features('POS_P1_P2:%s_%s' % (pi, pj))
            p_f[(+1, +2, i, j)] = mapping.map_features('POS_N1_N2:%s_%s' % (pi, pj))
            p_f[(-1, +1, i, j)] = mapping.map_features('POS_P1_N1:%s_%s' % (pi, pj))

    s0, s1, s2 = 0, 0, 0
    total = 0
    for sent in read_sentence(input_file):
        x_ = []
        g_ = []
        for t in sent:
            feat = t.maxent_features(mapping.map_features)
            x_.append(feat)
            g_.append(mapping.map_pos(t.gold_pos))
        y_0 = map(int, ll.predict([], [{k : 1 for k in f} for f in x_], m0, '-q')[0])

        # y_2 = [choice(xrange(1, len(mapping.pos_dict))) for i in y_1]

        y_1, y_2 = inference(m1, p_f, y_0[:], x_, propose_deterministic)

        # y_2 = inference(m1, p_f, y_1[:], x_, propose_probabilistic)

        for y, y0, y1, y2, t in zip(g_, y_0, y_1, y_2, sent):
            if y == y0:
                s0 += 1
            if y == y1:
                s1 += 1
            if y == y2:
                s2 += 1
            total += 1

            p0 = mapping.map_pos_rev(y0)
            p1 = mapping.map_pos_rev(y1)
            p2 = mapping.map_pos_rev(y2)

            out.write('%s\t%s\n' % (t.word, p1))
        out.write('\n')

    out.close()
    print 'acc 0: %d / %d = %.4f' % (s0, total, float(s0) / total)
    print 'acc 1: %d / %d = %.4f' % (s1, total, float(s1) / total)
    print 'acc 2: %d / %d = %.4f' % (s2, total, float(s2) / total)
Example no. 36
    def __init__(self):
        
        ## load SIFT feature extractor
        if cv2.__version__[0] == '2':
            self.sift_extractor = cv2.DescriptorExtractor_create("SIFT")
        else:
            self.sift_extractor = cv2.xfeatures2d.SIFT_create()
        module_path = os.path.dirname(os.path.realpath(__file__))
        self.model_path = os.path.join(module_path, 'model')
        ## construct face detector
        self.face_detector = {}
        self.face_detector['detector'] = cv2.CascadeClassifier(os.path.join(self.model_path, 'haarcascade_frontalface_alt2.xml'))
        self.face_detector['minNeighbors'] = 4
        self.face_detector['minSize'] = (20, 20)
        # confidence LBP
        transform_svm_mat2file(os.path.join(self.model_path, 'confidence_face_LBP.mat'))
        self.face_detector['confidence_LBP'] = {'model':load_model(os.path.join(self.model_path, 'confidence_face_LBP')), 
                                            'thre': 0.01}                                       
        # confidence SIFT
        matdata = loadmat(os.path.join(self.model_path, 'meanImg_sing_20.mat'))
        kpt_small = []
        kpt_small.append(cv2.KeyPoint(20, 20, 3.1, -1, 1, 0, 1))
        descriptors_mean_small = np.zeros((1, 0))
        for idx_pt in np.arange(17, 68):
            _, temp = self.sift_extractor.compute(matdata['meanImg'][idx_pt]['roi'], kpt_small)
            descriptors_mean_small = np.concatenate((descriptors_mean_small, temp.reshape((1, -1))), axis=1)
        # The lower the confidence, the higher the tolerance.
        # For this version, we can set different thresholds for the detection and tracking phases.
        # For near-frontal faces, the confidence is about 0.65.
        # For non-frontal faces, the confidence is about 0.55.
        self.face_detector['confidence_SIFT'] = {'descriptor':descriptors_mean_small, 
                                                 'thre_detect': 0.56,  
                                                 'thre_track': 0.46}     
        ## construct landmark detector/tracker
        self.face_lmks_model = {}

        prior = loadmat(os.path.join(self.model_path, 'Prior.mat'))
        mm = prior['mm'].reshape(-1, 2)
        self.face_lmks_model['mm'] = mm[range(17, 68), :].flatten()
        

        self.face_lmks_model['num_pts'] = 51 
        self.face_lmks_model['num_iter'] = 4
        self.face_lmks_model['norm_width'] = 200
        self.face_lmks_model['margin'] = 50

        self.face_lmks_model['para_detect'] = {}
        self.face_lmks_model['para_track'] = {}

        for it in range(self.face_lmks_model['num_iter']):
            x = loadmat(os.path.join(self.model_path, 'Detect_it%d.mat'%(it+1)))
            self.face_lmks_model['para_detect'][it] = x
            x = loadmat(os.path.join(self.model_path, 'Tracking_it%d.mat'%(it+1)))
            self.face_lmks_model['para_track'][it] = x

        ## face motion parameters
        self.face_motion = {'queue_size': 60,
                            'threshold': 10,
                            'queue':100*np.random.normal(size=(60, self.face_lmks_model['num_pts']*2))}

        ## construct eye detector
        self.eye_detector_SDM = {}
        self.eye_detector_SDM['norm_width'] = 25
        self.eye_detector_SDM['num_pts'] = 27
        self.eye_detector_SDM['num_iter'] = 3
        matdata = loadmat(os.path.join(self.model_path, 'leftPara_20160516.mat'))
        mmc = matdata['leftmm'].reshape((-1, 2))
        pair_set = np.array([[i, j] for i in np.arange(self.eye_detector_SDM['num_pts']) for j in np.arange(i+1, self.eye_detector_SDM['num_pts'])])
        pt1 = mmc[pair_set[:, 0], :]
        pt2 = mmc[pair_set[:, 1], :]
        mmshapefea1 = (pt1 - pt2).reshape((1, -1))
        mmshapefea2 = np.sqrt(np.sum((pt1 - pt2)**2, axis=1))
        self.eye_detector_SDM['pair_set'] = pair_set
        self.eye_detector_SDM['mmshapefea1'] = mmshapefea1
        self.eye_detector_SDM['mmshapefea2'] = mmshapefea2
        #for it in range(self.eye_detector_SDM['num_iter']):
        self.eye_detector_SDM['para'] = matdata['leftPara']
        self.eye_detector_SDM['mm'] = matdata['leftmm']
Example no. 37
def readScales(scalefile):
    scales = {}
    with open(scalefile) as f:
        for line in f:
            k, v = line.strip().split("\t")
            scales[int(k)] = float(v)
    return scales

brnclst = utils.readMetaOptimizeBrownCluster()
embeddings = utils.readMetaOptimizeEmbeddings()
brnspace = initBrnSpace()
scales_shallow = readScales(SHALLOWSCALEFILE)
scales_neuralbrn = readScales(NEURALBRNSCALEFILE)
model_shallow = ll.load_model(SHALLOWMODELFILE)
model_neuralbrn = ll.load_model(NEURALBRNMODELFILE)

def simpleScale(x, trainmaxes=None):
    maxes = trainmaxes if trainmaxes!=None else {}
    if trainmaxes == None:
        for itemd in x:
            for k,v in itemd.items():
                if k not in maxes or maxes[k] < abs(v): maxes[k] = abs(v)
    newx = []
    for itemd in x:
        newd = dict.fromkeys(itemd)
        for k,v in itemd.items():
            if k in maxes and maxes[k] != 0: newd[k] = (v+0.0)/maxes[k]
            else: newd[k] = 0.0
        newx.append(newd)
Example no. 38
import os
pkg_path = os.environ["geoloc"]
import ujson as json
import xmlrpclib
from geoloc.adapters import twitter_adapter, feature_adapter, city_adapter
from geoloc.util import lib_grid_search, gcd_dist, lib_log
import liblinearutil

fea_num = city_adapter.get_feature_number()
#NOTE: Only the text, loc, and tz models are adopted, as they achieve comparable performance but consume much less memory than the full model.
text_decoder = xmlrpclib.ServerProxy("http://localhost:9001")
loc_decoder = xmlrpclib.ServerProxy("http://localhost:9002")
tz_decoder = xmlrpclib.ServerProxy("http://localhost:9003")
#desc_decoder = xmlrpclib.ServerProxy("http://localhost:9004")
#rname_decoder = xmlrpclib.ServerProxy("http://localhost:9005")
stacked_model = liblinearutil.load_model(
    '{0}/models/world.l1.train.model.text_loc_tz'.format(pkg_path))
#stacked_model = liblinearutil.load_model('{0}/models/world.l1.train.model.text_loc_tz_desc_rname'.format(pkg_path))
print "Geolocation models loaded"


def seek_cache(sname):
    """    Seek on disk cache (predicted GT-JSON)    """
    jobj = None
    try:
        assert (sname)
        cache_file = "{0}/cache/{1}".format(pkg_path, sname)
        jstr = open(cache_file).readline()
        jobj = json.loads(jstr.rstrip())
    except (AssertionError, IOError, ValueError):
        return None
    return jobj
Example no. 39
pkg_path = os.environ["geoloc"]
import ujson as json
import xmlrpclib
from geoloc.adapters import twitter_adapter, feature_adapter, city_adapter
from geoloc.util import lib_grid_search, gcd_dist, lib_log
import liblinearutil


fea_num = city_adapter.get_feature_number()
#NOTE: Only the text, loc, and tz models are adopted, as they achieve comparable performance but consume much less memory than the full model.
text_decoder = xmlrpclib.ServerProxy("http://localhost:9001")
loc_decoder = xmlrpclib.ServerProxy("http://localhost:9002")
tz_decoder = xmlrpclib.ServerProxy("http://localhost:9003")
#desc_decoder = xmlrpclib.ServerProxy("http://localhost:9004")
#rname_decoder = xmlrpclib.ServerProxy("http://localhost:9005")
stacked_model = liblinearutil.load_model('{0}/models/world.l1.train.model.text_loc_tz'.format(pkg_path))
#stacked_model = liblinearutil.load_model('{0}/models/world.l1.train.model.text_loc_tz_desc_rname'.format(pkg_path))
print "Geolocation models loaded"


def seek_cache(sname):
    """    Seek on disk cache (predicted GT-JSON)    """
    jobj = None
    try:
        assert(sname)
        cache_file = "{0}/cache/{1}".format(pkg_path, sname)
        jstr = open(cache_file).readline()
        jobj = json.loads(jstr.rstrip())
    except (AssertionError, IOError, ValueError):
        return None
    return jobj
Example no. 40
 def train(self):
     if os.path.isfile("svm.model") and self.useModel:
         self.model = llu.load_model("svm.model")
     else:
         self.model = llu.train(self.ys, self.xs, self.train_param)
         llu.save_model("svm.model", self.model)
Example no. 41
File: svm.py Project: Imperat/Pyrus
 def load(path):
     obj = ml.Classifier.load(path)
     obj._model = liblinear.load_model(path + "-model")
     return obj
Example no. 42
def compute_eDN_saliency_map(
        img,
        no_normalization=True,
        eDN_desc_path='edn_cvpr2014/slmBestDescrCombi.pkl',
        eDN_svm_path='edn_cvpr2014/svm-slm',
        eDN_whitening_params_path='edn_cvpr2014/whiten-slm',
        verbose=False,
        **kwargs):
    # determine the image dimensions
    proportions = float(img.shape[1]) / img.shape[0]
    if abs(proportions - 2.0) < 1e-5:
        # 2:1
        insize = (768, 384)
    elif abs(proportions - 1.0) < 1e-5:
        # 1:1
        insize = (512, 512)
    elif abs(proportions - 6.0 / 5.0) < 1e-5:
        # 6:5
        insize = (600, 500)
    elif abs(proportions - 4.0 / 3.0) < 1e-5:
        # 4:3
        insize = (600, 450)
    else:
        # default, 4:3, smaller
        insize = (512, 384)
    if verbose:
        print >> sys.stderr, 'Choosing internal image resolution of', insize
    # read eDN model(s)
    descFile = open(eDN_desc_path, 'r')
    desc = pickle.load(descFile)
    descFile.close()

    nFeatures = np.sum([
        d['desc'][-1][0][1]['initialize']['n_filters'] for d in desc
        if d != None
    ])

    # load SVM model and whitening parameters
    svm = load_model(eDN_svm_path)
    f = open(eDN_whitening_params_path, 'r')
    white_params = np.asarray([map(float, line.split(' ')) for line in f]).T
    f.close()

    # assemble svm model
    svm_model = {}
    svm_model['svm'] = svm
    svm_model['whitenParams'] = white_params

    bias_to_center_validation = (svm.get_nr_feature() - nFeatures) == 1
    assert bias_to_center_validation == False

    # compute saliency map
    model = EDNSaliencyModel(descriptions=desc,
                             svmModel=svm_model,
                             biasToCntr=False,
                             insize=insize)
    sal_map = model.saliency(img, normalize=False)

    sal_map = sal_map.astype('f')

    if not no_normalization:
        sal_map = (255.0 / (sal_map.max() - sal_map.min()) *
                   (sal_map - sal_map.min())).astype(np.uint8)
    return sal_map
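A hedged usage sketch for compute_eDN_saliency_map (input image path hypothetical; assumes the descriptor, SVM, and whitening files named in the defaults are present):

from scipy import misc

img = misc.imread("photo.jpg")
sal_map = compute_eDN_saliency_map(img, no_normalization=False, verbose=True)
misc.imsave("photo_saliency.png", sal_map)  # uint8 map when normalization is requested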
Example no. 43
 def load(self, model_file):
     # assign to the instance so the loaded model is retained
     self.model = liblinearutil.load_model(model_file)
Example no. 44
os.environ['GLOG_minloglevel'] = '3'  # Suppress logging.

if __name__ == "__main__":
    desc_file_path = 'slmBestDescrCombi.pkl'
    with open(desc_file_path) as fp:
        desc = pickle.load(fp)

    nFeatures = np.sum([
        d['desc'][-1][0][1]['initialize']['n_filters'] for d in desc
        if d != None
    ])

    # load SVM model and whitening parameters
    svm_path = 'svm-slm-cntr'
    svm = load_model(svm_path)

    whiten_path = 'whiten-slm-cntr'
    with open(whiten_path) as fp:
        whitenParams = np.asarray([map(float, line.split(' '))
                                   for line in fp]).T

    # assemble svm model
    svmModel = {'svm': svm, 'whitenParams': whitenParams}

    biasToCntr = (svm.get_nr_feature() - nFeatures) == 1

    def eDNsaliency(image_path):
        img = misc.imread(image_path, mode='RGB')

        # compute saliency map
Example no. 45
 def load(path):
     obj = ml.Classifier.load(path)
     obj._model = liblinear.load_model(path + '-model')
     return obj
Example no. 46
def multipule_eval_for_logistic(test_corpus_dir, feature_map_character, feature_map_numeric, feature_show, args):
    """
    Evaluates the models built with liblinear's logistic regression (each sentence is labeled individually).
    If +1 is produced even once, the document is considered to have been assigned the label.
    """
    env = "pine"
    if env == "pine":
        # change below by an environment
        libsvm_wrapper_path = "/home/kensuke-mi/opt/libsvm-3.17/python/"
    elif env == "local":
        libsvm_wrapper_path = "/Users/kensuke-mi/opt/libsvm-3.17/python/"
        liblinear_wrapper_path = "/Users/kensuke-mi/opt/liblinear-1.94/python/"
        sys.path.append(liblinear_wrapper_path)
    sys.path.append(libsvm_wrapper_path)
    import liblinearutil
    import svmutil

    if args.save_performance == True:
        performance_out = codecs.open("./performance_result." + args.experiment_no, "w", "utf-8")
        performance_out.write(args.experiment_no + u"\n")
        performance_out.write(u"-" * 30 + u"\n")

    # confidence threshold
    threshold = float(args.threshold)
    # option: whether to display the confidence
    show_confidence = False
    # average confidence value
    average_confidence = 0
    # number of +1 instances
    times_plus_1_ins = 0

    num_docs_having_motif = {}
    stop = args.stop
    tfidf_flag = args.tfidf
    exno = args.experiment_no

    model_dir_path = "../get_thompson_motif/classifier/logistic_2nd/"
    model_path_list = load_files(model_dir_path, "logistic." + exno)
    # record how many times the classifier made a correct decision, i.e. CAP(gold motif tag, candidate by classifier)
    num_of_correct_decision = {}
    precision_sum = 0
    recall_sum = 0
    F_sum = 0
    h_loss_sum = 0
    subset_acc_sum = 0
    ex_p_sum = 0
    ex_r_sum = 0
    ex_f_sum = 0
    acc_sum = 0
    classifier_return_1_sum = 0
    for test_file in load_files(test_corpus_dir):
        # ============================================================
        result_map = {}
        gold_map = {}
        # ------------------------------------------------------------
        if args.persian_test == True:
            # build instances per sentence
            sentences_in_document, motif_stack = file_loader_sentence(test_file, stop)
        elif args.dutch_test == True:
            sentences_in_document, motif_stack = file_loader_dutch_sentence(test_file, stop)
        # ------------------------------------------------------------
        out_libsvm_format_sentence(
            sentences_in_document, feature_map_character, feature_map_numeric, feature_show, tfidf_flag
        )
        test_y, test_x = svmutil.svm_read_problem("test.data")
        # ------------------------------------------------------------
        for model_file in model_path_list:
            decision_flag = False
            alphabet_label = unicode(os.path.basename(model_file)[0], "utf-8")
            result_map[alphabet_label] = 0

            model = liblinearutil.load_model(model_file)
            p_label, p_acc, p_val = liblinearutil.predict(test_y, test_x, model, "-b 1")

            for index, result_label in enumerate(p_label):
                if result_label == 1.0:
                    decision_flag = True
                # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                if decision_flag == True and p_val[index][0] > threshold:
                    result_map[alphabet_label] = 1
                # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # ------------------------------------------------------------
        for gold_motif in motif_stack:
            alphabet_label = gold_motif[0]
            gold_map[alphabet_label] = 1
        # ------------------------------------------------------------
        gold_cap_result = {}
        for label in result_map:
            if result_map[label] == 1 and label in gold_map:
                gold_cap_result[label] = 1
        # ------------------------------------------------------------
        try:
            average = average_confidence / times_plus_1_ins
        except ZeroDivisionError:
            average = 0

        print "-" * 30
        print "Filename:{}\nEstimated:{}\nGold:{}\nCorrect Estimation:{}".format(
            test_file, result_map, gold_map, gold_cap_result
        )
        print "average confidence is {}".format(average)
        print "-" * 30
        # ------------------------------------------------------------
        h_loss_sum = calc_h_loss(result_map, gold_map, h_loss_sum)
        subset_acc_sum = calc_subset_acc(result_map, gold_map, subset_acc_sum)
        ex_p_sum, ex_r_sum, ex_f_sum, acc_sum = calc_p_r_f(result_map, gold_map, ex_p_sum, ex_r_sum, ex_f_sum, acc_sum)
        classifier_return_1_sum += get_the_num_of_1_classifier(result_map)
    # ============================================================
    num_of_files = len(load_files(test_corpus_dir))
    h_loss = h_loss_sum / num_of_files
    subset_acc = float(subset_acc_sum) / num_of_files
    ex_p = ex_p_sum / num_of_files
    ex_r = ex_r_sum / num_of_files
    ex_f = ex_f_sum / num_of_files
    acc = acc_sum / num_of_files
    classifier_return_1 = float(classifier_return_1_sum) / num_of_files
    precision_ave = precision_sum / len(load_files(test_corpus_dir))
    recall_ave = recall_sum / len(load_files(test_corpus_dir))
    F_ave = F_sum / len(load_files(test_corpus_dir))
    print "-" * 30
    print "RESULT for {} files classification".format(len(load_files(test_corpus_dir)))

    hamming_format = u"Hamming Loss:{}".format(h_loss)
    subset_format = u"Subset Accuracy(classification accuracy):{}".format(subset_acc)
    else_format = u"example-based precision:{} example-based recall:{} example-based F:{} accuracy:{}".format(
        ex_p, ex_r, ex_f, acc
    )
    classifier_format = u"Ave. number of classifier which returns 1:{}".format(classifier_return_1)
    print hamming_format
    print subset_format
    print else_format
    print classifier_format

    if args.save_performance == True:
        performance_out.write(hamming_format + u"\n")
        performance_out.write(subset_format + u"\n")
        performance_out.write(else_format + u"\n")
        performance_out.write(classifier_format + u"\n")
        performance_out.close()
Example no. 47
def init_model(filename="model"):
	return svm.load_model(filename)
Example no. 48
 def __init__(self, model_filename, features_filename, output_probability=False):
     self.model = liblinearutil.load_model(model_filename)
     self.feature_indices = self._load_features(features_filename)
     self.output_probability = output_probability
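A hedged sketch of a predict method this wrapper might pair with, honoring the stored output_probability flag (method name and return shape are assumptions):

 def predict(self, y, x):
     # '-b 1' requests probability estimates from liblinear when the flag is set
     options = '-b 1' if self.output_probability else ''
     p_labels, p_acc, p_vals = liblinearutil.predict(y, x, self.model, options)
     return p_labels, p_vals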