def preparation(x_list, y_list, feature_symbols, Func=None, ref=None, **kwargs):
    """
    feature_symbols is a list of feature names of interest (without the g_ or H_ prefix).
    """
    global preparation_called_times
    features.DT = 0.1
    print(preparation_called_times)
    assert len(x_list) == len(y_list)
    xy_vector = np.array(list(x_list) + list(y_list))
    # ref = ref_bottom if x_list[0]<1000 else ref_top
    ref = ref_select(x_list,y_list) if ref is None else ref
    if Func is None:
        feature = Features(vec=xy_vector, referenceCurv=ref, **kwargs)
    else:
        feature = featureFuncWrapper(vec=xy_vector, Func=Func, referenceCurv=ref, **kwargs)
    ftrvalues = [np.array([feature.featureValue(f) for f in feature_symbols])]
    return_list = []
    for k in feature_symbols:
        print("symbol:",k)
        g,H,N = feature.featureGradJacobNormalizer(k)
        return_list.append(g)
        return_list.append(H)
        return_list.append(N)
    preparation_called_times += 1
    return return_list, ftrvalues
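A minimal usage sketch (the feature name "curvature" and the sample curve are hypothetical; Features, featureFuncWrapper, and ref_select are assumed to be the ones defined in the surrounding module):

preparation_called_times = 0  # module-level counter read and bumped by preparation()

xs = [0.0, 1.0, 2.0, 3.0]
ys = [0.1, 0.4, 0.9, 1.6]
gHN_list, values = preparation(xs, ys, ["curvature"])
# gHN_list holds the gradient, Jacobian and normalizer of each symbol, appended in that order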
Example #2
def test():
    f = Features(**PARAMS)
    for i in range(300,400):
        path = 'test/'+str(i)+'.png'
        im = Image(path)
        f.set_image(im)
        f.extract_blobs()
Example #3
 def __init__(self, host_tcp_port, host_address, mode):
     self.mode = mode
     self.locally_reserved_ports = []
     self.local_udp_port = 0
     self.client_ip = ""
     self.client_udp_port = 0
     self.client_address = (self.client_ip, self.client_udp_port)
     self.host_ip = host_address
     self.host_tcp_port = host_tcp_port
     self.host_udp_port = 0
     self.host_address = (self.host_ip, self.host_udp_port)
     self.server_udp_socket = socket(AF_INET, SOCK_DGRAM)
     self.server_tcp_socket = socket(AF_INET, SOCK_STREAM)
     self.tcp_connection_socket = socket(AF_INET, SOCK_STREAM)
     self.multipart_message_buffer = ""
     self.multipart_message_handling_over = True
     self.features = Features()
     if mode == 'client':
         self.add_multipart()
         self.init_connections()
         #self.udp_client_socket = socket(AF_INET, SOCK_DGRAM)
         self.start_udp_communication()
     if mode == 'proxy':
         self.init_connections()
         self.start_proxy_service()
         self.client_ip = ('', 0)
Example #4
def test():
    f = Features(**PARAMS)
    minx,maxx = 0,0
    miny,maxy = 0,0
    while True:
        path = 'test/tmp.png'
        screenshot(path, region=REGION)
        im = Image(path)
        f.set_image(im)
        blobs = f.extract_blobs()
        if not blobs[0]:
            break
        dl = f.small.dl()
        bottom_right_corner = blobs[0].bottomRightCorner()
        dl.circle(bottom_right_corner, 5, Color.RED)
        if blobs[1]:
            top_right = blobs[1].topRightCorner()
            x = bottom_right_corner[0] - top_right[0]
            y = bottom_right_corner[1] - top_right[1]
            if x < minx:
                minx = x
            elif x > maxx:
                maxx = x
            if y < miny:
                miny = y
            elif y > maxy:
                maxy = y
            dl.circle(top_right, 5, Color.RED)
        f.small.show()
    print 'minx, maxx', minx, maxx
    print 'miny, maxy', miny, maxy
Example #5
File: bovw.py Project: apacha/mscr
 def __init__(self, feat=SURF(), cls=RF(n_estimators=40), verbose=True):
     self._ft = Features(feat)
     self._da = Data(self._ft)
     self._fm = FitBoVW(cls)
     self._verbose = verbose
     self._cl = cls
     self._vq = None
Example #6
 def __init__(self, feature_extractor, parameters, unique_templates):
     self.feature_extractor = feature_extractor
     self.parameters = parameters
     self.unique_templates = unique_templates
     feature_count = len(feature_extractor.ordered_features)
     self.features = Features(feature_extractor.ordered_features,
                              [0 for _ in range(feature_count)])
Example #7
File: model.py Project: awav/carnd-p5
 def __init__(self, mode='svm'):
     if mode != 'svm' and mode != 'xgboost':
         raise ValueError('Unknown mode for CarModel')
     self._f = Features()
     self._model = None
     self._mode = mode
     self.input_shape = None
Example #8
def read_images():
    galaxy_ids = get_galaxy_ids()
    for galaxy_id in galaxy_ids:
        image_file = '.'.join([str(galaxy_id), 'jpg'])
        feature_vector = [galaxy_id]
        features = Features(os.path.join(cfg.TRAIN_IMAGE_DIR, image_file))
        feature_vector.extend(features.get_feature_vector())
        yield feature_vector
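The generator yields one [galaxy_id, feature, ...] row per training image; a consumption sketch (assumes cfg.TRAIN_IMAGE_DIR and get_galaxy_ids from the surrounding module):

import numpy as np

feature_matrix = np.array([row for row in read_images()])
ids, X = feature_matrix[:, 0], feature_matrix[:, 1:]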
Example #9
 def __init__(self, cls_path=None):
     """ Init function"""
     if cls_path is not None:
         self.cls = load_cls(cls_path)
     else:
         self.cls = None
     self.feat = Features()
     self.percent_training = 80
Example #10
class CRFTest(unittest.TestCase):
    def setUp(self):
        # training data
        self.train_word_data = [["Peter", "Daniel", "Blackburn"], ["1966", "World", "Cup"]]
        self.train_pos_data = [["NNP", "NNP", "NNP"], ["CD", "NNP", "NNP"]]
        word_set = set([word for words in self.train_word_data for word in words])
        self.pos_set = set([pos for poses in self.train_pos_data for pos in poses])
        self.features = Features(list(self.pos_set))
        self.features.create_feature(word_set)
        # test data
        self.test_word_data = [["Peter", "Daniel", "Blackburn"], ["1980", "World"]]

    def test_predict(self):
        """
        系列x_listに対して,予測させた系列ラベルy_listの長さが一致している
        """
        predicts = []
        crf = CRF(self.features, self.pos_set)
        for x_list in self.test_word_data:
            predict = crf.predict(x_list)
            self.assertEqual(len(predict), len(x_list))

    def test_forward_backward_algorithm(self):
        """
        前向きアルゴリズムと後ろ向きアルゴリズムで求めた状態和が一致する
        """
        crf = CRF(self.features, self.pos_set)
        for x_list in self.test_word_data:
            crf.predict(x_list)
            alpha_z = round(crf._forward_algorithm(x_list)[-1]["<EOS>"], 3)
            beta_z = round(crf._backward_algorithm(x_list)[0]["<BOS>"], 3)
            self.assertEqual(alpha_z, beta_z)

    def test_marginal_probability(self):
        """
        各時点m-1からmへのラベル遷移の確率の合計が1になる
        """
        crf = CRF(self.features, self.pos_set)
        x_list = self.test_word_data[0]
        crf.predict(x_list)
        crf._forward_algorithm(x_list)
        crf._backward_algorithm(x_list)
        for m in range(1, len(x_list)):
            p_sum = 0
            for i_label in self.pos_set:
                for j_label in self.pos_set:
                    p_sum += crf._marginal_probability(x_list, i_label, j_label, m)
            self.assertEqual(1, round(p_sum))

    def test_fit(self):
        """
        学習前と学習後で重みが異なる
        """
        crf = CRF(self.features, self.pos_set)
        before_w = copy.deepcopy(crf.w_lambda)
        crf.fit(self.train_word_data, self.train_pos_data)
        current_w = copy.deepcopy(crf.w_lambda)
        self.assertNotEqual(np.sum(before_w), np.sum(current_w))
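A standard entry point makes the suite runnable as a script:

if __name__ == '__main__':
    unittest.main()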
Example #13
File: rank.py Project: UFAL-DSG/tgen
class FeaturesPerceptronRanker(BasePerceptronRanker):
    """Base class for global ranker for whole trees, based on features."""

    def __init__(self, cfg):
        super(FeaturesPerceptronRanker, self).__init__(cfg)
        if not cfg:
            cfg = {}
        self.feats = ['bias: bias']
        self.vectorizer = None
        self.normalizer = None
        self.binarize = cfg.get('binarize', False)
        # initialize feature functions
        if 'features' in cfg:
            self.feats.extend(cfg['features'])
        self.feats = Features(self.feats, cfg.get('intermediate_features', []))

    def _extract_feats(self, tree, da):
        feats = self.vectorizer.transform([self.feats.get_features(tree, {'da': da})])
        if self.normalizer:
            feats = self.normalizer.transform(feats)
        return feats[0]

    def _init_training(self, das_file, ttree_file, data_portion):

        super(FeaturesPerceptronRanker, self)._init_training(das_file, ttree_file, data_portion)

        # precompute training data features
        X = []
        for da, tree in zip(self.train_das, self.train_trees):
            X.append(self.feats.get_features(tree, {'da': da}))
        if self.prune_feats > 1:
            self._prune_features(X)
        # vectorize and binarize or normalize (+train vectorizer/normalizer)
        if self.binarize:
            self.vectorizer = DictVectorizer(sparse=False, binarize_numeric=True)
            self.train_feats = self.vectorizer.fit_transform(X)
        else:
            self.vectorizer = DictVectorizer(sparse=False)
            self.normalizer = StandardScaler(copy=False)
            self.train_feats = self.normalizer.fit_transform(self.vectorizer.fit_transform(X))

        log_info('Features matrix shape: %s' % str(self.train_feats.shape))

    def _prune_features(self, X):
        """Prune features – remove all entries from X that involve features not having a
        specified minimum occurrence count.
        """
        counts = defaultdict(int)
        for inst in X:
            for key in inst.iterkeys():
                counts[key] += 1
        for inst in X:
            for key in inst.keys():
                if counts[key] < self.prune_feats:
                    del inst[key]
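_prune_features counts, per feature key, how many instances contain it, then deletes the rare keys in place; a self-contained toy of the same logic (Python 3, so iterkeys() becomes keys()):

from collections import defaultdict

X = [{'f1': 1, 'f2': 5}, {'f1': 2}, {'f1': 0, 'f3': 7}]
counts = defaultdict(int)
for inst in X:
    for key in inst:
        counts[key] += 1
min_occurrences = 2  # plays the role of self.prune_feats
for inst in X:
    for key in list(inst):  # copy the keys, since we delete while iterating
        if counts[key] < min_occurrences:
            del inst[key]
# X is now [{'f1': 1}, {'f1': 2}, {'f1': 0}]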
Example #14
class WafEnv_v0(gym.Env):
    metadata = {
        'render.modes': ['human', 'rgb_array'],
    }

    def __init__(self):
        self.action_space = spaces.Discrete(len(ACTION_LOOKUP))

        # XSS sample feature set
        #self.samples=[]
        # the sample currently being processed
        self.current_sample = ""
        #self.current_state=0
        self.features_extra = Features()
        self.waf_checker = Waf_Check()
        # modify the current sample according to the action, to evade detection
        self.xss_manipulatorer = Xss_Manipulator()

        self._reset()

    def _step(self, action):

        r = 0
        is_gameover = False
        #print "current sample:%s" % self.current_sample

        _action = ACTION_LOOKUP[action]
        #print "action is %s" % _action

        self.current_sample = self.xss_manipulatorer.modify(
            self.current_sample, _action)
        #print "change current sample to %s" % self.current_sample

        if not self.waf_checker.check_xss(self.current_sample):
            # give a reward
            r = 10
            is_gameover = True
            print "Good!!!!!!!avoid waf:%s" % self.current_sample

        self.observation_space = self.features_extra.extract(
            self.current_sample)

        return self.observation_space, r, is_gameover, {}

    def _reset(self):
        self.current_sample = random.choice(samples_train)
        print "reset current_sample=" + self.current_sample

        self.observation_space = self.features_extra.extract(
            self.current_sample)
        return self.observation_space

    def render(self, mode='human', close=False):
        return
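A typical interaction loop with the environment (a sketch in the old private-method gym style used above; assumes samples_train, Waf_Check and Xss_Manipulator are importable):

env = WafEnv_v0()
state = env._reset()
for _ in range(50):
    action = env.action_space.sample()   # random action over ACTION_LOOKUP
    state, reward, done, _ = env._step(action)
    if done:                             # sample evaded the WAF
        break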
Example #15
def print_quote(endpoint, human_input):
    with open(endpoint) as f:
        quotes = json.load(f)
    if human_input == '':
        print random.choice(quotes)
        print 0
        return
    f = Features(human_input, quotes)
    quote, score = f.getBestQuote()
    print quote
    print score
Example #16
def test_binary_class():
    stock_d = testdata()
    ti = TechnicalIndicators(stock_d)
    ti.calc_ret_index()

    ret_index = ti.stock['ret_index']
    f = Features()
    train_X, train_y = f.binary_class(ret_index, range=90)

    expected = [
        1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0,
        1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0,
        0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0,
        1, 1, 0
    ]
    for r, e in zip(train_y, expected):
        eq_(r, e)

    r = round(train_X[-1][-1], 5)
    expected = 1.35486
    eq_(r, expected)

    r = round(train_X[0][0], 5)
    expected = 1.19213
    eq_(r, expected)

    expected = 14
    r = len(train_X[0])
    eq_(r, expected)

    expected = 75
    r = len(train_X)
    eq_(r, expected)

    train_X, train_y = f.binary_class(ret_index)

    expected = 0
    eq_(train_y[0], expected)

    expected = 1
    eq_(len(train_y), expected)

    r = round(train_X[0][0], 5)
    expected = 1.30311
    eq_(r, expected)

    expected = 14
    r = len(train_X[0])
    eq_(r, expected)

    expected = 1
    r = len(train_X)
    eq_(r, expected)
Example #17
def predict():
    spam_detect = Spam_Detect()
    features_extract = Features(vocabulary_file)
    if request.method == 'POST':
        if 'train' in request.form:
            print('Predict and Train')
            train_nb_spam()
        email = request.form['email']
        data = [email]
        featurevectors = features_extract.extract(data)
        my_prediction = spam_detect.detect(featurevectors)
        return render_template('result.html', prediction=my_prediction)
Example #18
 def __init__(self, cfg):
     super(FeaturesPerceptronRanker, self).__init__(cfg)
     if not cfg:
         cfg = {}
     self.feats = ['bias: bias']
     self.vectorizer = None
     self.normalizer = None
     self.binarize = cfg.get('binarize', False)
     # initialize feature functions
     if 'features' in cfg:
         self.feats.extend(cfg['features'])
     self.feats = Features(self.feats, cfg.get('intermediate_features', []))
Example #20
def train_ctc_model(train_file, test_file):
    """ Function of training Code Recognizer """

    # training and test dataset (default)
    train_file = parameters_ctc['train_file']
    test_file = parameters_ctc['test_file']
    
    # extract features from two language models trained on Gigaword and StackOverflow
    features = Features(RESOURCES)
    train_tokens, train_features, train_labels = features.get_features(train_file, True)
    test_tokens, test_features, test_labels = features.get_features(test_file, False)
    
    # fastText embedding
    vocab_size, word_to_id, id_to_word, word_to_vec = get_word_dict_pre_embeds(train_file, test_file)
    train_ids, test_ids = get_train_test_word_id(train_file, test_file,  word_to_id)
    
    # transform each ngram probability into a k-dimensional vector using Gaussian binning
    word_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (vocab_size, parameters_ctc['word_dim']))
    for word in word_to_vec:
        word_embeds[word_to_id[word]]=word_to_vec[word]
    
    # concatenate the outputs with fastText embedding
    ctc_classifier = NeuralClassifier(len(train_features[0]), max(train_labels) + 1, vocab_size, word_embeds)
    ctc_classifier.to(device)
    
    # binary classifier
    optimizer = torch.optim.Adam(ctc_classifier.parameters(), lr=parameters_ctc["LR"])
    step_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.8)
    
    # prepare dataset
    train_x = Variable(torch.FloatTensor(train_features).to(device))
    train_x_words = Variable(torch.LongTensor(train_ids).to(device))
    train_y = Variable(torch.LongTensor(train_labels).to(device))

    test_x = Variable(torch.FloatTensor(test_features).to(device))
    test_x_words = Variable(torch.LongTensor(test_ids).to(device))
    test_y = Variable(torch.LongTensor(test_labels).to(device))

    # training
    for epoch in range(parameters_ctc['epochs']):
        loss = ctc_classifier.CrossEntropy(train_features, train_x_words, train_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_scores,  train_preds = ctc_classifier(train_features, train_x_words)
        test_scores, test_preds = ctc_classifier(test_features, test_x_words)
        
        eval(test_preds, test_labels, "test")

    return ctc_classifier, vocab_size, word_to_id, id_to_word, word_to_vec, features
Example #22
    def __init__(self):

        # initialise webcams
        self.webcam_one = Webcam(0)
        self.webcam_two = Webcam(1)

        # initialise config
        self.config_provider = ConfigProvider()

        # initialise features
        self.features = Features(self.config_provider)

        # initialise texture
        self.texture_background = None
Example #23
 def classify(self):
     for i, x in enumerate(self.channel_decode):
         feature_obj = Features(self.data_raw[int(x) - 1],
                                self.sampling_freq, self.features_id)
         features = feature_obj.extract_features()
         try:
             prediction = self.clf[i].predict([features]) - 1
             if prediction != (self.prediction >> i & 1):  # if prediction changes
                 self.prediction = self.output(i, prediction,
                                               self.prediction)
                 print('Prediction: %s' % format(self.prediction, 'b'))
         except ValueError:
             print('prediction failed...')
Example #24
    def train(self, arr, remember=True, classifier="Decision Tree"):
        f = Features()

        if os.path.exists(self.filename):
            self.clf = self._load_clf()
            train_X, train_y = f.binary_class(arr)
        else:
            self.clf = self.new_clf(classifier=classifier)
            train_X, train_y = f.binary_class(arr, len(arr))

        self.clf.fit(train_X, train_y)
        if remember:
            self._save_clf()

        return train_X, train_y
Example #25
    def __init__(self, pitchnum, stdout, sourcefile, resetPitchSize, resetThresholds, displayBlur, normalizeAtStartup, noDribbling):
               
        self.running = True
        self.connected = False
   
        self.stdout = stdout 

        if sourcefile is None:  
            self.cap = Camera()
        else:
            filetype = 'video'
            if sourcefile.endswith(('jpg', 'png')):
                filetype = 'image'

            self.cap = VirtualCamera(sourcefile, filetype)
        
        calibrationPath = os.path.join('calibration', 'pitch{0}'.format(pitchnum))
        self.cap.loadCalibration(os.path.join(sys.path[0], calibrationPath))

        self.preprocessor = Preprocessor(pitchnum, resetPitchSize)
        if self.preprocessor.hasPitchSize:
            self.gui = Gui(self.preprocessor.pitch_size)
        else:
            self.gui = Gui()
        self.threshold = Threshold(pitchnum, resetThresholds, displayBlur, normalizeAtStartup)
        self.thresholdGui = ThresholdGui(self.threshold, self.gui)
        self.features = Features(self.gui, self.threshold)
        self.filter = Filter(noDribbling)
        
        eventHandler = self.gui.getEventHandler()
        eventHandler.addListener('q', self.quit)

        while self.running:
            try:
                if not self.stdout:
                    self.connect()
                else:
                    self.connected = True

                if self.preprocessor.hasPitchSize:
                    self.outputPitchSize()
                    self.gui.setShowMouse(False)
                else:
                    eventHandler.setClickListener(self.setNextPitchCorner)

                while self.running:
                    self.doStuff()

            except socket.error:
                self.connected = False
                # If the rest of the system is not up yet/gets quit,
                # just wait for it to come available.
                time.sleep(1)

                # Strange things seem to happen to X sometimes if the
                # display isn't updated for a while
                self.doStuff()

        if not self.stdout:
            self.socket.close()
Example #26
        async def gather_features_for_prediction(image_slice: ImageSlice,
                                                 wbs: WindowBoxSlice,
                                                 search_params: SearchParams):
            features = Features([])
            gather_co_list = []
            window = wbs.bbox_slice

            async def gather_hog_features(window):
                return image_slice.window_hog_features(window)

            async def gather_spatial_features(window):
                return image_slice.window_bin_spatial_features(window)

            async def gather_color_hist_features(window):
                return image_slice.window_color_hist_features(window)

            if search_params.hog_feat is True:
                gather_co_list.append(gather_hog_features(window))

            if search_params.spatial_feat is True:
                gather_co_list.append(gather_spatial_features(window))

            if search_params.hist_feat is True:
                gather_co_list.append(gather_color_hist_features(window))

            for feature in await asyncio.gather(*gather_co_list):
                features += feature

            float_values = features.values.astype(np.float64)
            return float_values
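Only the enabled feature coroutines are gathered, and their vectors are concatenated in order; a self-contained toy of the same pattern:

import asyncio

async def part(values):
    return values

async def demo():
    gathered = await asyncio.gather(part([1, 2]), part([3]))
    out = []
    for p in gathered:
        out += p
    return out

print(asyncio.run(demo()))  # [1, 2, 3]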
Example #28
    def window_hog_features(self, window):
        # add all the features from all hog channel windows
        features = Features([])
        for hif in self.hog_features_list:
            features += hif.window_hog_features(window)

        return features
Example #29
    def __init__(self, exchanges, logger, db_prices, db_other, db_client):
        self.exchanges = exchanges
        self.logger = logger
        self.db_prices = db_prices
        self.db_other = db_other
        self.db_client = db_client
        self.db_collections_price = {
            i.get_name(): db_prices[i.get_name()]
            for i in self.exchanges
        }

        # Save-later queue.
        self.signals_save_to_db = queue.Queue(0)

        # DataFrame container: data[exchange][symbol][timeframe].
        self.data = {}
        self.init_dataframes(empty=True)

        # Strategy models.
        self.models = self.load_models(self.logger)

        # Signal container: signals[exchange][symbol][timeframe].
        self.signals = {}

        # persistent reference to features library.
        self.feature_ref = Features()
Example #30
def get_actual_value(row, column, key, has_fill_rows=True):
    row, column = merge_data.parse_coordinates(row, column)
    if not has_fill_rows:
        row += merge_data.NO_FILL_ROW_OFFSET
    # Handle both a string key and a Features key.
    key = Features(key).value
    return OUTPUT_CONTENTS.get(row, {}).get(column, {}).get(key, '')
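The chained .get(..., {}) calls fall through to the '' default whenever the row, column or key is missing:

contents = {3: {2: {'height': '1.8'}}}
print(contents.get(3, {}).get(2, {}).get('height', ''))  # '1.8'
print(contents.get(9, {}).get(2, {}).get('height', ''))  # ''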
Example #31
    def train(self, arr, remember=True,
              regression_type="Ridge"):
        f = Features()

        if os.path.exists(self.filename):
            self.clf = self._load_clf()
            train_X, train_y = f.proportion_class(arr)
        else:
            self.clf = self.new_clf(regression_type=regression_type)
            train_X, train_y = f.proportion_class(arr, len(arr))

        self.clf.fit(train_X, train_y)
        if remember:
            self._save_clf()

        return train_X, train_y
Example #32
def lambda_handler(event, context):
    # TODO implement

    json_data = json.loads(event['body'])
    preprocess = Preprocess(json_data=json_data)
    preprocess.scale_points(calculate_scale=False)

    pose_objects = preprocess.new_pose_objects

    features = []

    features_obj = Features(pose_objects=pose_objects)
    features_obj.compute_features()
    features = features_obj.get_features()
    # pca_model = pickle.load(open('pca.pkl', 'rb'))
    # reduced_feature_matrix = pca_model.transform(features)

    s3 = boto3.resource('s3')

    svm_classifier = pickle.loads(
        s3.Bucket("gesture-recognition").Object("SVM_model.pkl").get()
        ['Body'].read())

    logreg_classifier = pickle.loads(
        s3.Bucket("gesture-recognition").Object("LogReg_model.pkl").get()
        ['Body'].read())

    lda_classifier = pickle.loads(
        s3.Bucket("gesture-recognition").Object("LDA_model.pkl").get()
        ['Body'].read())

    random_forest_classifier = pickle.loads(
        s3.Bucket("gesture-recognition").Object("RForest_model.pkl").get()
        ['Body'].read())

    prediction_rf = random_forest_classifier.predict(features)
    prediction_svm = svm_classifier.predict(features)
    prediction_lda = lda_classifier.predict(features)
    prediction_logreg = logreg_classifier.predict(features)

    data = {
        "1": prediction_svm[0],
        "2": prediction_logreg[0],
        "3": prediction_lda[0],
        "4": prediction_rf[0]
    }
    return {'statusCode': 200, 'body': json.dumps(data)}
Example #33
 def __init__(self, conf, poseidon, hermes):
     self.conf = conf
     self.poseidon = poseidon
     self.processor = Processor(conf)
     self.hermes = hermes
     self.features = Features()
     self.thunder = Thunder(self.conf, self.features, self.processor, self.poseidon, self.hermes)
     self.scepter = Scepter(conf, self.hermes, self.processor, self.features, self.thunder)
Example #34
def grab_data(path):
    # grab the data and extract the MFCC feature of every music audio
    print "grab_data"
    for filename in os.listdir(path):
        print filename
        mfcc = Features(path + '/' + filename, feature_params)
        datamfcc = mfcc.MFCC  # MFCC features
        dataset.append((datamfcc, filename))
Example #35
 def calibrate(self, reps=3, skip_time=2, hold_time=5, gap_time=0.25):
     feat_len = int(hold_time / gap_time)
     features = Features(feat_len, reps)
     reps_completed = 0
     printed = False
     extended = True
     index_middle_finger = 2
     # array of middle finger averages of lengths. size is equal to number of reps
     middle_len = []
     time_elapsed = 0
     feat_index = 0
     while self.controller.is_connected:
         if reps_completed == reps:
             print "Calibration is finished!"
             self.middle_len = np.mean(middle_len)
             self.write_calibration()
             return
         else:
             frame = self.controller.frame()
             hands = frame.hands
             if len(hands) == 0:
                 feat_index = 0
                 time_elapsed = 0
                 if not printed:
                     print 'Bring hand in view and extend all the fingers'
                     printed = True
                     extended = True
             elif feat_index < feat_len:
                 for hand in hands:
                     # only for right hand as of now
                     if hand.is_right and time_elapsed > skip_time:
                         pointables = frame.pointables
                         if len(pointables.extended()) != 5:
                             print "Please extend all the fingers for calibration"
                             extended = True
                         else:
                             if extended:
                                 print "Good! Calibration is starting. Do NOT move the hand..."
                                 #time.sleep(10 * gap_time)
                                 extended = False
                             # Relative origin(used to calculate the relative distances)
                             hand_center = hand.stabilized_palm_position
                             for pointable in pointables:
                                 finger = Leap.Finger(pointable)
                                 pointable_pos = pointable.stabilized_tip_position
                                 relative_pos = pointable_pos - hand_center
                                 features.finger_lengths[feat_index][finger.type] = relative_pos.magnitude
                             print "Finger lengths", features.finger_lengths[feat_index]
                             feat_index += 1
             elif feat_index == feat_len:
                 feat_index += 1
                 middle_len.append(np.mean(features.finger_lengths, axis=0)[index_middle_finger])
                 reps_completed += 1
                 print "Remove hand from view"
                 printed = False
                 extended = False
             time.sleep(gap_time)
             time_elapsed += gap_time
Example #36
    def __init__(self, stateDim, actionDim, agentParams):
        self.__stateDim = stateDim
        self.__actionDim = actionDim
        self.__action = np.random.random(actionDim)
        self.__step = 0

        self.__alpha = 0.001
        self.__gamma = 0.9
        self.__decision_every = 6
        self.__explore_probability = 0.2
        self.__max_replay_samples = 20

        self.__features = Features()
        self.__previous_action = None
        self.__current_out = None
        self.__previous_out = None
        self.__previous_meta_state = None
        self.__previous_state = None

        self.__test = agentParams[0] if agentParams else None
        self.__exploit = False

        self.__segments = 2
        self.__actions = 3**self.__segments

        try:
            self.__net = load_model('net')
        except:
            print('Creating new model')
            self.__net = Sequential([
                Dense(50, activation='elu', input_dim=self.__features.dim),
                Dense(30, activation='elu'),
                Dense(self.__actions),
                Reshape((self.__actions, 1))
            ])

        self.__net.compile(optimizer=SGD(lr=self.__alpha), loss='mean_squared_error', sample_weight_mode='temporal')

        try:
            self.__replay = Replay.load('replay')
        except Exception as a:
            self.__replay = Replay(self.__actions)

        self.__replay_X = []
        self.__replay_Y = []
Example #38
File: merge_data.py Project: bparr/dap
def parse_harvest_data(lines, cells):
    labels = [Features(v) for v in lines[0][3:]]
    for line in lines[1:]:
        row, column = line[2], line[1]
        row = parse_coordinate(row) + NO_FILL_ROW_OFFSET
        cell = cells.get(row, column)
        cell.add_data(Features.PLOT_ID, line[0])
        for i, value in enumerate(line[3:]):
            cell.add_data(labels[i], value)
Example #39
def process_results(result):

    trips = []
    feature_array = []

    for r in range(len(result)):
        for j in range(len(result[r])):
            features = Features()
            trip = features._process_reittiopas_results_to_features(result[r][j])
            trips.append(trip)
            feature_array.append(features)
            
            
    return trips, feature_array
Example #40
File: data.py Project: fpeder/GPTrack
class DataHandler():

    def __init__(self, dataconf, dbconf, featdesc):
        self._config = {'data': dataconf, 'db': dbconf}
        self._c2l = Color2Label(dataconf.labels, dataconf.discard)
        self._features = Features(featdesc)

    def run(self):
        X = np.array([])
        y = np.array([])
        for im, gt in self._config['db'].glob():
            print im, gt
            im, gt = self.__load_data(im, gt)
            tx, ty = self._features.run(im, gt)
            X = np.vstack((X, tx)) if X.any() else tx
            y = np.hstack((y, ty)) if y.any() else ty
        return X, y

    def get_features(self, img):
        X, _ = self._features.run(img)
        return X

    def reshape(self, y, M, N):
        s = self._features.skip
        w = self._features.bs
        Ms, Ns = np.ceil((M-w)/s), np.ceil((N-w)/s)
        y = y.reshape(Ms, Ns)
        y = np.kron(y, np.ones((s, s), np.int32))
        return y

    def __load_data(self, img, gt):
        im = cv2.imread(img)
        gt = cv2.imread(gt)
        ds = self._config['data'].ds
        if ds:
            im = im[::ds, ::ds]
            gt = gt[::ds, ::ds]
        gt = self._c2l.run(gt)
        return im, gt

    @property
    def labels(self):
        return self._config['data'].labels
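reshape() upsamples the block-level predictions back toward pixel resolution with a Kronecker product against an all-ones block; the trick in isolation:

import numpy as np

y = np.array([[0, 1],
              [1, 0]])
print(np.kron(y, np.ones((2, 2), np.int32)))
# each label becomes a 2x2 patch:
# [[0 0 1 1]
#  [0 0 1 1]
#  [1 1 0 0]
#  [1 1 0 0]]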
Example #42
File: irobot.py Project: PhloxAR/irobot
    def __init__(self):
        self.config_provider = ConfigProvider()

        self.irobot = Robot()
        self.webcam = Webcam()

        self.marker = Marker()
        self.markers_cache = None

        self.features = Features(self.config_provider)

        self.texture_background = None
Example #43
File: bovw.py Project: fpeder/mscr
class BoVW(object):
    NFEAT_THR = 40

    def __init__(self, feat=SURF(), cls=RF(n_estimators=40), verbose=True):
        self._ft = Features(feat)
        self._da = Data(self._ft)
        self._fm = FitBoVW(cls)
        self._verbose = verbose
        self._cl = cls
        self._vq = None

    def predict(self, img):
        x = self._ft.run(img)[0]
        if x is not None and len(x) > self.NFEAT_THR:
            x = self._vq.run([x])
            p, prob = self._cl.predict(x), self._cl.predict_proba(x)
        else:
            p, prob = -1, 1.0
        return p, prob

    def fit(self, X, y, ndim):
        self._cl, H, w = self._fm.run(X, y, ndim)
        self._da.H = H.copy()
        self._da.w = w.copy()
        self._da.y = y.copy()

    def fit_from_db(self, dbroot, classes, ndim):
        X, y = self._da.load(dbroot, classes)
        self.fit(X, y, ndim)

    def fit_from_prep(self, infile):
        H, y, w = self._da.load_from_file(infile)
        self._vq = VQ(w, hist=w.shape[0])
        self._cl.fit(H, y)
        if self._verbose:
            print cross_validation.cross_val_score(
                self._cl, H, y, cv=3).mean()

    def save(self, outfile):
        if self._vq and self._cl:
            pickle.dump((self._vq, self._cl), open(outfile, 'wb'))

    def save_prep(self, outfile):
        self._da.dump(outfile, ['H', 'w', 'y'])

    def load(self, infile):
        assert pex(infile), 'bovw.py: %s doesn\'t exist' % infile
        self._vq, self._cl = pickle.load(open(infile, 'rb'))
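A persistence round trip (a sketch with hypothetical file names; fit_from_prep apparently consumes the dump written by save_prep):

bovw = BoVW()
bovw.fit_from_prep('train_prep.dat')  # builds the VQ codebook and fits the classifier
bovw.save('bovw_model.pkl')

loaded = BoVW()
loaded.load('bovw_model.pkl')
# label, prob = loaded.predict(img)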
Example #44
def test():
    f = Features(**PARAMS)
    while True:
        path = 'test/tmp.png'
        screenshot(path, region=REGION)
        im = Image(path)
        f.set_image(im)
        blobs = f.extract_blobs()
        if not blobs[0]:
            break

        blobs = [blob for blob in blobs if blob]

        f.show_blobs_on_image(blobs)
Example #46
	result_iters = open("reg_result_iters.txt", "w")
	result_iters.write("alpha iters\n")

	result_accs = open("reg_result_accs.txt", "w")
	result_accs.write("alpha acc\n")

	result_alphas = open("reg_result_alphas.txt", "w")
	result_alphas.write("alpha custo\n")

	lalphas = [0.01, 0.03, 0.1, 0.3, 1.0, 3.0, 6.0, 12.0, 24.0, 48.0]

	for nb_degree in range(1, max_degree+1):
		for k, alpha in enumerate(lalphas):

			X = Utils.add_column_with_ones(Features.map(X_orig, degree=nb_degree))

			validation_split = 0.2
			use_shuffle = False
			use_validation = False

			nb_input  = X.shape[0]
			nb_features = X.shape[1]
			nb_labels = len(set(np.squeeze(np.asarray(Y))))

			nb_iters  = 50000
			nb_epochs = 1

			# alpha	  = 3.0
			lbda	  = 0.0
			momentum  = 0.9
Example #47
from parse import Parse
from features import Features
from classification import Classification

if __name__ == '__main__':
    if len(sys.argv) > 1:
        print 'Parsing...',
        sys.stdout.flush()
        p = Parse(sys.argv[1])
        p.compute_fqdn()
        print 'DONE'

        print 'Computing features (Can take some time because of whois queries)...',
        sys.stdout.flush()
        features = Features(p)
        features.compute()
        print 'DONE'

        print 'Classification...',
        sys.stdout.flush()
        classification = Classification(features, p)
        classification.compute()
        print 'DONE'

        print 'Launching webserver...',
        sys.stdout.flush()
        flask_app = Flask('caphaw-dns-classifier')
        print 'DONE'

        @flask_app.route('/')
Example #48
File: main.py Project: anqfsh/SaltwashAR
class SaltwashAR:
 
    # constants
    INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [ 1.0, 1.0, 1.0, 1.0]])

    def __init__(self):
        # initialise config
        self.config_provider = ConfigProvider()

        # initialise robots
        self.rocky_robot = RockyRobot()
        self.sporty_robot = SportyRobot()

        # initialise webcam
        self.webcam = Webcam()

        # initialise markers
        self.markers = Markers()
        self.markers_cache = None

        # initialise features
        self.features = Features(self.config_provider)

        # initialise texture
        self.texture_background = None

    def _init_gl(self):
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # load robots frames
        self.rocky_robot.load_frames(self.config_provider.animation)
        self.sporty_robot.load_frames(self.config_provider.animation)

        # start webcam thread
        self.webcam.start()

        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    def _draw_scene(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # reset robots
        self.rocky_robot.reset()
        self.sporty_robot.reset()

        # get image from webcam
        image = self.webcam.get_current_frame()

        # handle background
        self._handle_background(image.copy())

        # handle markers
        self._handle_markers(image.copy())
       
        # handle features
        self.features.handle(self.rocky_robot, self.sporty_robot, image.copy())

        glutSwapBuffers()

    def _handle_background(self, image):
        
        # let features update background image
        image = self.features.update_background_image(image)

        # convert image to OpenGL texture format
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)     
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes('raw', 'BGRX', 0, -1)
 
        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)
        
        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-10.0)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
        glEnd()
        glPopMatrix()

    def _handle_markers(self, image):

        # attempt to detect markers
        markers = []

        try:
            markers = self.markers.detect(image)
        except Exception as ex:
            print(ex)

        # manage markers cache
        if markers:
            self.markers_cache = markers
        elif self.markers_cache: 
            markers = self.markers_cache
            self.markers_cache = None
        else:
            return

        for marker in markers:
            
            rvecs, tvecs, marker_rotation, marker_name = marker

            # build view matrix
            rmtx = cv2.Rodrigues(rvecs)[0]

            view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
                                    [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
                                    [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
                                    [0.0       ,0.0       ,0.0       ,1.0    ]])

            view_matrix = view_matrix * self.INVERSE_MATRIX

            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw cube
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            if marker_name == ROCKY_ROBOT:
                self.rocky_robot.next_frame(marker_rotation, self.features.is_speaking(), self.features.get_emotion())
            elif marker_name == SPORTY_ROBOT:
                self.sporty_robot.next_frame(marker_rotation, self.features.is_speaking(), self.features.get_emotion())

            glColor3f(1.0, 1.0, 1.0)
            glPopMatrix()

    def main(self):
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(100, 100)
        self.window_id = glutCreateWindow('SaltwashAR')
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl()
        glutMainLoop()
Example #49
				   help='SQLite database path')
parser.add_argument("-a", dest="address", default=None, help="Classify a single address.")
parser.add_argument("-c", dest="cluster", type=int, default=None, help="Classify a single cluster.")
parser.add_argument("--all-clusters", action="store_true", dest="all_clusters", default=False, help="Classify every cluster.")
options = parser.parse_args()

db = SQLiteWrapper(options.db)

try:
	db_blockchain = SQLiteWrapper("../blockchain/blockchain.sqlite")
	max_block = int(db_blockchain.query(max_block_query, fetch_one=True))
	db_blockchain.close()
except:
	max_block = 0

f = Features()

scores = f.features
labels = f.labels
labels_string = f.labels_string

with open("../grapher/tx_graph.dat", "rb") as gf:
	G = pickle.load(gf)
print("Graph loaded.")

with open("../clusterizer/clusters.dat", "rb") as cf:
	users = pickle.load(cf)
print("Clusters loaded.")

users = stripSingletons(users)
Example #50
csv_path, sep = 'bg_tfidf_data.csv', '\t'

cnx =  get_remote_db_context()
select_cur = cnx.cursor()
data_query = '''

select _guid, body 
from labeled
;
'''
test_ids_query = 'select id from test_ids;'

select_cur.execute(test_ids_query)

print 'fetching data...',
df = pd.read_sql(data_query, cnx)
test_ids = set(map(lambda i: i[0], select_cur.fetchall()))
print 'done'
cnx.close()

df = Features(df).add_tfidf()
df.drop('body', axis=1, inplace=True)
mask = df['_guid'].apply(lambda i: i in test_ids)
df_test = df[mask]
df_train = df[~mask]

file_name, ext = os.path.splitext(csv_path)
df_test.to_csv(file_name + '_test' + ext, sep=sep, quoting=csv.QUOTE_ALL)
df_train.to_csv(file_name + '_train' + ext, sep=sep, quoting=csv.QUOTE_ALL)
Example #51
File: vision.py Project: SSabev/SDPCode
class Vision:

    def __init__(self, pitchnum, stdout, sourcefile, resetPitchSize, noGui, debug_window,  pipe):

        self.noGui = noGui
        self.lastFrameTime = self.begin_time = time.time()
        self.processed_frames = 0

        self.running = True
        self.stdout = stdout

        self.pipe = pipe

        if sourcefile is None:
            self.camera = Camera()
        else:
            self.filetype = 'video'
            if sourcefile.endswith(('jpg', 'png')):
                self.filetype = 'image'

        self.gui = Gui(self.noGui)
        self.threshold = Threshold(pitchnum)
        self.thresholdGui = ThresholdGui(self.threshold, self.gui)
        self.preprocessor = Preprocessor(resetPitchSize)
        self.features = Features(self.gui, self.threshold)
        # if self.debug_window:
        #     self.debug_window = DebugWindow()
        # else:
        #     self.debug_window = None

        calibrationPath = os.path.join('calibration', 'pitch{0}'.format(pitchnum))
        self.camera.loadCalibration(os.path.join(sys.path[0], calibrationPath))

        eventHandler = self.gui.getEventHandler()
        eventHandler.addListener('q', self.quit)

        # Ugly stuff for smoothing coordinates - should probably move it
        self._pastSize = 5
        self._pastCoordinates = {
                            'yellow': [(0, 0)] * self._pastSize,
                            'blue': [(0, 0)] * self._pastSize,
                            'ball': [(0, 0)] * self._pastSize
                            }
        self._pastAngles = {
                            'yellow': [1.0] * self._pastSize,
                            'blue': [1.0] * self._pastSize
                           }

        while self.running:
            if self.preprocessor.hasPitchSize:
                self.outputPitchSize()
                self.gui.setShowMouse(False)
            else:
                eventHandler.setClickListener(self.setNextPitchCorner)

            while self.running:
                self.doStuff()

    def quit(self):
        self.running = False
        self.pipe.send('q')

    def print_fps(self):
        thisFrameTime = time.time()
        time_diff = thisFrameTime - self.lastFrameTime
        fps = 1.0 / time_diff
        self.processed_frames = self.processed_frames + 1
        avg_fps = self.processed_frames * 1.0 / (thisFrameTime - self.begin_time)
        self.lastFrameTime = thisFrameTime

        if self.stdout:
            print("Instantaneous fps = %f Average fps = %f" % (fps, avg_fps))

    def doStuff(self):

        frame = self.camera.getImageUndistort()

        # Uncomment to see changes in barrell distortion matrix
        # calibrationPath = os.path.join('calibration', 'pitch{0}'.format(0))
        # self.camera.loadCalibration(os.path.join(sys.path[0], calibrationPath))

        frame = self.preprocessor.preprocess(frame)

        self.gui.updateLayer('raw', frame)

        ents = self.features.extractFeatures(frame)
        self.outputEnts(ents)

        self.print_fps()

        self.gui.loop()

    def setNextPitchCorner(self, where):
        self.preprocessor.setNextPitchCorner(where)

        if self.preprocessor.hasPitchSize:
            self.outputPitchSize()
            self.gui.setShowMouse(False)
            self.gui.updateLayer('corner', None)
        else:
            self.gui.drawCrosshair(where, 'corner')

    def outputPitchSize(self):
        if self.stdout:
            print ("Pitch size:\t %i\t %i\n" % tuple(self.preprocessor.pitch_size))
        # if self.debug_window:
        #     self.debug_window.insert_text("Pitch size:\t %i\t %i\n" % tuple(self.preprocessor.pitch_size))
        self.pipe.send(InitSignal(self.preprocessor.pitch_size[0], self.preprocessor.pitch_size[1]))

    def addCoordinates(self, entity, coordinates):
        self._pastCoordinates[entity].pop(0)
        self._pastCoordinates[entity].append(coordinates)

        #(x, y) = coordinates;
        
        # if the frame is bad(-1) then add the most recent coordinate instead
        #if (x != -1):
        #    self._pastCoordinates[entity].append(coordinates)
        #else:
        #    self._pastCoordinates[entity].append(self._pastCoordinates[entity][-1])

    def smoothCoordinates(self, entity):
        x = sum(map(lambda (x, _): x, self._pastCoordinates[entity])) / self._pastSize
        y = sum(map(lambda (_, y): y, self._pastCoordinates[entity])) / self._pastSize
        return (x, y)

    def addAngle(self, entity, angle):
        self._pastAngles[entity].pop(0)
        self._pastAngles[entity].append(angle)
        # if the frame is bad(-1) then add the most recent angle instead
        # good angle is always in (0,2pi), bad angle is -1, careful with real number
        #if (angle > -0.5):
        #    self._pastAngles[entity].append(angle)
        #else:
        #    self._pastAngles[entity].append(self._pastAngles[entity][-1])


    def smoothAngle(self, entity):
        # angle is periodic (of 2pi) and a bit tricky to smooth
        temp = sorted (self._pastAngles[entity])
        
        # if max_angle > min_angle > pi, those angles are crossing 0
        # we must add a period to the small ones
        if (temp[-1] - temp[0] > math.pi):
            temp = map(lambda angle: angle + 2*math.pi if angle < math.pi else angle, temp)
        
        return sum(temp) / self._pastSize
        
    # add/substract period (2pi) so angle is always in (0,2pi)
    # assume they are off by at most a period
    def standardize_angle(self, angle):
        if (angle > 2*math.pi):
            return angle - 2*math.pi
        if (angle < 0):
            return angle + 2*math.pi
        return angle
            
    def outputEnts(self, ents):
        # Messyyy
        if not self.preprocessor.hasPitchSize:
            return

        msg_data = []

        for name in ['yellow', 'blue', 'ball']:
            entity = ents[name]
            coordinates = entity.coordinates()
            
            # Unrecognized frames (coordinates of -1) could be skipped up
            # to self.max_skip times before being accepted; that logic is
            # currently disabled.
            self.addCoordinates(name, coordinates)
            x, y = self.smoothCoordinates(name)

            # TODO: The system needs (0, 0) at top left!
            if y != -1:
                y = self.preprocessor.pitch_size[1] - y

            if name == 'ball':
                msg_data += [int(x), int(y)]
            else:
                # The detected angle is clockwise; flip it to anti-clockwise.
                angle = self.standardize_angle(2*math.pi - entity.angle())

                self.addAngle(name, angle)
                angle = self.standardize_angle(self.smoothAngle(name))

                msg_data += [int(x), int(y), angle]

        msg_data.append(int(time.time() * 1000))
        data = FrameData(*msg_data)

        if self.stdout:
            print ("Yellow:\t %i\t %i\t Angle:\t %s\nBlue:\t %i\t %i\t Angle:\t %s\nBall:\t %i\t %i\t\nTime:\t %i\n" % tuple(msg_data))
        # if debug_window:
        #     debug_window.insert_text("Yellow:\t %i\t %i\t Angle:\t %s\nBlue:\t %i\t %i\t Angle:\t %s\nBall:\t %i\t %i\t\nTime:\t %i\n" % tuple(msg_data))

        self.pipe.send(data)
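The sort-and-shift trick in smoothAngle above handles the 0/2*pi wrap by pushing the small angles up a period before taking a plain mean. An equivalent and often simpler alternative is the circular mean via atan2; a minimal standalone sketch, not part of the project above:

import math

def circular_mean(angles):
    # Mean of angles in radians, robust to the 0/2*pi wrap.
    s = sum(math.sin(a) for a in angles)
    c = sum(math.cos(a) for a in angles)
    return math.atan2(s, c) % (2 * math.pi)

# The buffer [0.1, 6.2, 0.05] straddles the wrap: a naive mean gives
# roughly 2.1, while the circular mean stays near 0 as expected.
print(circular_mean([0.1, 6.2, 0.05]))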
Example #52
 def test_correct_mentions_count(self):
     features = Features(self.testData)
     count = features.num_word_count('Obama', parser.parse('Sep 21, 2012').date())
     self.assertEqual(2, count)
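The test expects Features.num_word_count to count how often a word appears on a given date. The class internals are not shown here; a minimal sketch of one plausible implementation, assuming the constructor stores a mapping from dates to lists of tokenized documents (pure guesswork about the real API):

def num_word_count(self, word, date):
    # Sum the occurrences of `word` across all documents for `date`.
    return sum(doc.count(word) for doc in self.data.get(date, []))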
Example #53
parser.add_argument("-af", dest="address_filename", default=None, help="Classify every address in a text file, one per line.")
parser.add_argument("-cf", dest="cluster_filename", default=None, help="Classify every cluster in a text file, one per line.")
parser.add_argument("-c", dest="cluster", type=int, default=None, help="Classify a single cluster.")
parser.add_argument("--all-clusters", action="store_true", dest="all_clusters", default=False, help="Classify every cluster.")
options = parser.parse_args()

db = SQLiteWrapper(options.db)

# Fall back to 0 if the blockchain database cannot be read.
try:
	db_blockchain = SQLiteWrapper("../blockchain/blockchain.sqlite")
	max_block = int(db_blockchain.query(max_block_query, fetch_one=True))
	db_blockchain.close()
except Exception:
	max_block = 0

f = Features()

scores = f.features
labels = f.labels
labels_string = f.labels_string

with open("../grapher/tx_graph.dat", "rb") as gf:
	G = pickle.load(gf)
print("Graph loaded.")

with open("../clusterizer/clusters.dat", "rb") as cf:
	users = pickle.load(cf)
print("Clusters loaded.")

users = stripSingletons(users)
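stripSingletons is not defined in this excerpt; presumably it discards clusters containing a single address, which carry no grouping information. A minimal sketch under that assumption (the cluster layout, a mapping from cluster id to a collection of addresses, is a guess):

def stripSingletons(users):
    # Keep only clusters that group at least two addresses.
    return {cid: addrs for cid, addrs in users.items() if len(addrs) > 1}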
Example #54
File: data.py Project: fpeder/GPTrack
 def __init__(self, dataconf, dbconf, featdesc):
     self._config = {'data': dataconf, 'db': dbconf}
     self._c2l = Color2Label(dataconf.labels, dataconf.discard)
     self._features = Features(featdesc)
Example #55
im.show()
data = []

# Read raw events from the mouse input device.
device = InputDevice('/dev/input/event5')

skip = 0

m = 10  # number of click samples to collect
i = 0

X = np.zeros([m, 2], dtype='int16')  # cursor displacement per click
y = np.zeros(m, dtype='bool')        # click labels

for event in device.read_loop():
    if i == m:
        break
    # Mouse button press: evdev reports presses as EV_KEY events with
    # the button code in event.code.
    if (event.type == ecodes.EV_KEY and event.code == ecodes.BTN_MOUSE
            and event.value == 1 and skip <= 0):  # mousedown
        pb = pb.get_from_drawable(w,cm,58,140,0,0,*sz)
        im = Image(pb.get_pixels_array()) #creates simplecv image from pixbuf
        click = True
        f = Features(im, click)
        extracted = f.extract()
        if not extracted:
            skip = 4
        else:
            X[i] = [f.x_disp, f.y_disp]
            y[i] = f.click
            i += 1
        skip -= 1
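The loop leaves X (cursor displacement per click) and y (click labels) ready for fitting. A minimal follow-up sketch, assuming scikit-learn is available and that the collected labels contain both classes; the model choice is illustrative, not from the original:

from sklearn.ensemble import RandomForestClassifier

# Fit a classifier on the displacement/label pairs collected above.
clf = RandomForestClassifier(n_estimators=40)
clf.fit(X, y)
print(clf.predict([[12, -3]]))  # classify a hypothetical displacement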
Example #56
data = read_data(sys.argv[1])
for upper, lower in data:
    print upper, lower
    # Assign each previously unseen symbol the next free integer id,
    # keeping the inverse mapping in sync.
    for c in set(upper) | set(lower):
        if c not in Sigma:
            Sigma_inv[len(Sigma)] = c
            Sigma[c] = len(Sigma)

features = Features(Sigma, Sigma_inv)
for upper, lower in data:
    #print upper, lower, len(features.features)
    features.extract(upper, URC=0, ULC=0,create=True)
    break

print len(features.features)

print features.num_extracted


for k, v in  features.features.items():
    print k, v
#print features._right_context(2, "hello", 4)
#print features._left_context(2, "helloword", 7)
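The paired Sigma/Sigma_inv dictionaries implement a symbol-to-id interner. The same idiom, with the bookkeeping factored into one helper, in a self-contained sketch (names are illustrative):

def intern(symbol, table, inverse):
    # Give an unseen symbol the next free integer id and keep the
    # id-to-symbol inverse mapping in sync.
    if symbol not in table:
        inverse[len(table)] = symbol
        table[symbol] = len(table)
    return table[symbol]

table, inverse = {}, {}
for c in "abracadabra":
    intern(c, table, inverse)
print(table)    # e.g. {'a': 0, 'b': 1, 'r': 2, 'c': 3, 'd': 4}
print(inverse)  # {0: 'a', 1: 'b', 2: 'r', 3: 'c', 4: 'd'}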
Example #57
File: main.py Project: DocOnDev/mythtv
    def gather(self, debug=False):
        def _stage(text):
            print 'Processing %s' % (text)
        # Local imports to not pull missing dependencies in
        # on non-Gentoo machines.
        from globaluseflags import GlobalUseFlags
        from compileflags import CompileFlags
        from mirrors import Mirrors
        from overlays import Overlays
        from packagestar import PackageMask
        from systemprofile import SystemProfile
        from trivials import Trivials
        from features import Features
        from installedpackages import InstalledPackages

        _stage('global use flags')
        global_use_flags = GlobalUseFlags()

        _stage('compile flags')
        compile_flags = CompileFlags()

        _stage('mirrors')
        mirrors = Mirrors(debug=debug)

        _stage('overlays')
        overlays = Overlays()

        _stage('package.mask entries')
        user_package_mask = PackageMask()

        _stage('features')
        features = Features()

        _stage('trivials')
        trivials = Trivials()

        _stage('installed packages (takes some time)')
        if debug:
            def cb_enter(cpv, i, count):
                print '[% 3d%%] %s' % (i * 100 / count, cpv)
        else:
            def cb_enter(*_):
                pass
        installed_packages = InstalledPackages(debug=debug, cb_enter=cb_enter)

        machine_data = {}
        html_lines = []
        rst_lines = []
        metrics_dict = {}

        html_lines.append('<h1>Gentoo</h1>')
        rst_lines.append('Gentoo')
        rst_lines.append('=================================')
        rst_lines.append('')
        machine_data['protocol'] = '1.2'

        trivials.dump_html(html_lines)
        trivials.dump_rst(rst_lines)
        rst_lines.append('')
        trivials.get_metrics(metrics_dict)

        machine_data['features'] = features.serialize()
        features.dump_html(html_lines)
        features.dump_rst(rst_lines)
        rst_lines.append('')
        features.get_metrics(metrics_dict)

        machine_data['call_flags'] = compile_flags.serialize()
        compile_flags.dump_html(html_lines)
        compile_flags.dump_rst(rst_lines)
        rst_lines.append('')
        compile_flags.get_metrics(metrics_dict)

        machine_data['mirrors'] = mirrors.serialize()
        mirrors.dump_html(html_lines)
        mirrors.dump_rst(rst_lines)
        rst_lines.append('')
        mirrors.get_metrics(metrics_dict)

        machine_data['repos'] = overlays.serialize()
        overlays.dump_html(html_lines)
        overlays.dump_rst(rst_lines)
        rst_lines.append('')
        overlays.get_metrics(metrics_dict)

        machine_data['user_package_mask'] = user_package_mask.serialize()
        user_package_mask.dump_html(html_lines)
        user_package_mask.dump_rst(rst_lines)
        rst_lines.append('')
        user_package_mask.get_metrics(metrics_dict)

        machine_data['global_use_flags'] = global_use_flags.serialize()
        global_use_flags.dump_html(html_lines)
        global_use_flags.dump_rst(rst_lines)
        rst_lines.append('')
        global_use_flags.get_metrics(metrics_dict)

        machine_data['installed_packages'] = installed_packages.serialize()
        installed_packages.dump_html(html_lines)
        installed_packages.dump_rst(rst_lines)
        installed_packages.get_metrics(metrics_dict)

        for container in (trivials, ):
            for k, v in container.serialize().items():
                key = k.lower()
                if key in machine_data:
                    raise Exception('Unintended key collision')
                machine_data[key] = v

        machine_data['privacy_metrics'] = metrics_dict
        self.dump_metrics_html(html_lines, metrics_dict)
        rst_lines.append('')
        self.dump_metrics_rst(rst_lines, metrics_dict)

        excerpt_lines = []
        excerpt_lines.append('ACCEPT_KEYWORDS: ' + ' '.join(trivials.serialize()['accept_keywords']))
        excerpt_lines.append('CXXFLAGS: ' + ' '.join(compile_flags.serialize()['cxxflags']))
        excerpt_lines.append('MAKEOPTS: ' + ' '.join(compile_flags.serialize()['makeopts']))
        excerpt_lines.append('...')

        self._data = machine_data
        self._html = '\n'.join(html_lines)
        self._rst = '\n'.join(rst_lines)
        self._excerpt = '\n'.join(excerpt_lines)
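gather() leaves the collected report in self._data, self._html, self._rst and self._excerpt. A plausible follow-up, not shown in this excerpt, is persisting the machine data; a minimal sketch, assuming `machine_data` is the dict built above and is JSON-serializable:

import json

# Hypothetical: dump the gathered machine data for later submission.
with open('machine_data.json', 'w') as fh:
    json.dump(machine_data, fh, indent=2, sort_keys=True)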