Example #1
0
 def test_create_complete(self):
     """A feature built from minimum data plus custom stats exposes every field."""
     subject = Feature(self.minimum_data)
     subject.add_custom_data(self.additional_data)
     expectations = [
         ('project', 'alias/project'),
         ('name', 'status_200'),
         ('min30', 0.0),
         ('mean30', 0.0453),
         ('max30', 1119.0),
         ('var30', 28.5136),
         ('std30', 5.3398),
         ('median30', 0.0),
     ]
     for attr, expected in expectations:
         eq_(expected, getattr(subject, attr))
Example #2
0
    def execute(self, *args, **kwargs):
        """ Computes simple statistics over all examples of project and interval.
            All operations are calculated over entire matrix across columns
            (features) except percentile and histogram. After all calculations
            data is saved in feature set in one record per feature/project and one
            bin per stat/interval.
        """
        # Key used to select the examples belonging to this project/interval.
        project_interval = "{0}_{1}".format(self.project.full_name,
                                            self.interval)

        # labels: column names; X: 2-D numpy matrix, one row per example.
        labels, X = self.create_ndarray(
            self.session.query(Example).filter_by(p_interval=project_interval))

        # Column-wise stats over the data columns only.  The first three
        # columns of X are skipped here and below, so labels[3:] lines up
        # with these per-column arrays by index.
        tmp_stats = {
            'max': np.amax(X[:, 3:], 0),
            'min': np.amin(X[:, 3:], 0),
            'mean': np.mean(X[:, 3:], 0),
            'std': np.std(X[:, 3:], 0),
            'var': np.var(X[:, 3:], 0),
            'median': np.median(X[:, 3:], 0),
        }

        # reshape by feature: one Feature record per data column
        for feature_index, feature_name in enumerate(labels[3:]):
            if feature_name == 'grouptime':
                continue
            custom_data = {}
            xkey = feature_index + 3  # absolute column index into X
            feature = Feature({
                'project': self.project.full_name,
                'name': feature_name
            })
            hist, bin_hedges = np.histogram(X[:, xkey], bins=10)
            # casts to list are needed by the persistence layer (arrays not supported)
            custom_data['histogram{0}'.format(
                self.interval)] = [list(hist), list(bin_hedges)]
            custom_data['percentile{0}'.format(self.interval)] = list(
                np.percentile(X[:, xkey], [25, 50, 75]))

            # Flatten per-column stats into '<stat><interval>' keys, e.g. 'mean30'.
            for stat_name, stat_value in tmp_stats.items():
                name = '{0}{1}'.format(stat_name, self.interval)
                value = float(stat_value[feature_index])  # cast needed
                custom_data[name] = value
            feature.add_custom_data(custom_data)
            self.session.add(feature)
            # NOTE(review): the session is never committed here -- presumably
            # the caller commits after execute() returns; confirm.
    def execute(self, *args, **kwargs):
        """Compute simple statistics over all examples of a project/interval.

        Every statistic is taken column-wise over the whole example matrix,
        except percentile and histogram which are computed per column inside
        the loop.  Results are stored as one Feature record per data column,
        with one custom-data entry per stat/interval.
        """
        key = "{0}_{1}".format(self.project.full_name, self.interval)
        examples = self.session.query(Example).filter_by(p_interval=key)
        labels, X = self.create_ndarray(examples)

        # The first three columns are skipped everywhere, so labels[3:]
        # lines up with these per-column stat arrays.
        data_columns = X[:, 3:]
        stat_arrays = {
            'max': np.amax(data_columns, 0),
            'min': np.amin(data_columns, 0),
            'mean': np.mean(data_columns, 0),
            'std': np.std(data_columns, 0),
            'var': np.var(data_columns, 0),
            'median': np.median(data_columns, 0),
        }

        # One Feature record per data column.
        for idx, column_name in enumerate(labels[3:]):
            if column_name == 'grouptime':
                continue
            column = X[:, idx + 3]
            feature = Feature({
                'project': self.project.full_name,
                'name': column_name,
            })
            counts, edges = np.histogram(column, bins=10)
            # The persistence layer cannot store arrays, hence the list casts.
            custom = {
                'histogram{0}'.format(self.interval): [list(counts), list(edges)],
                'percentile{0}'.format(self.interval): list(
                    np.percentile(column, [25, 50, 75])),
            }
            for stat, per_column in stat_arrays.items():
                custom['{0}{1}'.format(stat, self.interval)] = float(per_column[idx])
            feature.add_custom_data(custom)
            self.session.add(feature)
 def _load_feature_with_background(self):
     """Persist the parsed feature and, when present, its background scenario.

     Reads the feature section out of ``self._parsed_data``, creates (or
     disambiguates) the corresponding database Feature, then creates the
     background scenario and its steps when one was parsed.

     Returns:
         The saved/created database Feature record.
     """
     feature_and_background = self._parsed_data[1]
     feature = feature_and_background[0]
     feature_name = ' '.join(feature[1])
     self._feature = feature_name  # Used to create scenarios
     try:
         # Existence check only: raises Feature.DoesNotExist when absent.
         Feature.get(Feature.name == feature_name)
         # Name already taken: save under a disambiguated name instead.
         db_feature = Feature(name=feature_name + " - (Duplicated)")
         db_feature.save()
     except Feature.DoesNotExist:
         # print() with a single argument behaves identically on Python 2
         # and 3; the original bare `print` statement was a Py3 syntax error.
         print("Adding new feature to database...")
         db_feature = Feature.create(name=feature_name)
     if len(feature_and_background) > 1:  # Has background
         background = feature_and_background[1]
         scenario = self._create_scenario(
             parser_constants.BACKGROUND, True, [], db_feature)
         for step in background[1]:
             self._create_step(' '.join(step[1]), scenario, step[0])
     return db_feature
import os, joblib, cv2, dlib
import socket
import uuid 

from model.feature import Feature
# Directory (relative to the server root) where annotated images are written.
saved_img_path = 'server/static/result/'

# Public URL prefix for results, built from this host's IP and the PORT
# environment variable.  NOTE(review): os.getenv('PORT') is None when the
# variable is unset, producing 'http://...:None/...' -- confirm PORT is set.
ip = socket.gethostbyname(socket.gethostname())
url = 'http://'+ip+':'+str(os.getenv('PORT'))+'/static/result/'

# Pre-trained model loaded once at import time.
model = joblib.load('model/tools/model.sav')

# dlib face detector and 68-point landmark predictor, wrapped in Feature.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("model/tools/shape_predictor_68_face_landmarks.dat")
ft = Feature(detector, predictor)
    
def draw_face_landmarks(img_path):
    """Detect faces in the image at *img_path* and select the largest one.

    NOTE(review): this definition appears truncated in this chunk -- the
    landmark drawing implied by the name is not visible here.
    """
    img = cv2.imread(img_path)
    image = cv2.imread(img_path, 0)  # grayscale copy fed to the detector
    detections = detector(image, 1)
    best_face = 0

    if len(detections) > 1:
        # Focus on the face with the largest bounding box, i.e. the one
        # closest to the camera in the image.
        max_surface = 0
        xm, ym, wm, hm = 0, 0, 0, 0
        for i, face in enumerate(detections) :
            # Finding points for rectangle to draw on face
            xi, yi, wi,hi = face.left(), face.top(), face.width(), face.height()
            # NOTE(review): max_surface is never updated inside the loop, so
            # the condition holds for every face with positive area and the
            # *last* face wins, not the largest -- looks like a missing
            # `max_surface = wi*hi`; confirm before fixing.
            if wi*hi > max_surface:
                xm, ym, wm,hm = xi, yi, wi,hi
    def get_feature(self, feat_name, feature_index):
        """Gather every statistic, graph and diagnostic for one column.

        Calls the per-column accessors in the same order as before and
        packages the results into a single Feature record.

        Returns:
            Feature populated with all collected stats for *feat_name*.
        """
        rounded = self.format_rounded_string  # shorthand for float formatting

        fields = {
            'feat_name': feat_name,
            'feat_index': feature_index,
            'feat_datatype': self.get_data_type(feat_name),
            'feat_vartype': self.get_vartype_formatted(feat_name),
            'feat_count': self.get_count(feat_name),
            'feat_missing': self.get_missing_formatted(feat_name),
            'feat_unique': self.get_count_unique(feat_name),

            # Numeric only
            'feat_average': rounded(self.get_average(feat_name)),
            'feat_median': self.get_median(feat_name),
            'feat_mode': self.get_mode(feat_name),
            'feat_max': self.get_max(feat_name),
            'feat_min': self.get_min(feat_name),
            'feat_stddev': rounded(self.get_stddev(feat_name)),
            'feat_variance': rounded(self.get_variance(feat_name)),
            'feat_quantile25': rounded(self.get_quantile25(feat_name)),
            'feat_quantile75': rounded(self.get_quantile75(feat_name)),
            'feat_iqr': rounded(self.get_iqr(feat_name)),
            'feat_skew': rounded(self.get_skew(feat_name)),
            'feat_kurtosis': rounded(self.get_kurtosis(feat_name)),

            # Non-numeric only
            'feat_mostcommon': self.get_mostcommon(feat_name),
            'feat_leastcommon': self.get_leastcommon(feat_name),

            # Graphs
            'graph_histogram': self.get_histogram(feat_name),
            'graph_countplot': self.get_countplot(feat_name),

            # Errors, warnings, and info
            'feat_errors': self.get_errors(feat_name),
            'feat_warnings': self.get_warnings(feat_name),
            'feat_notes': self.get_notes(feat_name),
        }

        # Save the feature stats
        return Feature(**fields)
Example #7
0
 def test_to_primitive(self):
     """to_primitive() succeeds on a feature carrying custom stats."""
     subject = Feature(self.minimum_data)
     subject.add_custom_data(self.additional_data)
     primitive = subject.to_primitive()
Example #8
0
 def test_validate_complete_data(self):
     """validate() accepts the minimum data extended with custom stats."""
     subject = Feature(self.minimum_data)
     subject.add_custom_data(self.additional_data)
     subject.validate()
Example #9
0
 def test_validate_minimum_data(self):
     """validate() accepts a feature built from the minimum data alone."""
     Feature(self.minimum_data).validate()
Example #10
0
 def test_tablename(self):
     """Both the class and its instances report the 'feature' table name."""
     eq_('feature', Feature.__tablename__)
     instance = Feature(self.minimum_data)
     eq_('feature', instance.__tablename__)
Example #11
0
 def test_key(self):
     """__key__ joins project and name with a hyphen."""
     subject = Feature(self.minimum_data)
     eq_('alias/project-status_200', subject.__key__)
Example #12
0
 def test_create_minimum_data(self):
     """The constructor maps the minimum data onto project and name."""
     subject = Feature(self.minimum_data)
     eq_('alias/project', subject.project)
     eq_('status_200', subject.name)
 def get_base_feature(self, base_feat, feature_index):
     """Build a Feature carrying only name, index, datatype and variable type."""
     return Feature(
         feat_name=base_feat,
         feat_index=feature_index,
         feat_datatype=self.get_data_type(base_feat),
         feat_vartype=self.get_variable_type(base_feat),
     )
 def find_features(self, expression):
     """Return features whose name matches *expression* wrapped in wildcards."""
     pattern = self._ANY_STRING + expression + self._ANY_STRING
     return Feature.select().where(Feature.name ** pattern)
 def find_features_per_step(self, step_id):
     """Return the features linked, through their scenarios, to the given step."""
     query = Feature.select().join(Scenario).join(Step)
     return query.where(Step.id == step_id)