Example #1
File: entity.py Project: wmey/reclass
 def __init__(self,
              classes=None,
              applications=None,
              parameters=None,
              uri=None,
              name=None,
              environment=None):
     if classes is None: classes = Classes()
     self._set_classes(classes)
     if applications is None: applications = Applications()
     self._set_applications(applications)
     if parameters is None: parameters = Parameters()
     self._set_parameters(parameters)
     self._uri = uri or ''
     self._name = name or ''
     self._environment = environment or ''
Example #2
# Imports assumed by this snippet; Classes and Config are project-local
# helpers, and imsave is the SciPy-era writer (imageio.imwrite is the
# modern equivalent).
from os import listdir, makedirs
from os.path import exists, splitext

import cv2
import numpy as np
from moviepy.editor import VideoFileClip
from scipy.misc import imsave


class ClassesFromVideos(object):
    def __init__(self):
        self.__classes = Classes()
        self.__config = Config()
        self.__class = self.__config.get('detection_class')
        self.__conf = self.__config.get('confidence_threshold')
        self.__model = self.__config.get('caffe_model')
        self.__proto = self.__config.get('caffe_prototype')
        self.__caffe = cv2.dnn.readNetFromCaffe(self.__proto, self.__model)
        self.__count = 0

    def __detect(self, class_name, image):
        # Run the Caffe detector (SSD-style output) on one RGB frame and save
        # crops of the configured detection class that are at least 128x128.
        if len(image.shape) == 3 and image.shape[2] == 3:
            h, w = image.shape[:2]
            resized = cv2.resize(image, (300, 300))
            scaled = cv2.dnn.blobFromImage(resized, 0.007843, (300, 300),
                                           127.5)
            self.__caffe.setInput(scaled)
            items = self.__caffe.forward()
            for i in np.arange(0, items.shape[2]):
                cls = int(items[0, 0, i, 1])
                if items[0, 0, i, 2] > self.__conf and self.__classes.get(
                        cls) == self.__class:
                    # Detection bounds are (x1, y1, x2, y2) in image coordinates.
                    bounds = items[0, 0, i, 3:7] * np.array([w, h, w, h])
                    a, b, c, d = bounds.astype(int)  # np.int was removed in NumPy 1.24
                    crop = image[b:d, a:c]  # index rows by y, columns by x
                    if crop.shape[0] > 127 and crop.shape[1] > 127:
                        save_name = 'data/{}/{}.png'.format(
                            class_name, self.__count)
                        imsave(save_name, crop)
                        self.__count += 1

    def generate(self):
        # Each .mp4 in videos/ is a labelled source: the filename with digits
        # stripped becomes the label directory, and every frame goes through
        # the detector.
        dirlist = listdir('videos')
        video_names = [f for f in dirlist if 'mp4' in f]
        for video_name in video_names:
            class_name = splitext(video_name)[0]
            class_name = ''.join(i for i in class_name if not i.isdigit())
            if not exists('data/{}'.format(class_name)):
                makedirs('data/{}'.format(class_name))
            video_file = 'videos/{}'.format(video_name)
            for frame in VideoFileClip(video_file).iter_frames():
                self.__detect(class_name, frame)
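Judging by the lookup `self.__classes.get(cls) == self.__class`, `Classes` here maps detector class ids to names. Below is a minimal driver sketch for the class above, assuming only the `videos/` layout that `generate()` expects (`Classes` and `Config` are project-local and not shown here):

# Hypothetical driver script; all other settings are read from Config.
#   videos/cat01.mp4, videos/cat02.mp4  ->  data/cat/*.png
#   videos/dog01.mp4                    ->  data/dog/*.png
if __name__ == '__main__':
    extractor = ClassesFromVideos()
    extractor.generate()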
Example #3
 def __init__(self,
              settings,
              classes=None,
              applications=None,
              parameters=None,
              exports=None,
              uri=None,
              name=None,
              environment=None):
     self._uri = uri or ''
     self._name = name or ''
     if classes is None: classes = Classes()
     self._set_classes(classes)
     if applications is None: applications = Applications()
     self._set_applications(applications)
     if parameters is None: parameters = Parameters(None, settings, uri)
     if exports is None: exports = Exports(None, settings, uri)
     self._set_parameters(parameters)
     self._set_exports(exports)
     self._environment = environment
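For context, a minimal sketch of calling a constructor like the ones in Examples #1 and #3, assuming the surrounding class is reclass's Entity (as entity.py suggests); the settings object, URI and node name are placeholders:

# Hypothetical call site; 'settings', the URI and the node name are made up.
# Containers left as None fall back to empty Classes()/Applications()/
# Parameters()/Exports() instances, as in the constructor above.
entity = Entity(settings,
                uri='yaml_fs:///srv/reclass/nodes/node1.yml',
                name='node1',
                environment='base')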
Example #4
    def parse(self):
        pattern = re.compile(
            r'<td class="BTsubj">(.+)</td><td class="BTclass">(.+)</td><td class="BTtime">(.+)</td><td class="BTroom">(.+)</td></tr>'
        )
        # Be wary of using regular expressions to parse HTML;
        # see https://stackoverflow.com/a/1732454/1509809.
        # It works for now, and I don't want to spend time experimenting
        # with tags and children in BeautifulSoup.
        for line in self.page:
            matches = pattern.findall(line)
            if matches:
                rawdata = matches[0]

                clsname = rawdata[0]
                clstype = rawdata[1]
                # Starting and ending times sliced out of the BTtime cell.
                clstime = [rawdata[2][:5], rawdata[2][8:]]
                clsroom = rawdata[3]

                tempcls = Classes(clsname, clstype, clstime, clsroom)

                self.classes.append(tempcls)
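The comment above already points at BeautifulSoup as the sturdier route; here is a hedged sketch of the same extraction, assuming `self.page` is an iterable of HTML lines and the `BT*` cell classes are exactly those named in the regex:

# Hypothetical BeautifulSoup variant of parse(), meant as a method of the
# same class; the td class names come from the regex above.
from bs4 import BeautifulSoup

def parse_with_bs4(self):
    soup = BeautifulSoup(''.join(self.page), 'html.parser')
    for subj in soup.find_all('td', class_='BTsubj'):
        row = subj.parent  # the enclosing <tr>
        clsname = subj.get_text(strip=True)
        clstype = row.find('td', class_='BTclass').get_text(strip=True)
        rawtime = row.find('td', class_='BTtime').get_text(strip=True)
        clstime = [rawtime[:5], rawtime[8:]]
        clsroom = row.find('td', class_='BTroom').get_text(strip=True)
        self.classes.append(Classes(clsname, clstype, clstime, clsroom))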
Example #5
File: project.py Project: bellyfat/uml2fmw
 def __init__(self, xmlobj):
     self.associations = Associations(xmlobj)
     self.classes = Classes(xmlobj, associations=self.associations)
     xml_attributes = xmlobj.attrib
     super(Project, self).__init__(xml_attributes)
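The use of `xmlobj.attrib` suggests an `xml.etree.ElementTree` element (or an lxml-compatible one); a minimal usage sketch with a made-up model file:

# Hypothetical usage; 'model.xml' is a placeholder, and Project/Classes/
# Associations come from the uml2fmw project itself.
import xml.etree.ElementTree as ET

xmlobj = ET.parse('model.xml').getroot()
project = Project(xmlobj)
print(project.classes, project.associations)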
Example #6
	def __init__(self, cRace, cClass):		
		Races.__init__(self, cRace)
		Classes.__init__(self, cClass)
		self.characterLevel = 1
Example #7
# Imports assumed by this snippet; Classes and Config are project-local
# helpers, while scipy.misc.imread/imresize and the standalone keras package
# date the code (imageio/Pillow and tf.keras are the modern equivalents).
import pickle
from os import listdir
from os.path import isdir, isfile

import cv2
import numpy as np
from keras.applications import imagenet_utils, mobilenet
from scipy.misc import imread, imresize
from sklearn.decomposition import PCA
from sklearn.svm import LinearSVC


class ImageDetect(object):
    def __init__(self):
        # Pipeline: a Caffe detector for localisation, MobileNet for features,
        # PCA for dimensionality reduction, LinearSVC for the final label.
        self.__classes = Classes()
        self.__config = Config()
        self.__class = self.__config.get('detection_class')
        self.__null = self.__config.get('null_classname')
        self.__conf = self.__config.get('confidence_threshold')
        self.__model = self.__config.get('caffe_model')
        self.__proto = self.__config.get('caffe_prototype')
        self.__cmodel = self.__config.get('classifier_model')
        self.__dmodel = self.__config.get('decomposition_model')
        self.__pcacomp = self.__config.get('pca_components')
        self.__caffe = cv2.dnn.readNetFromCaffe(self.__proto, self.__model)
        self.__make_convolutional_model()
        if isfile(self.__cmodel) and isfile(self.__dmodel):
            print('Using existing models...')
            self.__load_classifier()
            self.__load_decomposition()
        else:
            print('Training...')
            self.__train_classifier()
            self.__save_classifier()
            self.__save_decomposition()

    def __make_convolutional_model(self):
        # Note: this reuses self.__model, which held the Caffe model path in
        # __init__, as the MobileNet feature extractor from here on.
        self.__model = mobilenet.MobileNet(weights='imagenet',
                                           include_top=False,
                                           input_shape=(128, 128, 3))

    def __save_classifier(self):
        with open(self.__cmodel, 'wb') as fp:
            pickle.dump(self.__classifier, fp)

    def __load_classifier(self):
        with open(self.__cmodel, 'rb') as fp:
            self.__classifier = pickle.load(fp)

    def __save_decomposition(self):
        with open(self.__dmodel, 'wb') as fp:
            pickle.dump(self.__decomposition, fp)

    def __load_decomposition(self):
        with open(self.__dmodel, 'rb') as fp:
            self.__decomposition = pickle.load(fp)

    def __conv_predict(self, image):
        # Resize to the MobileNet input size, preprocess, and return the
        # flattened convolutional feature vector.
        image = imresize(image, (128, 128)).astype(np.float32)
        image = imagenet_utils.preprocess_input(image)
        image = np.reshape(image, (1, 128, 128, 3))
        label = self.__model.predict([image])
        return label.ravel()

    def __generate_training_data(self):
        # Walk data/<label>/*.png and turn each saved crop into a MobileNet
        # feature vector; the directory name is the label.
        features, labels = [], []
        dirlist = listdir('data')
        dir_names = [d for d in dirlist if isdir('data/{}'.format(d))]
        for dir_name in dir_names:
            dirlist = listdir('data/{}'.format(dir_name))
            image_names = [f for f in dirlist if 'png' in f]
            for image_name in image_names:
                labels.append(dir_name)
                feature = imread('data/{}/{}'.format(dir_name, image_name))
                feature = self.__conv_predict(feature)
                features.append(feature)
        return features, labels

    def __train_classifier(self):
        # Reduce the MobileNet features with PCA, then fit a linear SVM.
        features, labels = self.__generate_training_data()
        self.__decomposition = PCA(n_components=self.__pcacomp)
        features = self.__decomposition.fit_transform(features)
        self.__classifier = LinearSVC()
        self.__classifier.fit(features, labels)

    def annotations(self, image):
        # Detect objects of the configured class, classify each crop with the
        # PCA + LinearSVC pipeline, and return (bounds, label) pairs.
        results = []
        h, w = image.shape[:2]
        resized = cv2.resize(image, (300, 300))
        scaled = cv2.dnn.blobFromImage(resized, 0.007843, (300, 300), 127.5)
        self.__caffe.setInput(scaled)
        items = self.__caffe.forward()
        for i in np.arange(0, items.shape[2]):
            cls = int(items[0, 0, i, 1])
            if items[0, 0, i, 2] > self.__conf and self.__classes.get(
                    cls) == self.__class:
                # Detection bounds are (x1, y1, x2, y2) in image coordinates.
                bounds = items[0, 0, i, 3:7] * np.array([w, h, w, h])
                bounds = bounds.astype(int)  # np.int was removed in NumPy 1.24
                a, b, c, d = bounds
                crop = image[b:d, a:c]  # index rows by y, columns by x
                if crop.shape[0] > 127 and crop.shape[1] > 127:
                    feature = self.__conv_predict(crop)
                    feature = self.__decomposition.transform([feature])
                    label = self.__classifier.predict(feature)[0]
                    results.append((bounds, label))
        return results

    def label_image(self, image, items):
        # Draw a green box plus a filled label bar for every detection whose
        # predicted class is not the configured null class.
        for item in items:
            if item[1] != self.__null:
                cv2.rectangle(image, (item[0][0], item[0][1]),
                              (item[0][2], item[0][3]), (0, 255, 0), 1)
                cv2.rectangle(image, (item[0][0], item[0][1] - 20),
                              (item[0][2], item[0][1]), (0, 255, 0),
                              cv2.FILLED)
                cv2.putText(image, item[1], (item[0][0] + 2, item[0][1] - 2),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0))
        return image
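A minimal sketch of driving the detector on one image; the file names are placeholders, and the channel-order conversion is only there because the training crops came from RGB sources (moviepy frames, scipy imread) while cv2.imread returns BGR:

# Hypothetical driver; 'street.png' is a placeholder input image.
if __name__ == '__main__':
    detector = ImageDetect()
    frame = cv2.cvtColor(cv2.imread('street.png'), cv2.COLOR_BGR2RGB)
    found = detector.annotations(frame)
    labelled = detector.label_image(frame, found)
    cv2.imwrite('street_labelled.png',
                cv2.cvtColor(labelled, cv2.COLOR_RGB2BGR))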