예제 #1
0
    def extractTo(self, fileName):
        """Extract features from every image in ``self.images_directory``
        and write them, plus ``self.header``, to ``fileName + '.arff'``.

        Each row is the feature vector returned by ``ReadImage().read``;
        the numeric class flag at index 6 is replaced by the nominal label
        'Krusty' (0.0) or 'Ned' (anything else) before logging/writing.

        Returns the list of per-image feature rows.
        """
        output_filename = fileName + '.arff'
        imagesData = []

        # Human-readable names for feature indices 0-5, matching the
        # dataset layout (index 6 is the class column).
        feature_labels = (
            'Krusty hair', 'Krusty tshirt', 'Krusty face',
            'Ned hair', 'Ned tshirt', 'Ned pants',
        )

        # No need to materialize the directory listing into a list just
        # to iterate it (PERF101).
        for index, image in enumerate(self.list_directory_files()):
            Logger.log(f'Extracting characteristics from {image}')

            features = ReadImage().read(f'{self.images_directory}/{image}')
            # Map the numeric class flag to the ARFF nominal label.
            features[6] = "Krusty" if features[6] == 0.0 else "Ned"
            imagesData.append(features)

            Logger.log(f'Data added to index {index}')
            Logger.log('Extracted Features:')
            # Same six log lines as before, driven by the label table.
            for label, value in zip(feature_labels, features):
                Logger.log(f'{label} = {value}')
            Logger.log(f'Class = {features[6]}', True)

            self.body += ','.join(map(str, features)) + "\n"

        Logger.log(f'Writing the ARFF file {output_filename} to disk')
        with open(output_filename, 'w') as fp:
            fp.write(self.header)
            fp.write(self.body)

        Logger.log('All Done!')
        return imagesData
예제 #2
0
    def predict(model, img, X_train, X_test, y_train, y_test):
        """Train a classifier on the given split and classify one image.

        Parameters
        ----------
        model : str or estimator
            'naive-bayes' or 'decision-tree' selects a fresh sklearn
            model; any other value is assumed to be an estimator
            instance already.
        img : image accepted by ``ReadImage().read``.
        X_train, X_test, y_train, y_test : dataset split.

        Returns ``(prediction, features_from_img, accuracy_percent)``.
        """
        features_from_img = ReadImage().read(img=img)

        if model == 'naive-bayes':
            model = naive_bayes.GaussianNB()
        elif model == 'decision-tree':
            model = tree.DecisionTreeClassifier()

        model.fit(X_train, y_train)

        scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
        # BUG FIX: the feature vector holds six features at indices 0-5
        # (index 6 is the class flag), but the old slice [0:5] dropped the
        # sixth feature, producing a 5-column sample for a model trained
        # on 6 columns.
        rescaled_feature = scaler.fit_transform(
            np.array(features_from_img[0:6]).reshape(-1, 1))

        # NOTE(review): fitting MinMaxScaler on this single sample scales
        # its features relative to each other, not to the training data —
        # verify this matches how X_train/X_test were preprocessed.

        # Renamed from `predict`, which shadowed this function's own name.
        test_predictions = model.predict(X_test)
        prediction = model.predict(rescaled_feature.reshape(1, -1))[0]

        accuracy = metrics.accuracy_score(y_test, test_predictions) * 100

        return prediction, features_from_img, accuracy
예제 #3
0
    def classify(self, img):
        """Classify *img* as 'Krusty' or 'Ned' with a Gaussian naive
        Bayes model trained on ``self.load_dataset()``, then print a
        JSON report (features, accuracy, label) to stdout.
        """
        final_features, final_labels = self.load_dataset()

        # 65/35 train/test split; no fixed random_state, so the reported
        # accuracy varies from call to call.
        X_train, X_test, y_train, y_test = model_selection.train_test_split(
            final_features, final_labels, test_size=0.35, train_size=0.65
        )

        featuresFromImg = ReadImage().read(img=img)

        model = naive_bayes.GaussianNB()
        model.fit(X_train, y_train)

        scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
        # BUG FIX: features occupy indices 0-5 (six values; index 6 is
        # the class) — the old slice [0:5] silently dropped 'Ned pants',
        # giving the sample one column fewer than the training data.
        rescaled_feature = scaler.fit_transform(
            np.array(featuresFromImg[0:6]).reshape(-1, 1))

        # Renamed from `predict` to avoid confusion with model.predict.
        test_predictions = model.predict(X_test)
        prediction = model.predict(rescaled_feature.reshape(1, -1))[0]

        accuracy = metrics.accuracy_score(y_test, test_predictions) * 100

        # Class flag: falsy -> Krusty, truthy -> Ned.
        label = 'Ned' if prediction else 'Krusty'

        print(json.dumps({
            'features': {
                'Krusty hair': featuresFromImg[0],
                'Krusty tshirt': featuresFromImg[1],
                'Krusty face': featuresFromImg[2],
                'Ned hair': featuresFromImg[3],
                'Ned tshirt': featuresFromImg[4],
                'Ned pants': featuresFromImg[5]
            },
            'prediction': {
                'accuracy': accuracy,
                'label': label
            }
        }))
예제 #4
0
def main():
    """Command-line entry point: parse and validate arguments, load the
    content/style images, and run the style-transfer optimizer."""
    import math

    parser = build_parser()

    # Parse input arguments
    options = parser.parse_args()

    # Validate with parser.error (argparse-conventional exit with usage)
    # instead of assert, which is silently stripped under `python -O`.
    if len(options.style_image_influence) != len(options.styles):
        parser.error('Number of weights needs to match number of style images')
    # Compare the float sum with a tolerance: an exact `== 1` rejects
    # inputs like ten 0.1 weights that miss 1.0 only by rounding error.
    if not math.isclose(sum(options.style_image_influence), 1.0,
                        rel_tol=1e-9, abs_tol=1e-9):
        parser.error('Weights do not add up to 1')

    # Get images from local source
    content_image, style_images = ReadImage(options.base_width,
                                            MOD_ASPECT_RATIO).read_local(
                                                options.content,
                                                options.styles)

    # Instantiate the optimizer
    optimizer = Optimizer(
        iterations=options.iterations,
        checkpoint_iter=options.checkpoint_iter,
        style_image_influence=options.style_image_influence,
        eval_content_layers=options.eval_content_layers,
        eval_style_layers=options.eval_style_layers,
        content_weight=options.content_weight,
        style_weight=options.style_weight,
        noise_ratio=options.noise_ratio,
        learning_rate=options.learning_rate,
        content_image=content_image,
        style_images=style_images,
        model_path=MODEL_PATH,
        save_path=SAVE_PATH,
        content_layer_influence=options.content_layer_influence,
        style_layer_influence=options.style_layer_influence)

    # Execute optimizer
    optimizer.execute()
        sys.exit("failed to connec to redis server")
        # NOTE(review): the line above is the tail of an error branch whose
        # try/except (or if) header lies outside this view; "connec" looks
        # like a typo for "connect" in the exit message — fix where this
        # string is defined, alongside the enclosing handler.

    # Test redis connection
    ts = redis_conn.hgetall(RedisLastAccessPrefix + device_id)
    print("last query time: ", ts)

    get_images = False
    packet_queue = queue.Queue()

    # Producer thread: pulls packets from the RTSP endpoint, pushes them to
    # RTMP and into packet_queue (presumably for the consumer below —
    # confirm against the RTSPtoRTMP implementation).
    th = RTSPtoRTMP(rtsp_endpoint=rtsp, 
                    rtmp_endpoint=rtmp, 
                    packet_queue=packet_queue, 
                    device_id=device_id,
                    disk_path=disk_path, 
                    redis_conn=redis_conn, 
                    is_decode_packets_event=decode_packet, 
                    lock_condition=lock_condition)
    # Daemon so the producer cannot keep the process alive on its own.
    th.daemon = True
    th.start()

    # Consumer thread: reads packets from the shared queue, coordinated
    # with the producer via decode_packet / lock_condition.
    ri = ReadImage(packet_queue=packet_queue, 
        device_id=device_id, 
        memory_buffer=memory_buffer,
        redis_conn=redis_conn, 
        is_decode_packets_event=decode_packet, 
        lock_condition=lock_condition)
    ri.daemon = True
    ri.start()
    # NOTE(review): both threads are daemons and both are joined; if
    # neither thread ever returns, these joins block forever — verify the
    # threads have a shutdown path.
    ri.join()
    
    th.join()