Code Example #1
def test_entered_non_empty_batcher():
    """Test appending to a non-empty DataBatcher."""
    data_batcher = DataBatcher()
    data_batcher.entered(datetime_range(1, 3))
    data_batcher.entered(datetime_range(4, 6))

    batch = data_batcher.batch()
    assert batch.entering == datetime_range(1, 6)
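These tests call a datetime_range helper that is not included in the excerpts. A minimal sketch of what it might look like, assuming it returns a list of per-second datetime stamps over a closed range (the base time and spacing are guesses, not the project's actual helper):

from datetime import datetime, timedelta

def datetime_range(start_second, end_second):
    """Hypothetical helper: per-second timestamps over a closed range."""
    base = datetime(2020, 1, 1)
    return [base + timedelta(seconds=s)
            for s in range(start_second, end_second + 1)]

With a closed range, datetime_range(1, 3) followed by datetime_range(4, 6) concatenates to datetime_range(1, 6), which is exactly what the assertions above and below rely on.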
Code Example #2
def test_left_non_empty_batcher():
    """Test appending to a non-empty DataBatcher."""
    data_batcher = DataBatcher()
    data_batcher.left(datetime_range(1, 3))
    data_batcher.left(datetime_range(4, 6))

    batch = data_batcher.batch()
    assert batch.leaving == datetime_range(1, 6)
Code Example #3
def test_entered_list_with_non_float():
    """Test exception raising on invalid list elements."""
    data_batcher = DataBatcher()

    invalid_list = [1, 2, 'a']
    with pytest.raises(TypeError):
        data_batcher.entered(invalid_list)

    batch = data_batcher.batch()
    assert not batch.entering
Code Example #4
def test_entered_non_float():
    """Test exception raising on non-list arguments."""
    data_batcher = DataBatcher()

    invalid_arg = 100
    with pytest.raises(TypeError):
        data_batcher.entered(invalid_arg)

    batch = data_batcher.batch()
    assert not batch.entering
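Taken together, these tests pin down the contract of this DataBatcher: entered and left accept only lists of timestamps, raise TypeError otherwise without mutating state, and batch drains the accumulated lists. A minimal sketch consistent with those tests, assuming datetime elements and a simple named-tuple batch (an illustration, not the project's implementation):

from collections import namedtuple
from datetime import datetime

Batch = namedtuple('Batch', ['entering', 'leaving'])

class DataBatcher(object):
    """Sketch: accumulate entering/leaving timestamps, drain on batch()."""

    def __init__(self):
        self._entering = []
        self._leaving = []

    def entered(self, timestamps):
        self._validate(timestamps)
        self._entering.extend(timestamps)

    def left(self, timestamps):
        self._validate(timestamps)
        self._leaving.extend(timestamps)

    def batch(self):
        drained = Batch(self._entering, self._leaving)
        self._entering = []
        self._leaving = []
        return drained

    @staticmethod
    def _validate(timestamps):
        # Validate before touching state, so a failed call leaves the
        # batcher unchanged (as the invalid-argument tests require).
        if not isinstance(timestamps, list):
            raise TypeError('timestamps must be a list')
        if not all(isinstance(ts, datetime) for ts in timestamps):
            raise TypeError('timestamps must be datetime instances')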
Code Example #5
def test_entered_empty_batcher(sample_timestamps):
    """Test appending to an empty DataBatcher.

    Args:
        sample_timestamps: samples for the correct argument
    """
    data_batcher = DataBatcher()
    data_batcher.entered(sample_timestamps)

    batch = data_batcher.batch()
    assert batch.entering == sample_timestamps
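The sample_timestamps argument is a pytest fixture defined elsewhere in the test suite. A plausible sketch, reusing the datetime_range helper assumed earlier:

import pytest

@pytest.fixture
def sample_timestamps():
    """Hypothetical fixture: a short list of valid timestamps."""
    return datetime_range(1, 5)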
Code Example #6
def test_entered_batch_entered():
    """Test appending after a batch."""
    data_batcher = DataBatcher()
    data_batcher.entered(datetime_range(1, 3))
    data_batcher.batch()
    data_batcher.entered(datetime_range(4, 6))

    batch = data_batcher.batch()
    assert batch.entering == datetime_range(4, 6)
Code Example #7
File: main.py Project: pooh-labs/eaterslab
    def __init__(self):
        """Initialize signal handlers, API client and run."""
        self._should_close = False
        self._frame_ingestor = FrameIngestor()
        self._batcher = DataBatcher()

        # Initialize optional subsystems up front so shutdown paths
        # can check them safely even if startup aborts early.
        self._counter = None
        self._archiver = None
        self._api_connector = None

        # Add signal handlers
        signal.signal(signal.SIGINT, self._handle_signals)
        signal.signal(signal.SIGTERM, self._handle_signals)

        # Extra configuration
        self._uses_file = False
        self._display_frame = read_int_from_env('DISPLAY_FRAME', 0) > 0
        self._max_frame_width = read_int_from_env(
            'MAX_FRAME_WIDTH', DEFAULT_FRAME_WIDTH,
        )

        # Start client
        self._start()
Code Example #8
File: main.py Project: pooh-labs/eaterslab
class Main(object):
    """Handle application lifecycle."""

    def __init__(self):
        """Initialize signal handlers, API client and run."""
        self._should_close = False
        self._frame_ingestor = FrameIngestor()
        self._batcher = DataBatcher()

        # Initialize optional subsystems up front so _close() can
        # check them safely even if startup aborts early.
        self._counter = None
        self._archiver = None
        self._api_connector = None

        # Add signal handlers
        signal.signal(signal.SIGINT, self._handle_signals)
        signal.signal(signal.SIGTERM, self._handle_signals)

        # Extra configuration
        self._uses_file = False
        self._display_frame = read_int_from_env('DISPLAY_FRAME', 0) > 0
        self._max_frame_width = read_int_from_env(
            'MAX_FRAME_WIDTH', DEFAULT_FRAME_WIDTH,
        )

        # Start client
        self._start()

    def _init_counter(self):
        """Set up people counter."""
        try:
            configuration = load_counter_configuration()
        except RuntimeError as ex:
            msg = 'Could not load counter settings: {0}'.format(ex)
            logging.error(msg)
            self._should_close = True
            self._close()
            return
        logging.info('Setting up counter...')
        self._counter = PeopleCounter(configuration)

    def _init_archiver(self, start_time: datetime):
        """Set up archiver and save initial message.

        Args:
            start_time: monitoring start timestamp
        """
        archives_dir = os.getenv('ARCHIVES_DIR', None)
        if not archives_dir:
            logging.error('Missing ARCHIVES_DIR')
            self._should_close = True
            return

        start_time_string = start_time.strftime('%Y-%m-%d_%H_%M_%S')
        archive_path = '{0}/{1}.csv'.format(archives_dir, start_time_string)

        logging.info('Opening archive...')

        try:
            self._archiver = CsvArchiver(archive_path)
        except RuntimeError as ex1:
            msg = 'Could not setup archive: {0}'.format(ex1)
            logging.error(msg)
            self._archiver = None
            self._should_close = True
            return

        try:
            self._archiver.init()
        except RuntimeError as ex2:
            msg = 'Could not init archive: {0}'.format(ex2)
            logging.error(msg)
            self._archiver = None
            self._should_close = True
            return

        # Save initial event
        self._archiver.append_event(
            self._last_batch_time, EventType.monitoring_started,
        )

    def _init_api_connector(self, start_time: datetime):
        """Set up uplink and send initial message.

        Args:
            start_time: monitoring start timestamp
        """
        try:
            configuration = load_api_configuration()
        except RuntimeError as ex:
            msg = 'Could not load API settings: {0}'.format(ex)
            logging.error(msg)
            self._should_close = True
            self._close()
            return
        camera_pk = configuration.camera_pk
        logging.info('Setting up uplink...')
        self._api_connector = ApiConnector(
            camera_pk, configuration, start_time,
        )

    def _start(self):
        """Start the system."""
        self._start_monitoring()

        while not self._should_close:
            # Execute loop
            self._execute_loop()

        self._close()

    def _start_monitoring(self):
        """Start monitoring coroutines."""
        # Save start time
        start_time = datetime.now().astimezone()
        self._last_batch_time = start_time
        logging.info('System starts')

        # Start monitoring
        self._init_counter()
        self._init_archiver(start_time)
        self._init_api_connector(start_time)
        self._init_ingestion_stream()

    def _execute_loop(self):
        """Execute main program loop."""
        # Get camera frame
        frame = self._frame_ingestor.get_frame()
        if self._uses_file and frame is None:
            self._should_close = True
            return

        frame = resize(frame, width=self._max_frame_width)

        if self._display_frame:
            cv2.imshow('Frame', frame)
            cv2.waitKey(1)

        # Update counter
        current_time = datetime.now().astimezone()
        self._counter.update(frame, current_time)
        self._batcher.entered(self._counter.get_entering_list())
        self._batcher.left(self._counter.get_leaving_list())

        # Run batching
        delta = current_time - self._last_batch_time
        if delta.total_seconds() > BATCH_SECONDS:
            self._last_batch_time = current_time

            # Log data
            batch = self._batcher.batch()
            logging.debug('People in: {0}'.format(len(batch.entering)))
            logging.debug('People out: {0}'.format(len(batch.leaving)))

            # Add to archiver
            self._archiver.append(batch)
            self._archiver.flush()

            # Send to API endpoint
            if not self._api_connector.send(batch):
                logging.warning('Could not upload events')

    def _close(self):
        """Close the system."""
        # Save shutdown time
        shutdown_time = datetime.now().astimezone()
        logging.info('System shutting down...')

        # Finish ingestion stream
        if self._display_frame:
            cv2.destroyAllWindows()
        if self._frame_ingestor.has_source():
            logging.info('Closing ingestor source...')
            self._frame_ingestor.release_source()

        # Finish archiver
        if self._archiver:
            logging.info('Closing archive...')
            self._archiver.append_event(
                shutdown_time, EventType.monitoring_ended,
            )
            self._archiver.finalize()

        # Finish monitoring
        if self._api_connector:
            logging.info('Closing uplink...')
            self._api_connector.close(shutdown_time)

    def _init_ingestion_stream(self):
        """Init ingestion stream."""
        logging.info('Opening camera stream...')

        stream_type = read_int_from_env('FRAME_SOURCE')
        if stream_type is None:
            self._should_close = True
            return

        if stream_type == 0:  # File
            self._uses_file = True
            path = os.getenv('SOURCE_FILE_PATH', None)
            source = FileIngestorSource(path)
        elif stream_type == 1:  # Webcam
            stream_num = read_int_from_env('WEBCAM_STREAM_NUM')
            if stream_num is None:
                self._should_close = True
                return
            source = WebcamIngestorSource(stream_num)
        else:  # Unknown source type: bail out instead of using an unbound source
            logging.error('Unknown FRAME_SOURCE: {0}'.format(stream_type))
            self._should_close = True
            return

        try:
            self._frame_ingestor.register_source(source)
        except RuntimeError as ex:
            msg = 'Could not register_source: {0}'.format(ex)
            logging.error(msg)
            self._should_close = True

    def _handle_signals(self, _signum, _frame):
        """Handle interruption events.

        Args:
            _signum: Signal number
            _frame: Current stack frame
        """
        self._should_close = True
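Both __init__ and _init_ingestion_stream above lean on a read_int_from_env helper that is not part of the excerpt. A plausible sketch, assuming it returns the parsed integer, a supplied default, or None, logging failures along the way (the exact behavior is a guess):

import logging
import os

def read_int_from_env(name, default=None):
    """Hypothetical helper: parse an integer environment variable."""
    raw = os.getenv(name)
    if raw is None:
        if default is None:
            logging.error('Missing environment variable: {0}'.format(name))
        return default
    try:
        return int(raw)
    except ValueError:
        logging.error('Invalid value for {0}: {1}'.format(name, raw))
        return default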
Code Example #9
from model import Model
from log_reg_model import LogisticRegressionModel
from soft_reg_model import SoftmaxRegressionModel
from data_batcher import DataBatcher
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import tensorflow as tf

epochs = 600
batch_size = 20

batcher = DataBatcher("../normalizedData/trainingSetX.npy",
                      "../normalizedData/trainingSetY.npy",
                      "../normalizedData/testSetX.npy",
                      "../normalizedData/testSetY.npy")
model = Model([40])
log_model = LogisticRegressionModel()
soft_model = SoftmaxRegressionModel()

plot_x = list()
plot_y = list()
plot_y_log = list()
plot_y_soft = list()
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    epoch_index = 0
    while epoch_index < epochs:
        samples, labels = batcher.get_batch(batch_size)
        model.train_model(session, samples, labels)
        log_model.train_model(session, samples, labels)
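The DataBatcher here is a different class from the one in the earlier examples: it loads pre-normalized .npy arrays and serves mini-batches via get_batch. A minimal sketch of that interface, assuming random sampling (the real sampling strategy is not shown):

import numpy as np

class DataBatcher(object):
    """Sketch: serve random mini-batches from .npy training data."""

    def __init__(self, train_x_path, train_y_path, test_x_path, test_y_path):
        self.train_x = np.load(train_x_path)
        self.train_y = np.load(train_y_path)
        self.test_x = np.load(test_x_path)
        self.test_y = np.load(test_y_path)

    def get_batch(self, batch_size):
        # Sample a random mini-batch without replacement.
        idx = np.random.choice(len(self.train_x), batch_size, replace=False)
        return self.train_x[idx], self.train_y[idx]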
Code Example #10
                print(
                    f'========================= {opt_name} ========================='
                )
                for learning_rate in learning_rates:
                    print(
                        f'------------------------- {learning_rate} -------------------------'
                    )

                    batch_size = 100
                    iterations = 2000

                    metrics = defaultdict(list)

                    data_path = f'data/processed_data/{data_type}_movement/sliding_window_{version}_sec.p'

                    batcher = DataBatcher(data_path)
                    length, dimensions, n_labels = batcher.get_info()

                    for nc in (4, 8):
                        print(
                            f'_________________________ {nc} Cells _________________________'
                        )

                        for cs in (16, 32):
                            print(
                                f'......................... {cs} size .........................'
                            )

                            name = f'rnn/{opt_name}/{data_type}_movement/{str(learning_rate).replace(".", "-")}/{nc}_cells/{cs}_size/{version}_sec'

                            metric_path = os.path.join(*name.split('/')[:-1])
Code Example #11
File: train.py Project: tallen11/myai
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from data_batcher import DataBatcher
from vae_model import VAEModel
from time import time
import numpy as np
import tensorflow as tf

print("Finding training data...")
batcher = DataBatcher("generated_data")

print("Building model...")
model = VAEModel(50, [40, 35, 30])
batch_size = 5000
training_steps = 200000

print("Starting training...")
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    for i in range(training_steps):
        batch, epoch_complete = batcher.get_batch(batch_size)
        model.train_model(session, inputs=batch)
        if epoch_complete:
            test_batch = batcher.get_test_batch()
            loss = model.get_loss(session, inputs=test_batch)
            print("Epoch complete - loss: {}".format(loss))
        if i % 500 == 0:
            test_batch = batcher.get_test_batch()
            loss = model.get_loss(session, inputs=test_batch)
            print("Step {} - loss: {}".format(i, loss))
Code Example #12
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from resnet import ResNet
from data_batcher import DataBatcher
from time import time
import tensorflow as tf

model = ResNet(1)

batcher = DataBatcher("cifar")
saver = tf.train.Saver()

epochs = 1000
batch_size = 512

with tf.Session() as session:
    print("Beginning training...")
    session.run(tf.global_variables_initializer())
    epoch_index = 0
    accuracy_data = []
    train_accuracy_data = []
    # step_index = 0
    epoch_start_time = time()
    while True:
        if batcher.epoch_finished():
            accuracy = 0
            image_batches, label_batches = batcher.get_test_batches(50)
            for i in range(len(image_batches)):
                accuracy += model.get_accuracy(session, image_batches[i],
                                               label_batches[i])
            accuracy /= len(image_batches)
Code Example #13
def test_empty_batch():
    """Test batching on empty DataBatcher."""
    data_batcher = DataBatcher()
    batch = data_batcher.batch()
    assert not batch.entering
    assert not batch.leaving
Code Example #14
from data_batcher import DataBatcher
from nlp_model import NLPModel
import tensorflow as tf

batcher = DataBatcher("data/command_data_noisy.json")
model = NLPModel((batcher.max_sentence_len, batcher.total_words),
                 batcher.total_classes, [3, 4, 5])

saver = tf.train.Saver()

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state("checkpoints/")
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(session, ckpt.model_checkpoint_path)
    else:
        print("Failed loading NLP model!")

    print(batcher.key_map)
    while True:
        command = input("› ")
        command_processed = batcher.preprocess_string(command)
        tensor = [batcher.sentence_to_tensor(command_processed)]
        prediction = model.predict(session, tensor)
        print("\tPredicted %s" % batcher.index_key_map[prediction])
Code Example #15
File: nlp_trainer.py Project: tallen11/myai
from nlp_model import NLPModel
from data_batcher import DataBatcher
import tensorflow as tf

batcher = DataBatcher("data/command_data_noisy.json")
model = NLPModel((batcher.max_sentence_len, batcher.total_words),
                 batcher.total_classes, [3, 4, 5])

saver = tf.train.Saver()

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    epoch_index = 0
    for i in range(10000):
        if batcher.epoch_finished():
            sentences, labels = batcher.generate_full_batch()
            accuracy = model.get_accuracy(session, sentences, labels)
            print("Epoch %i ~ %f" % (epoch_index, accuracy))
            epoch_index += 1
            batcher.prepare_epoch()
            saver.save(session, "checkpoints/nlp_model.ckpt")
        else:
            sentences, labels = batcher.generate_batch(50)
            model.train(session, sentences, labels)
    saver.save(session, "checkpoints/nlp_model.ckpt")
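The batcher in this last example (and in example #12) does its own epoch bookkeeping: epoch_finished, prepare_epoch, and sequential batch generation. A minimal sketch of that pattern, with hypothetical internals matching the calls used above:

import numpy as np

class EpochBatcher(object):
    """Hypothetical sketch of the epoch-tracking pattern used above."""

    def __init__(self, samples, labels):
        self.samples = samples
        self.labels = labels
        self._cursor = 0

    def epoch_finished(self):
        return self._cursor >= len(self.samples)

    def prepare_epoch(self):
        # Reshuffle and rewind for the next pass over the data.
        order = np.random.permutation(len(self.samples))
        self.samples = self.samples[order]
        self.labels = self.labels[order]
        self._cursor = 0

    def generate_batch(self, batch_size):
        # Serve the next sequential slice; the final slice of an epoch
        # may be shorter than batch_size.
        end = self._cursor + batch_size
        batch = (self.samples[self._cursor:end],
                 self.labels[self._cursor:end])
        self._cursor = end
        return batch

    def generate_full_batch(self):
        return self.samples, self.labels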