def start(self):
    args = self.parser.parse_args()
    if hasattr(args, 'config_file'):
        config = Config()
        config.read_config_files([args.config_file])
        self.surround.set_config(config)
    self.transform(args)
def test_surround_config(self):
    path = os.path.dirname(__file__)
    config = Config()
    config.read_config_files([os.path.join(path, "config.yaml")])

    data = AssemblerState()
    assembler = Assembler("Surround config", InputValidator(), HelloStage(), config)
    assembler.run(data)
    self.assertEqual(data.config_value, "Scott")
def test_env_config(self):
    with patch.dict('os.environ', {
            'SURROUND_MAIN_COUNT': str(45),
            'SURROUND_TEMP': str(0.3)
    }):
        config = Config()
        config.read_from_dict(yaml.safe_load(yaml3))

        self.assertEqual(config["main"]["count"], 45)
        self.assertEqual(config["temp"], 0.3)
def test_surround_config(self):
    path = os.path.dirname(__file__)
    config = Config()
    config.read_config_files([os.path.join(path, "config.yaml")])

    surround = Surround([HelloStage()])
    surround.set_config(config)

    data = BasicData()
    surround.process(data)
    self.assertEqual(data.config_value, "Scott")
def main():
    logging.basicConfig(level=logging.INFO)
    surround_config = Config()
    surround_config.read_config_files(["config.yaml"])

    # Fetch the data from the data folder.
    raw_data = fetch_data(surround_config)

    wrapper = PipelineWrapper(surround_config)
    wrapper.run(raw_data)
def test_surround_override(self):
    path = os.path.dirname(__file__)
    surround = Surround([FirstStage()])
    config = Config()
    config.read_config_files([os.path.join(path, "stages.yaml")])
    surround.set_config(config)

    data = BasicData()
    surround.process(data)
    self.assertEqual(data.stage1, "first stage")
    self.assertEqual(data.stage2, "second stage")
def test_rejecting_attributes(self):
    data = BasicData()
    assembler = Assembler("Reject attribute", ValidateData(), HelloStage(), Config())
    assembler.init_assembler()
    assembler.run(data)
    self.assertRaises(AttributeError, getattr, data, "no_text")
def test_happy_path(self):
    data = BasicData()
    assembler = Assembler("Happy path", ValidateData(), HelloStage(), Config())
    assembler.init_assembler()
    assembler.run(data)
    self.assertEqual(data.text, test_text)
def test_auto_loading_project_config(self):
    config = Config(auto_load=True)
    self.assertEqual(config['main']['count'], 15)
    self.assertTrue(config['enable_logging'])
    self.assertIsInstance(config['objects'], list)
    self.assertEqual(config['objects'][0]['node'], 43)
    self.assertEqual(config['objects'][0]['size'], 355)
def run(self, is_training=False):
    data = TensorboardExampleData()
    config = Config(
        os.path.abspath(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")))

    self.assembler.set_config(config)
    self.assembler.init_assembler(True)

    data_size = config['data_size']

    if is_training:
        train_pct = config['train_pct']
        train_size = data.train_size = int(data_size * train_pct)

        # Generate values between -1 and 1
        x = np.linspace(-1, 1, data_size)
        np.random.shuffle(x)

        # Generate data following y = 0.5x + 2 + noise
        y = 0.5 * x + 2 + np.random.normal(0, 0.05, (data_size,))

        data.x_train, data.y_train = x[:train_size], y[:train_size]
        data.x_test, data.y_test = x[train_size:], y[train_size:]
    else:
        # Generate true values that can be used for evaluation
        data.x_test = np.linspace(-10, 10, data_size)
        data.y_test = 0.5 * data.x_test + 2

    # Run assembler
    self.assembler.run(data, is_training)
def test_happy_path(self):
    data = AssemblerState()
    assembler = Assembler("Happy path").set_stages(
        [InputValidator(), HelloStage()]).set_config(Config())
    assembler.init_assembler()
    assembler.run(data)
    self.assertEqual(data.text, test_text)
def test_rejecting_attributes(self):
    data = AssemblerState()
    assembler = Assembler("Reject attribute").set_stages(
        [InputValidator(), HelloStage()]).set_config(Config())
    assembler.init_assembler()
    assembler.run(data)
    self.assertRaises(AttributeError, getattr, data, "no_text")
def process_image_dir(input_dir, output_dir, config_path):
    """
    Processes all image files in the given input_dir and outputs face encodings
    (or an error) for each one in the given output_dir, using the config file
    located at config_path.
    """
    # Load config from the specified file.
    config = Config()
    config.read_config_files([config_path])

    # Load and initialise the face encoding pipeline.
    pipeline = face_recognition_pipeline()
    pipeline.set_config(config)
    pipeline.init_stages()

    # Process each image in input_dir.
    for filename in iglob_recursive(input_dir, "*.jpg", "*.JPG", "*.jpeg",
                                    "*.JPEG", "*.png", "*.PNG"):
        LOGGER.info("Processing {}...".format(filename))

        # Run the current filename through the pipeline.
        data = FaceRecognitionPipelineData(filename)
        pipeline.process(data)

        # Check and handle any errors.
        if data.error:
            LOGGER.error(str(data.error))
            output_filename = "{}.error.json".format(os.path.basename(filename))
            with open(os.path.abspath(os.path.join(output_dir, output_filename)),
                      "w") as output_file:
                output_file.write(json.dumps(data.error))
        else:
            # Write the output to file.
            output_filename = "{}.encoding.json".format(os.path.basename(filename))
            with open(os.path.abspath(os.path.join(output_dir, output_filename)),
                      "w") as output_file:
                output = dict(output=data.output_data, warnings=data.warnings)
                output_file.write(json.dumps(output))

            # Log any warnings.
            for warning in data.warnings:
                LOGGER.warning(str(warning))
def test_env_config(self):
    with patch.dict('os.environ', {
            'SURROUND_MAIN_COUNT': str(45),
            'SURROUND_TEMP': str(0.3),
            'SURROUND_STRING': "this is a test string",
            'SURROUND_BOOL': "true",
            'SURROUND_BOOLTWO': "false",
            "SURROUND_BOOLTHREE": "True",
            "SURROUND_BOOLFOUR": "False"
    }):
        config = Config()
        config.read_from_dict(yaml.safe_load(yaml3))

        self.assertEqual(config["main"]["count"], 45)
        self.assertEqual(config["temp"], 0.3)
        self.assertEqual(config["string"], "this is a test string")
        self.assertTrue(config["bool"])
        self.assertFalse(config["booltwo"])
        self.assertTrue(config["boolthree"])
        self.assertFalse(config["boolfour"])
def test_finaliser_successful_pipeline(self):
    data = AssemblerState()
    assembler = Assembler("Finalizer test", InputValidator(), HelloStage(), Config())
    assembler.set_finaliser(TestFinalStage())
    assembler.init_assembler()

    # Run assembler which will succeed
    assembler.run(data)

    # Finalizer should be executed
    self.assertTrue(data.final_ran)
def main():
    logging.basicConfig(level=logging.INFO)
    surround = Surround(
        [WranglingData(), ModellingAndPrediction(), DisplayOutput()])
    surround_config = Config()
    surround_config.read_config_files(["config.yaml"])
    surround.set_config(surround_config)
    surround.init_stages()

    # Fetch the data from the data folder.
    raw_data = fetch_data(surround_config)

    # epl_data is created as the SurroundData type.
    epl_data = EplData()

    # raw_data is fed into epl_data.
    epl_data.feed_data(raw_data)

    # Surround process started.
    surround.process(epl_data)
class FaceRecognitionWebApplication(tornado.web.Application):
    def __init__(self, **kwargs):
        # Get time for uptime calculation.
        self.start_time = datetime.datetime.now()

        # Load config file.
        self.config = Config()
        self.config_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "config.yaml")
        self.config.read_config_files([self.config_path])

        # Create PostgreSQL client.
        self.postgres_client = PostgresClient(self.config["postgres"]["db"],
                                              self.config["postgres"]["user"],
                                              self.config["postgres"]["host"],
                                              self.config["postgres"]["port"],
                                              self.config["postgres"]["password"])

        # Create face recognition pipeline.
        self.pipeline = face_recognition_pipeline()
        self.pipeline.set_config(self.config)
        self.pipeline.init_stages()

        init_args = dict(config=self.config,
                         pipeline=self.pipeline,
                         postgres_client=self.postgres_client,
                         start_time=self.start_time)

        kwargs["handlers"] = [
            (r"/", HomeHandler, init_args),
            (r"/info", InfoHandler, init_args),
            (r"/persons/photo-search", PhotoSearchHandler, init_args),
            (r"/persons/encoding-search", EncodingSearchHandler, init_args),
            (r"/persons/(?P<person_id>.*)/faces/(?P<face_id>.*)", FaceHandler, init_args),
            (r"/persons/(?P<person_id>.*)/faces", FaceHandler, init_args),
            (r"/persons/(?P<person_id>.*)", PersonHandler, init_args),
            (r"/persons", PersonCollectionHandler, init_args),
            (r"/faces", FaceCollectionHandler, init_args),
            (r"/encode", AdHocEncodingHandler, init_args),
        ]

        super().__init__(**kwargs)
def test_merging_config(self):
    config = Config()
    config.read_config_files([self.f1.name, self.f2.name])
    output = {
        'company': 'a2i2',
        'version': 'latest',
        'image': 'surround',
        'surround': {
            'enable_stage_output_dump': False
        },
        'main': {
            'surround': 'au.com.first_stage.FirstStage',
            'count': 15
        },
        'objects': [{
            'node': 43,
            'size': 355
        }],
        'enable_logging': True
    }
    self.assertDictEqual(config.__dict__["_storage"], output)
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('0.0.0.0', 1024))

    surround = Surround([ValidateData(), VadDetection()])
    config = Config()
    surround.set_config(config)
    surround.init_stages()

    audio_input = []
    last_packet_time = time.time()
    packet_id = -1

    while True:
        source_addr = None
        while len(audio_input) < 48000:
            # Retrieve one packet from the client
            # (2400 samples * 2 bytes per sample + 4-byte packet id)
            data_bytes, source_addr = sock.recvfrom(2400 * 2 + 4)

            # If the last packet was received over 5 seconds ago, clear the cache
            if last_packet_time + 5 < time.time():
                audio_input.clear()
            last_packet_time = time.time()

            # Get the packet id from the end of the byte array
            packet_id = int.from_bytes(data_bytes[2400 * 2:], sys.byteorder, signed=True)

            # Convert the byte array into an array of float samples (-1 to 1)
            for i in range(0, len(data_bytes) - 4, 2):
                sample = int.from_bytes(data_bytes[i:i + 2], sys.byteorder, signed=True)
                sample /= 32767.0
                audio_input.append(sample)

        # Process the audio data for voice activity
        data = VadData(audio_input)
        surround.process(data)

        # Rolling window of 2400 samples (50ms)
        audio_input = audio_input[2400:]

        if data.error is None and data.output_data is not None:
            print("Noise: " + str(data.output_data[0] * 100.0) +
                  " Voice: " + str(data.output_data[1] * 100.0))

            # Send the results back to whoever made the request
            results = {
                "id": packet_id,
                "noise": float(data.output_data[0]),
                "voice": float(data.output_data[1])
            }
            sock.sendto(json.dumps(results).encode(), (source_addr[0], 25565))
        else:
            print(data.error)
            break
def test_finaliser_fail_pipeline(self):
    # Ensure pipeline will crash
    data = AssemblerState()
    data.text = ""

    assembler = Assembler("Finalizer test", InputValidator(), HelloStage(), Config())
    assembler.set_finaliser(TestFinalStage())
    assembler.init_assembler()

    # Run assembler which will fail
    assembler.run(data)

    # Finalizer should still be executed
    self.assertTrue(data.final_ran)
        state.company = row['Company']
        state.outputs.append((state.word_count, state.company))

    def fit(self, state, config):
        print("No training implemented")


class AssemblerState(State):
    outputs = []
    rows = []
    row = None
    word_count = None
    company = None
    csv_file = None


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    # Prefix the config path with the example's directory when run from elsewhere.
    prefix = ""
    dir_extension = os.path.dirname(__file__)
    if dir_extension not in os.getcwd():
        prefix = dir_extension + "/"

    app_config = Config()
    app_config.read_config_files([prefix + "config.yaml"])
    assembler = Assembler("Loader example").set_stages(
        [CSVValidator(), ProcessCSV()]).set_config(app_config)
    MainRunner(assembler).run()
import logging

from surround import Surround, Config
from svr.SVR_stages import FeedData, SVRData, ComputeForecast, PlotResult

logging.basicConfig(level=logging.INFO)

# def main():
#     wrapper = PipelineWrapper()
#     config = wrapper.get_config()
#     output = wrapper.run(json.dumps({"data": "hello"}))
#     with open(os.path.join(config["output_path"], "output.txt"), 'w') as f:
#         f.write(output["output"])
#     logging.info(output)

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    surround = Surround([FeedData(), ComputeForecast(), PlotResult()])
    surround_config = Config()
    surround_config.read_config_files(["config.yaml"])
    surround.set_config(surround_config)
    surround.init_stages()

    svr_data = SVRData()
    svr_data.get_data()
    surround.process(svr_data)
print("Not training implementation") class AssemblerState(State): text = None class InputValidator(Validator): def validate(self, state, config): if state.text: raise ValueError("'text' is not None") if __name__ == "__main__": logging.basicConfig(level=logging.INFO) path = os.path.dirname(os.path.realpath(__file__)) app_config = Config() app_config.read_config_files([path + "/config.yaml"]) assembler = Assembler("Dump output example") assembler.set_validator(InputValidator()) assembler.set_config(app_config) assembler.set_estimator(WriteWorld(path), [WriteHello(path)]) assembler.run(AssemblerState()) print("Hello output.txt contains '%s'" % open(path + hello_file_path, "r").read()) print("World output.txt contains '%s'" % open(path + world_file_path, "r").read())
from imutils.video import WebcamVideoStream

import cv2
import numpy as np
import os
import logging

from surround import Config

LOGGER = logging.getLogger(__name__)

# Load config file.
CONFIG = Config()
CONFIG_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.yaml")
CONFIG.read_config_files([CONFIG_PATH])


class FaceDetectionWebcamStream(WebcamVideoStream):
    """
    Starts a separate thread to capture frames from the webcam, performs
    per-frame face detection, and stores the latest frame along with a
    bounding box.
    """

    def __init__(self, src=0, name="FaceDetectionWebcamStream"):
        super().__init__(src, name)
        self.src = src
        self.grabbed = False
        self.boxes = []
        self.net = cv2.dnn.readNetFromCaffe(
            os.path.dirname(os.path.realpath(__file__)) + "/../models/deploy.prototxt.txt",
            os.path.dirname(os.path.realpath(__file__)) +
import os
import subprocess
import collections
import logging
import sys

import pandas as pd

from surround import Config
from py2or3_wrapper import to_py_str, test_py

config = Config()
config.read_config_files(['config.yaml'])
input_path = config['input_path']
output_path = config['output_path']


class VerInfo:
    # Class variable
    ROW_HEADERS = ["repo", "path", "ver"]

    def __init__(self, repo, path, ver=""):
        self.repo = repo
        self.path = path
        self.ver = ver

    def to_rows(self):
        row = [self.repo, self.path, self.ver]
        return [row]


def process(repo, path, filepath):
    result = to_py_str(*test_py(filepath))
    def init_stage(self, config):
        file_ = open(config.get_path("surround.path_to_HelloSurround"), "r")
        self.data = file_.read()

    def operate(self, surround_data, config):
        print(self.data)


class HelloWorld(Stage):
    def __init__(self):
        self.data = None

    def init_stage(self, config):
        file_ = open(config.get_path("surround.path_to_HelloWorld"), "r")
        self.data = file_.read()

    def operate(self, surround_data, config):
        print(self.data)


class BasicData(SurroundData):
    text = None


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    surround = Surround([HelloSurround(), HelloWorld()])
    surround_config = Config()
    surround_config.read_config_files(["examples/init-stage-with-data/config.yaml"])
    surround.set_config(surround_config)
    surround.init_stages()
    surround.process(BasicData())
import os
import logging

import surround
from surround import Config

from celery import Celery, Task
from celery.signals import worker_process_init

from .stages import face_recognition_pipeline, FaceRecognitionPipelineData

# Set up logging.
logging.basicConfig(level=logging.ERROR)

# Load config from file.
config = Config()
config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.yaml")
config.read_config_files([config_path])

# Create Celery app and configure its queue broker and results backend.
app = Celery("face-recognition",
             broker=config["celery"]["broker"],
             backend=config["celery"]["backend"])

# TensorFlow is not fork-safe, so we need to initialise the
# Surround pipeline in @worker_process_init.connect() instead
# of doing it here.
pipeline = None


@worker_process_init.connect()
def init_worker_process(**kwargs):
    """
import os

from surround import Config

CONFIG = Config(os.path.dirname(__file__))
DOIT_CONFIG = {'verbosity': 2}
IMAGE = "%s/%s:%s" % (CONFIG["company"], CONFIG["image"], CONFIG["version"])


def task_build():
    """Build the Docker image for the current project"""
    return {'actions': ['docker build --tag=%s .' % IMAGE]}


def task_dev():
    """Run the main task for the project"""
    return {
        'actions': ["docker run --volume %s/:/app %s" % (CONFIG["project_root"], IMAGE)]
    }


def task_prod():
    """Run the main task inside a Docker container for use in production"""
    return {'actions': ["docker run %s" % IMAGE], 'task_dep': ["build"]}