Example no. 1
0
 def setUp(self):
     """Create a StreamHandler over a known-good binary file.

     Also records the paths of a plain-text file and a malformed binary
     file that the individual tests feed to the handler.
     """
     # txt file for testing the reading of a 'txt' file
     self.txt_file = '/home/blaxeep/Workspace/test/simpy/test.txt'
     # a binary file as we want it
     self.bin_file = '/home/blaxeep/Workspace/vsam/wc_day44'
     # a bad binary file (our demanded structure is different)
     self.bad_bin_file = '/home/blaxeep/Workspace/test/bad_bin'
     # NOTE(review): absolute developer-machine paths -- these tests can
     # only pass on that machine; consider shipping fixtures instead.
     self.handler = StreamHandler(self.bin_file)
Example no. 2
0
def run(server_only=False):
    """Entry point: run the hit server and the anagram stream loop.

    Args:
        server_only: when True, run only the hit server in this process
            (blocking) instead of spawning it as a daemon subprocess.

    Returns:
        0 on a clean KeyboardInterrupt shutdown; otherwise loops forever,
        re-raising any unexpected exception after alerting via Twitter.
    """
    # set up logging:
    logging.basicConfig(
        filename=LOG_FILE_NAME,
        format='%(asctime)s - %(levelname)s:%(message)s',
        level=logging.DEBUG
    )

    # setproctitle is optional; it only changes the name shown by ps/top
    try:
        import setproctitle
        setproctitle.setproctitle('anagramatron')
    except ImportError:
        print("missing module: setproctitle")

    if server_only:
        hit_server.start_hit_server()
        return

    # run the hit server in a daemon subprocess so it dies with us
    hitserver = multiprocessing.Process(target=hit_server.start_hit_server)
    hitserver.daemon = True
    hitserver.start()

    anagram_finder = AnagramFinder()
    stats.clear_stats()

    while True:
        print('top of run loop')
        logging.debug('top of run loop')
        # pre-bind so the except handlers below cannot hit a NameError if
        # StreamHandler() itself raises before assignment
        stream_handler = None
        try:
            print('starting stream handler')
            stream_handler = StreamHandler()
            stream_handler.start()
            for processed_tweet in stream_handler:
                anagram_finder.handle_input(processed_tweet)
                stats.update_console()

        except NeedsMaintenance:
            logging.debug('caught NeedsMaintenance exception')
            print('performing maintenance')
            if stream_handler is not None:
                stream_handler.close()
            anagram_finder.perform_maintenance()

        except KeyboardInterrupt:
            if stream_handler is not None:
                stream_handler.close()
            anagram_finder.close()
            return 0

        except Exception as err:
            logging.error(sys.exc_info())
            if stream_handler is not None:
                stream_handler.close()
            anagram_finder.close()
            # alert the operator before re-raising
            TwitterHandler().send_message(str(err) +
                                          "\n" +
                                          datetime.today().isoformat())
            raise
Example no. 3
0
    def execute_runbook(self):
        """Executes the job runtime and performs runtime operation (stream upload / status change).

        Marks the job RUNNING (with ETW tracing and a time-to-start metric),
        launches the runbook subprocess, streams its output on a daemon
        thread, waits for completion or a pending stop action, then records
        the terminal status (STOPPED / COMPLETED / FAILED).
        """
        # set status to running
        tracer.log_sandbox_job_started(self.job_id, self.runbook.definition_kind_str, self.runbook_data.name,
                                       self.runbook_data.runbook_version_id)
        # start_request_time is ISO-8601; drop fractional seconds and the
        # "+offset" suffix before parsing.  NOTE(review): the result is
        # compared against utcnow(), which assumes the timestamp is UTC --
        # confirm against the service contract.
        start_request_time = time.strptime(self.job_data.start_request_time.split("+")[0].split(".")[0],
                                           "%Y-%m-%dT%H:%M:%S")
        time_taken_to_start_td = datetime.utcnow() - datetime.fromtimestamp(time.mktime(start_request_time))
        # manual expansion of timedelta.total_seconds(), kept as-is for
        # old-Python compatibility
        time_taken_to_start_in_seconds = (time_taken_to_start_td.microseconds + (time_taken_to_start_td.seconds +
                                                                                 time_taken_to_start_td.days * 24 * 3600) * 10 ** 6) / 10 ** 6
        tracer.log_etw_job_status_changed_running(self.job_data.subscription_id, self.job_data.account_id,
                                                  self.job_data.account_name, self.sandbox_id, self.job_data.job_id,
                                                  self.runbook.definition_kind_str, self.runbook_data.name,
                                                  time_taken_to_start_in_seconds)
        tracer.log_etw_user_requested_start_or_resume(self.job_data.account_id, self.sandbox_id, self.job_data.job_id,
                                                      self.runbook_data.name, self.job_data.account_name,
                                                      time_taken_to_start_in_seconds, self.runbook.definition_kind_str)
        self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.RUNNING, False, self.get_job_extended_properties())

        # create runbook subprocess
        self.runtime.start_runbook_subprocess()

        # monitor runbook output for streams; daemon thread dies with us
        stream_handler = StreamHandler(self.job_data, self.runtime.runbook_subprocess, self.jrds_client)
        stream_handler.daemon = True
        stream_handler.start()

        # wait for runbook execution to complete; is_alive() replaces the
        # isAlive() alias that was removed in Python 3.9
        pending_action = None
        while stream_handler.is_alive() or self.runtime.runbook_subprocess.poll() is None:
            try:
                pending_action = self.msg_queue.get(block=False)
                tracer.log_sandbox_job_pending_action_detected(self.job_id, pending_action)
                if pending_action == pendingactions.STOP_ENUM_INDEX:
                    self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.STOPPING, False)
                    self.runtime.kill_runbook_subprocess()
                    break
            except Queue.Empty:
                # no pending action this tick; keep polling
                pass
            time.sleep(0.2)

        # handle terminal state changes; read poll() once so both checks
        # below see the same exit code
        exit_code = self.runtime.runbook_subprocess.poll()
        if pending_action == pendingactions.STOP_ENUM_INDEX:
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.STOPPED, True, self.get_job_extended_properties())
            tracer.log_etw_job_status_changed_stopped(self.job_data.subscription_id, self.job_data.account_id,
                                                      self.job_data.account_name, self.sandbox_id, self.job_data.job_id,
                                                      self.runbook.definition_kind_str, self.runbook_data.name)
        elif exit_code is not None and exit_code == EXIT_SUCCESS:
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.COMPLETED, True, self.get_job_extended_properties())
            tracer.log_etw_job_status_changed_completed(self.job_data.subscription_id, self.job_data.account_id,
                                                        self.job_data.account_name, self.sandbox_id,
                                                        self.job_data.job_id, self.runbook.definition_kind_str,
                                                        self.runbook_data.name)
        else:
            # failure: surface the subprocess's full stderr in the status
            full_error_output = self.get_full_stderr_content(self.runtime.runbook_subprocess.stderr)
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.FAILED, True, self.get_job_extended_properties(), exception=full_error_output)
            tracer.log_etw_job_status_changed_failed(self.job_data.subscription_id, self.job_data.account_id,
                                                     self.job_data.account_name, self.sandbox_id, self.job_id,
                                                     self.runbook.definition_kind_str, self.runbook_data.name,
                                                     self.runbook_data.runbook_version_id, full_error_output)
Example no. 4
0
    def execute_runbook(self):
        """Executes the job runtime and performs runtime operation (stream upload / status change).

        Marks the job RUNNING, launches the runbook subprocess, streams its
        output on a daemon thread, waits for completion or a pending stop
        request, then records the terminal status (STOPPED / COMPLETED /
        FAILED).
        """
        # set status to running
        tracer.log_debug_trace("Starting runbook.")
        self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.RUNNING, False)

        # create runbook subprocess
        self.runtime.start_runbook_subprocess()

        # monitor runbook output for streams; daemon thread dies with us
        stream_handler = StreamHandler(self.job_data, self.runtime.runbook_subprocess, self.jrds_client)
        stream_handler.daemon = True
        stream_handler.start()

        # wait for runbook execution to complete; is_alive() replaces the
        # isAlive() alias that was removed in Python 3.9
        pending_action = None
        while stream_handler.is_alive() or self.runtime.runbook_subprocess.poll() is None:
            try:
                pending_action = self.msg_queue.get(block=False)
                tracer.log_debug_trace("Pending action detected. " + str(pending_action))
                if pending_action == pendingactions.STOP_ENUM_INDEX:
                    self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.STOPPING, False)
                    self.runtime.kill_runbook_subprocess()
                    break
            except Queue.Empty:
                # no pending action this tick; keep polling
                pass
            time.sleep(0.2)

        # handle terminal state changes; read poll() once so both checks
        # below see the same exit code
        exit_code = self.runtime.runbook_subprocess.poll()
        if pending_action == pendingactions.STOP_ENUM_INDEX:
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.STOPPED, True)
            tracer.log_debug_trace("Completed - Stopped")
        elif exit_code is not None and exit_code == EXIT_SUCCESS:
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.COMPLETED, True)
            tracer.log_debug_trace("Completed - Without error")
        else:
            # failure: surface the subprocess's full stderr in the status
            full_error_output = self.get_full_stderr_content(self.runtime.runbook_subprocess.stderr)
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.FAILED, True,
                                            exception=full_error_output)
            tracer.log_debug_trace("Completed - With error")
Example no. 5
0
def main():
    """Drive one network simulation run: parse parameters, build the
    environment, simulate until maxTime, then print the results.
    """
    params = assign_params()
    maxTime, maxNodes, maxCoords, dataRate, datapath = params
    # stream of input items read from datapath
    sh = StreamHandler(datapath)
    initialize()
    net = Network()
    stats = NetStats()  # for network statistics
    node_stats = NodeStats()  # for node statistics
    # NOTE(review): node_stats is never used below -- confirm whether
    # NodeStats() registers itself globally or this is dead code.
    env = Environment(sh, net, maxNodes, maxCoords, dataRate)
    activate(env, env.run())
    simulate(until=maxTime)
    r = NetResults(stats)  # NOTE(review): r is unused; kept for possible side effects
    #monitor_results(env, params)
    print_results(env, params, stats)
Example no. 6
0
    def execute_runbook(self):
        """Executes the job runtime and performs runtime operation (stream upload / status change).

        Marks the job RUNNING, launches the runbook subprocess, streams its
        output on a daemon thread, waits for completion or a pending stop
        request, then records the terminal status (STOPPED / COMPLETED /
        FAILED).
        """
        # set status to running
        tracer.log_debug_trace("Starting runbook.")
        self.jrds_client.set_job_status(self.sandbox_id, self.job_id,
                                        jobstatus.RUNNING, False)

        # create runbook subprocess
        self.runtime.start_runbook_subprocess()

        # monitor runbook output for streams; daemon thread dies with us
        stream_handler = StreamHandler(self.job_data,
                                       self.runtime.runbook_subprocess,
                                       self.jrds_client)
        stream_handler.daemon = True
        stream_handler.start()

        # wait for runbook execution to complete; is_alive() replaces the
        # isAlive() alias that was removed in Python 3.9
        pending_action = None
        while (stream_handler.is_alive()
               or self.runtime.runbook_subprocess.poll() is None):
            try:
                pending_action = self.msg_queue.get(block=False)
                tracer.log_debug_trace("Pending action detected. " +
                                       str(pending_action))
                if pending_action == pendingactions.STOP_ENUM_INDEX:
                    self.jrds_client.set_job_status(self.sandbox_id,
                                                    self.job_id,
                                                    jobstatus.STOPPING, False)
                    self.runtime.kill_runbook_subprocess()
                    break
            except Queue.Empty:
                # no pending action this tick; keep polling
                pass
            time.sleep(0.2)

        # handle terminal state changes; read poll() once so both checks
        # below see the same exit code
        exit_code = self.runtime.runbook_subprocess.poll()
        if pending_action == pendingactions.STOP_ENUM_INDEX:
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id,
                                            jobstatus.STOPPED, True)
            tracer.log_debug_trace("Completed - Stopped")
        elif exit_code is not None and exit_code == EXIT_SUCCESS:
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id,
                                            jobstatus.COMPLETED, True)
            tracer.log_debug_trace("Completed - Without error")
        else:
            # failure: surface the subprocess's full stderr in the status
            full_error_output = self.get_full_stderr_content(
                self.runtime.runbook_subprocess.stderr)
            self.jrds_client.set_job_status(self.sandbox_id,
                                            self.job_id,
                                            jobstatus.FAILED,
                                            True,
                                            exception=full_error_output)
            tracer.log_debug_trace("Completed - With error")
Example no. 7
0
scale_factor = 0.01  # step size applied to each model prediction
bound = 4  # half-width of the square region the cursor is clamped to


# First we define the callback function for the stream
def update(data):
    """Stream callback: turn one EEG sample into cursor motion.

    Runs the sample's channel data through the (global) model, nudges the
    global (pos_x, pos_y) position by the predicted direction scaled by
    scale_factor, clamps it into the square [-bound, bound]^2, and redraws
    the figure.
    """
    global pos_x
    global pos_y
    samples = data.channels_data
    prediction = model(torch.from_numpy(samples).float())
    # horizontal component = right - left, vertical = up - down
    step = scale_factor * np.asarray(
        [float(prediction[2] - prediction[0]),
         float(prediction[1] - prediction[3])])
    pos_x += step[0]
    pos_y += step[1]
    # clamp the position inside the square
    pos_x = max(min(bound, pos_x), -1 * bound)
    pos_y = max(min(bound, pos_y), -1 * bound)
    # wipe the axes and redraw the boundary square
    ax.cla()
    ax.plot([-1 * bound, bound, bound, -1 * bound, -1 * bound],
            [-1 * bound, -1 * bound, bound, bound, -1 * bound])
    # draw the cursor at the newly interpreted coordinates
    ax.add_artist(plt.Circle((pos_x, pos_y), 0.1, color='blue'))
    fig.canvas.draw()


# Start the stream
# NOTE(review): -1 presumably means "no frame limit" -- confirm against
# StreamHandler's constructor before relying on it.
stream = StreamHandler(-1, update)
stream.start_stream()
    # quit: leave the (enclosing, not shown here) input loop
    if (user_input == "q"): break

    # ping bci: open a connection to the board to verify it responds
    if (user_input == "p"):
        test = OpenBCICyton(daisy="True")
        # NOTE(review): daisy="True" passes the *string* "True", not the
        # boolean True -- confirm OpenBCICyton accepts that.

    # record: capture N frames into the chosen row of the data set
    elif (user_input == "r"):
        row_to_modify = int(
            input(
                "Row to record to? [0 - Left] [1 - Up] [2 - Right] [3 - Down]")
        )
        frame_input = int(input("Recording how many frames?"))
        # collect_data is defined elsewhere; NOTE(review): row_to_modify is
        # not passed to it here -- presumably read via a global; verify.
        mystream = StreamHandler(frame_input, collect_data)
        mystream.start_stream()
        print("Finished recording to", row_to_modify)

    # save ("s" -> overwrite the loaded path) or save-as ("sa" -> ask)
    elif (user_input == "s" or user_input == "sa"):
        if (LOADED and user_input == "s"):
            np.save(LOADED_PATH, np.asarray(data))
        else:
            file_name = str(input("File path?"))
            np.save(file_name, np.asarray(data))

    # load a previously saved data set from disk
    elif (user_input == "l"):
        file_name = str(input("File path?"))
        arrdata = np.load(file_name, allow_pickle=True)
Example no. 9
0
class TestStreamHandler(unittest.TestCase):
    """Tests for streamhandler.py."""

    def setUp(self):
        """Create a StreamHandler over a known-good binary file.

        Also records the paths of a text file and a malformed binary file
        used by the individual tests.  NOTE(review): these are absolute
        developer-machine paths; consider bundling fixtures instead.
        """
        # txt file for testing the reading of a 'txt' file
        self.txt_file = '/home/blaxeep/Workspace/test/simpy/test.txt'
        # a binary file as we want it
        self.bin_file = '/home/blaxeep/Workspace/vsam/wc_day44'
        # a bad binary file (our demanded structure is different)
        self.bad_bin_file = '/home/blaxeep/Workspace/test/bad_bin'
        self.handler = StreamHandler(self.bin_file)

    def testIsBinary(self):
        """isbinary returns True when the input is a binary file."""
        self.result = self.handler.isbinary(self.bin_file)
        self.assertTrue(self.result)

    def testIsBinaryNot(self):
        """isbinary returns False when the input is not a binary file."""
        self.result = self.handler.isbinary(self.txt_file)
        self.assertFalse(self.result)

    def testGoodStream(self):
        """readStream yields items of the default record length (8)."""
        # create the generator
        self.gen = self.handler.readStream(self.bin_file)
        # produce ten items; the builtin next() works on both Python 2 and
        # Python 3, unlike the removed gen.next() method
        for _ in range(10):
            self.item = next(self.gen)
            self.assertEqual(len(self.item), 8)

    def testOtherFormatStream(self):
        """readStream honours an explicit struct format string."""
        # create a generator of items that use another format
        self.gen = self.handler.readStream(self.bin_file, 'LLHHII')
        # each item must exist and its length must differ from the
        # default of 8
        for _ in range(10):
            self.item = next(self.gen)
            self.assertNotEqual(len(self.item), 8)
            self.assertTrue(self.item)

    def testSendItem(self):
        """sendItem returns a truthy item while the stream has data."""
        self.item_generator = self.handler.readStream(self.bin_file)
        for _ in range(10):
            self.item = self.handler.sendItem()
            self.assertTrue(self.item)

    def testInfiniteSendItem(self):
        """sendItem reports 'stream ended' once the data is exhausted.

        Exercises the case of demanding more data than the generator can
        produce.
        """
        self.item_generator = self.handler.readStream(self.bin_file)
        # drain the stream; the sentinel string marks exhaustion
        while True:
            self.item = self.handler.sendItem()
            if self.item == 'stream ended':
                self.assertTrue(self.item)
                break
Example no. 10
0
 def __init__(self):
     """Build the OAuth client from module-level credentials and attach a
     StreamHandler.
     """
     consumer = oauth.Consumer(key=API_KEY, secret=API_SECRET)
     token = oauth.Token(key=ACCESS_TOKEN,
                         secret=ACCESS_TOKEN_SECRET)
     self.consumer = consumer
     self.access_token = token
     self.client = oauth.Client(consumer, token)
     self.stream_handler = StreamHandler()
Example no. 11
0
import cPickle as pickle
import time
import sys
from streamhandler import StreamHandler
import anagramfunctions

"""a helper file for fetching & saving test data from the twitter stream"""

if __name__ == "__main__":
    stream = StreamHandler()
    stream.start()
    count = 0
    save_interval = 50000
    tlist = []

    try:
        for t in stream:
            if not t: 
                continue

            tlist.append(t)
            count += 1
            sys.stdout.write(str(count) + '\r')
            sys.stdout.flush()
            if count > save_interval:
                filename = "testdata/filt_%s.p" % time.strftime("%b%d%H%M")
                pickle.dump(tlist, open(filename, 'wb'))
                count = 0
                tlist = []
    finally:
        if count > 1000: