def main(): parser = OptionParser() parser.add_option("-C", "--config", dest="config_file", help="read config from FILE", metavar="FILE") parser.add_option("-a", "--controller", dest="controller", help="controller address", default="localhost:3456") (options, args) = parser.parse_args() if len(args) < 1: print "usage: stop_computation [computation ID]" sys.exit(0) serv_addr, serv_port = normalize_controller_addr(options.controller) #Unlike most Jetstream programs, need to know how many nodes we have to set up the distribution properly print "connecting..." server = RemoteController() server.connect(serv_addr, serv_port) print "connected to remote, sending stop" resp = server.stop_computation(args[0]) print "server response:", resp
def main(): parser = OptionParser() parser.add_option("-C", "--config", dest="config_file", help="read config from FILE", metavar="FILE") parser.add_option("-a", "--controller", dest="controller", help="controller address", default="localhost:3456") (options, args) = parser.parse_args() pattern = ".*" + args[0] + ".*" file_to_grep = args[1] if ':' in options.controller: (serv_addr, serv_port) = options.controller.split(':') serv_port = int(serv_port) else: serv_addr = options.controller serv_port = 3456 ### Define the graph abstractly, without a computation g = jsapi.QueryGraph() reader = jsapi.FileRead(g, file_to_grep) grepper = jsapi.StringGrep(g, pattern) host_extend = jsapi.ExtendOperator(g, "s", ["${HOSTNAME}"]) cube = g.add_cube("local_results") cube.add_dim("log_line", Element.STRING, 0) cube.add_dim("hostname", Element.STRING, 1) cube.add_agg("count", jsapi.Cube.AggType.COUNT, 2) cube.set_overwrite(True) #fresh results g.connect(reader,grepper) g.connect(grepper, host_extend) g.connect(host_extend, cube) #### Finished building in memory, now to join server = RemoteController() server.connect(serv_addr, serv_port) n = server.get_a_node() assert isinstance(n, NodeID) nodes = server.all_nodes() cube.instantiate_on(n) host_extend.instantiate_on(nodes) result_reader = ClientDataReader() net_addr = result_reader.prep_to_receive_data() g.connect(cube, net_addr) server.deploy(g) result_reader.blocking_read(lambda x: print x )
def main(): parser = OptionParser() parser.add_option("-C", "--config", dest="config_file", help="read config from FILE", metavar="FILE") parser.add_option("-a", "--controller", dest="controller", help="controller address", default="localhost:3456") (options, args) = parser.parse_args() serv_addr, serv_port = normalize_controller_addr(options.controller) file_to_parse = args[0] k2 = 20 #how many to pull to top level k = 10 #how many to display ### Define the graph abstractly, without a computation g = jsapi.QueryGraph() reader = jsapi.FileRead(g, file_to_parse) parse = jsapi.GenericParse(g, ".*GET ([^ ]*) .*", "s") local_cube = g.add_cube("local_results") local_cube.add_dim("url", Element.STRING, 0) # cube.add_dim("hostname", Element.STRING, 1) local_cube.add_agg("count", jsapi.Cube.AggType.COUNT, 1) local_cube.set_overwrite(True) #fresh results pull_k2 = jsapi.TimeSubscriber(g, {}, 2000, "-count", k2) echo = jsapi.Echo(g) # local_cube = jsapi.Echo(g) g.connect(reader, parse) g.connect(parse, local_cube) g.connect(local_cube, pull_k2) g.connect(pull_k2, echo) # Should do a pull into a consolidated cube #### Finished building in memory, now to join server = RemoteController((serv_addr, serv_port)) n = server.get_a_node() assert isinstance(n, NodeID) all_nodes = server.all_nodes() local_cube.instantiate_on(all_nodes) server.deploy(g)
from machine import AEVC
from console import console
import teensy_talker as teensy
from remote_controller import RemoteController

# async collector of info and placing it in queue
# NOTE(review): Queue, Array, and time are not imported in this chunk --
# presumably imported earlier in the file.
direct_queue = Queue()
user_queue = Queue()
teensy_queue = Queue()

# Machine under control, plus a shared 4-element double array that the
# xbox RemoteController writes joystick state into.
m = AEVC()
joystickArray = Array('d', [0.0] * 4)
xbox = RemoteController(joystickArray)


def dequeue(ja):
    """Event-pump loop: attach the joystick array to the machine, then
    poll the event queues forever.

    ja: shared joystick array (handed to m.set_joystick_array).

    NOTE(review): only direct_queue is serviced in the loop body visible
    here; user_queue/teensy_queue handling presumably follows in code
    beyond this chunk -- confirm against the full file.
    """
    m.set_joystick_array(ja)
    # NOTE(review): wait_time is set but never read in the visible code.
    wait_time = time.time() + 2
    manual_refresh_rate = 15
    # Polling delay derived from the refresh rate.
    # NOTE(review): "refrash" typo kept as-is; it may be referenced
    # under this name later in the file.
    manual_refrash_delay = 1.0 / manual_refresh_rate
    while True:
        # Hand any pending direct events straight to the machine.
        if not direct_queue.empty():
            a = direct_queue.get()
            m.direct_event(a)
def remote_deploy(serv_addr, serv_port, graph, cube=None):
  """Deploy a query graph via the controller at (serv_addr, serv_port).

  Connects a RemoteController, sanity-checks that the controller can
  hand back a NodeID, then deploys `graph` (with optional `cube`).
  """
  ctrl = RemoteController((serv_addr, serv_port))
  node = ctrl.get_a_node()
  assert isinstance(node, NodeID)
  ctrl.deploy(graph, cube)
boxes = boxes[selected] centers = [((lx + 0.5 * (rx - lx)), (ly + 0.5 * (ry - ly))) for (ly, lx, ry, rx) in boxes] width, height = pil_img.size centers = [(((x / width) * 2 - 1), -((y / height) * 2 - 1)) for (x, y) in centers] return centers if __name__ == '__main__': cap = RemoteCapture('ws://rosa.local:5678') controller = RemoteController('ws://rosa.local:1234') controller.setup(AIN1=18, AIN2=17, PWMA=4, BIN1=24, BIN2=27, PWMB=22, STBY=23) model_path = get_file(fname=WEIGHTS['name'], origin=WEIGHTS['origin'], cache_subdir='rosa', file_hash=WEIGHTS['hash']) yolo = YOLO( model_path=model_path,
def main(): parser = OptionParser() parser.add_option("-C", "--config", dest="config_file", help="read config from FILE", metavar="FILE") parser.add_option("-a", "--controller", dest="controller", help="controller address", default="localhost:3456") parser.add_option("-2", "--two-nodes", dest="USE_TWO_NODES", action="store_true", help="whether to use two nodes", default=False) parser.add_option("-s", "--serialize", dest="serialize", action="store_true", help="whether to add a dummy serialization", default=False) (options, args) = parser.parse_args() if ':' in options.controller: (serv_addr, serv_port) = options.controller.split(':') serv_port = int(serv_port) else: serv_addr = options.controller serv_port = 3456 ### Define the graph abstractly, without a computation g = jsapi.QueryGraph() source = jsapi.SendK(g, "1" + 10 * "0") #10 billion; fits into an int64 very easily if options.serialize: s2 = jsapi.DummySerialize(g) g.connect(source, s2) source = s2 sink = jsapi.RateRecord(g) g.connect(source, sink) #### Finished building in memory, now to join server = RemoteController() server.connect(serv_addr, serv_port) n = server.get_a_node() assert isinstance(n, NodeID) nodes = server.all_nodes() if options.USE_TWO_NODES: if len(nodes) < 2: print "not enough nodes for two-node test" sys.exit(0) source.instantiate_on(nodes[0]) sink.instantiate_on(nodes[1]) server.deploy(g)