def read(self, iprot):
    # Fast path: the C-accelerated codec (from thrift.protocol import fastbinary);
    # the flattened original garbled 'fastbinary' into 'TBinaryProtocol' here.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated \
            and isinstance(iprot.trans, TTransport.CReadableTransport) \
            and self.thrift_spec is not None \
            and fastbinary is not None:
        fastbinary.decode_binary(self, iprot.trans,
                                 (self.__class__, self.thrift_spec))
        return
    iprot.readStructBegin()
    while True:
        (fname, ftype, fid) = iprot.readFieldBegin()
        if ftype == TType.STOP:
            break
        if fid == 0:
            if ftype == TType.I32:
                self.success = iprot.readI32()
            else:
                iprot.skip(ftype)
        else:
            iprot.skip(ftype)
        iprot.readFieldEnd()
    iprot.readStructEnd()
def write(self, oprot):
    # Fast path via the C-accelerated fastbinary codec, when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated \
            and self.thrift_spec is not None \
            and fastbinary is not None:
        oprot.trans.write(
            fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
    oprot.writeStructBegin('add_result')
    if self.success is not None:
        oprot.writeFieldBegin('success', TType.I32, 0)
        oprot.writeI32(self.success)
        oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
def write(self, oprot):
    # Fast path via the C-accelerated fastbinary codec, when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated \
            and self.thrift_spec is not None \
            and fastbinary is not None:
        oprot.trans.write(
            fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
    oprot.writeStructBegin('add_args')
    if self.a is not None:
        oprot.writeFieldBegin('a', TType.I32, 1)
        oprot.writeI32(self.a)
        oprot.writeFieldEnd()
    if self.b is not None:
        oprot.writeFieldBegin('b', TType.I32, 2)
        oprot.writeI32(self.b)
        oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
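# A minimal round-trip sketch for generated structs like add_args above,
# assuming the usual thrift-generated module layout. TMemoryBuffer lets you
# exercise the write()/read() methods without any socket.
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol

def roundtrip(obj_out, obj_in):
    buf = TTransport.TMemoryBuffer()
    obj_out.write(TBinaryProtocol.TBinaryProtocol(buf))   # serialize to bytes
    rbuf = TTransport.TMemoryBuffer(buf.getvalue())
    obj_in.read(TBinaryProtocol.TBinaryProtocol(rbuf))    # deserialize in place
    return obj_in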
def write_it(stream):
    transportOut = TMemoryBuffer()
    protocolOut = TBinaryProtocol.TBinaryProtocol(transportOut)
    topology.write(protocolOut)
    data = transportOut.getvalue()  # renamed from 'bytes' to avoid shadowing the builtin
    stream.write(data)
def get_binary_protocol(transport):
    return TBinaryProtocol.TBinaryProtocol(transport)
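# A usage sketch for get_binary_protocol(), assuming a buffered socket
# transport; host/port and ExampleService are placeholders, not part of
# the original snippet.
from thrift.transport import TSocket, TTransport

def open_example_client(host, port):
    transport = TTransport.TBufferedTransport(TSocket.TSocket(host, port))
    protocol = get_binary_protocol(transport)
    client = ExampleService.Client(protocol)  # hypothetical generated client
    transport.open()
    return client, transport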
import ConfigParser
from thrift import Thrift
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
# The generated Airavata client module is assumed importable here.

try:
    # Read Airavata Client properties
    airavataConfig = ConfigParser.RawConfigParser()
    airavataConfig.read('../conf/airavata-client.properties')
    # Create a socket to the Airavata Server
    transport = TSocket.TSocket(airavataConfig.get('AiravataServer', 'host'),
                                airavataConfig.get('AiravataServer', 'port'))
    # Use Buffered Transport to speed up over raw sockets
    transport = TTransport.TBufferedTransport(transport)
    # Airavata currently uses Binary Protocol
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    # Create an Airavata client to use the protocol encoder
    airavataClient = Airavata.Client(protocol)
    # Connect to Airavata Server
    transport.open()
    computeResourceNames = airavataClient.getAllComputeResourceNames()
    print computeResourceNames
    # Close Connection to Airavata Server
    transport.close()
except Thrift.TException, tx:
    print '%s' % (tx.message)  # minimal handler (assumed; snippet was truncated here)
def _create_connection(self):
    # Note: this stacks a buffered transport on top of a framed one,
    # matching the original snippet.
    self.transport = TTransport.TBufferedTransport(
        TFramedTransport(TSocket.TSocket(self.host, self.port)))
    self.protocol = TBinaryProtocol.TBinaryProtocolAccelerated(self.transport)
    self.client = Hbase.Client(self.protocol)
    self.transport.open()
def factory(transport):
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    multiplex_prot = TMultiplexedProtocol(protocol, cls.service_name)
    return multiplex_prot
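# A usage sketch for the multiplexed factory above, assuming it is exposed by
# a class whose cls.service_name names the target service on a multiplexed
# server; SomeService stands in for a generated client class.
transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9090))
transport.open()
client = SomeService.Client(factory(transport))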
def __init__(self):
    self.transport = TSocket.TSocket('127.0.0.1', 7748)
    self.transport = TTransport.TBufferedTransport(self.transport)
    self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
    self.client = Ping.Client(self.protocol)
    self.transport.open()
import sys
sys.path.append('/usr/lib/python2.6/site-packages/')

from conf_crawler import DownloaderService
from conf_crawler import DCService
from conf_crawler.ttypes import *
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.transport.TTransport import TFramedTransport

transport = TSocket.TSocket('localhost', 44002)
framed_transport = TFramedTransport(transport)
framed_transport.open()
protocol = TBinaryProtocol.TBinaryProtocol(framed_transport)
client = DCService.Client(protocol)


def PushDownloadTask():
    try:
        url = 'http://www.coo8.com/interfaces/showReviewsByGoodsId.action<%param%>flag=all&goodsId=P145484&pageIndex=1'
        referer = "http://www.coo8.com/product/145484.html"
        download_task = DownloadTask()
        download_task.req_item = DownloadReqItem()
        download_task.req_item.url = url
        download_task.req_item.referer = referer
        download_task.req_item.time_out = 1000
        download_task.prop_item = DownloadPropItem()
        download_task.prop_item.is_friendly = True
    except Thrift.TException, tx:
        print '%s' % (tx.message)  # minimal handler (assumed; snippet was truncated here)
def get_client(host, port):
    trans = TSocket.TSocket(host, port)
    trans = TTransport.TBufferedTransport(trans)
    proto = TBinaryProtocol.TBinaryProtocolAccelerated(trans)
    client = KnnThriftService.Client(proto)
    return client, trans
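# A usage sketch for get_client(); the caller owns the transport lifecycle
# here. Host and port are placeholders.
client, trans = get_client('localhost', 9090)
trans.open()
try:
    pass  # invoke KnnThriftService methods on `client` here
finally:
    trans.close()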
def correlate_dfe(data, size_timeseries, num_timeseries, correlations):
    """Calculates correlations on DFE."""
    start_time = time.time()
    # Make socket
    socket = TSocket.TSocket('localhost', 9090)
    # Buffering is critical. Raw sockets are very slow
    transport = TTransport.TBufferedTransport(socket)
    # Wrap in a protocol
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    # Create a client to use the protocol encoder
    client = correlationService.Client(protocol)
    current_time = time.time() - start_time
    print 'Creating a client:\t\t\t\t%.5lfs' % current_time
    try:
        # Connect!
        start_time = time.time()
        transport.open()
        current_time = time.time() - start_time
        print 'Opening connection:\t\t\t\t%.5lfs' % current_time

        num_timesteps = size_timeseries
        window_size = size_timeseries
        num_bursts = calc_num_bursts(num_timeseries)

        # Get loop length
        start_time = time.time()
        loop_length = client.correlation_get_CorrelationKernel_loopLength()
        current_time = time.time() - start_time
        print 'Getting Correlation Kernel loopLength:\t\t%.5lfs' % current_time

        # Prepare data for DFE
        start_time = time.time()
        burst_size = 384 / 2  # for anything other than ISCA this should be 384
        in_mem_load = [0] * (num_bursts * burst_size)
        precalculations, data_pairs = prepare_data_for_dfe(
            data, size_timeseries, num_timeseries)
        current_time = time.time() - start_time
        print 'Data reordering time:\t\t\t\t%.5lfs' % current_time

        # Allocate and send input streams to server
        start_time = time.time()
        loop_length_size = 1
        address_loop_length = client.malloc_int32_t(loop_length_size)
        client.send_data_int32_t(address_loop_length, [loop_length])
        loop_length_time = time.time() - start_time
        print('\tSending LoopLength:\t\t(size = %d bit)\t\t%.5lfs' %
              (32 * loop_length_size, loop_length_time))

        start_time = time.time()
        in_mem_load_size = num_bursts * burst_size
        address_in_mem_load = client.malloc_int32_t(in_mem_load_size)
        client.send_data_int32_t(address_in_mem_load, in_mem_load)
        in_mem_load_time = time.time() - start_time
        print('\tSending InMemLoad:\t\t(size = %d bit)\t%.5lfs' %
              (32 * in_mem_load_size, in_mem_load_time))

        start_time = time.time()
        precalculations_size = 2 * num_timeseries * num_timesteps
        address_precalculations = client.malloc_double(precalculations_size)
        client.send_data_double(address_precalculations, precalculations)
        precalculations_time = time.time() - start_time
        print('\tSending Precalculations:\t(size = %d bit)\t%.5lfs' %
              (64 * precalculations_size, precalculations_time))

        start_time = time.time()
        data_pairs_size = 2 * num_timeseries * num_timesteps
        address_data_pairs = client.malloc_double(data_pairs_size)
        client.send_data_double(address_data_pairs, data_pairs)
        data_pairs_time = time.time() - start_time
        print('\tSending DataPairs:\t\t(size = %d bit)\t%.5lfs' %
              (64 * data_pairs_size, data_pairs_time))

        current_time = (loop_length_time + in_mem_load_time +
                        precalculations_time + data_pairs_time)
        speed = ((32 * loop_length_size + 32 * in_mem_load_size +
                  64 * precalculations_size + 64 * data_pairs_size) /
                 (current_time * 1000000))
        print('Sending input streams to server total time:\t%.5lfs' %
              current_time + '\t(average speed = %.5lfMb/s)' % (speed))

        # Allocate memory for output stream on server
        start_time = time.time()
        out_correlation_size = (num_timesteps * CORRELATION_NUM_TOP_SCORES *
                                CORRELATION_NUM_PIPES * loop_length +
                                num_bursts * 24)
        # for anything other than ISCA, 48 should be used instead of 24
        address_out_correlation = client.malloc_double(out_correlation_size)
        out_indices_size = (2 * num_timesteps * loop_length *
                            CORRELATION_NUM_TOP_SCORES * CORRELATION_NUM_PIPES)
        address_out_indices = client.malloc_int32_t(out_indices_size)
        current_time = time.time() - start_time
        print('Allocating memory for output stream on server:\t%.5lfs' %
              current_time)

        # Initialize LMem
        start_time = time.time()
        client.correlation_loadLMem(num_bursts, address_loop_length,
                                    address_in_mem_load)
        current_time = time.time() - start_time
        print 'LMem initialization:\t\t\t\t%.5lfs' % current_time

        # Executing correlation action
        start_time = time.time()
        client.correlation(
            num_bursts,                # scalar input
            num_timesteps,             # scalar input
            num_timeseries,            # scalar input
            1,                         # scalar input
            float(window_size),        # scalar input
            address_precalculations,   # streaming reordered input
            address_data_pairs,        # streaming reordered input
            address_out_correlation,   # streaming unordered output
            address_out_indices)       # streaming unordered output
        current_time = time.time() - start_time
        print 'Correlation time:\t\t\t\t%.5lfs' % current_time

        # Get output stream from server
        start_time = time.time()
        out_correlation = client.receive_data_double(address_out_correlation,
                                                     out_correlation_size)
        out_correlation_time = time.time() - start_time
        print('\tGet output stream Correlation:\t(size = %d bit)\t%.5lfs' %
              (64 * out_correlation_size, out_correlation_time))

        start_time = time.time()
        _ = client.receive_data_int32_t(address_out_indices, out_indices_size)
        out_indices_time = time.time() - start_time
        print('\tGet output stream outIndices:\t(size = %d bit)\t%.5lfs' %
              (32 * out_indices_size, out_indices_time))

        start_time = time.time()
        loop_length_size = 1
        loop_length = client.receive_data_int32_t(address_loop_length,
                                                  loop_length_size)
        loop_length_time = time.time() - start_time
        print('\tGet output stream loopLength:\t(size = %d bit)\t\t%.5lfs' %
              (32 * loop_length_size, loop_length_time))

        current_time = (out_indices_time + out_correlation_time +
                        loop_length_time)
        speed = ((32 * loop_length_size + 32 * out_indices_size +
                  64 * out_correlation_size) / (current_time * 1000000))
        print('Getting output stream from server total time:\t%.5lfs' %
              current_time + '\t(average speed = %.5lfMb/s)' % (speed))

        # Free allocated memory for streams on server
        start_time = time.time()
        client.free(address_loop_length)
        client.free(address_in_mem_load)
        client.free(address_precalculations)
        client.free(address_data_pairs)
        client.free(address_out_correlation)
        client.free(address_out_indices)
        current_time = time.time() - start_time
        print('Freeing allocated memory for streams on server:\t%.5lfs' %
              current_time)

        # Close!
        start_time = time.time()
        transport.close()
        current_time = time.time() - start_time
        print 'Closing connection:\t\t\t\t%.5lfs' % current_time

        # Store data
        start_time = time.time()
        position = 0
        index = 0
        start = ((num_timesteps - 1) * loop_length[0] *
                 CORRELATION_NUM_TOP_SCORES * CORRELATION_NUM_PIPES)
        for i in range(num_timeseries):
            for j in range(i):
                correlations[index + j] = out_correlation[start + position + j]
            index += i
            position += ((i / 12) + 1) * 12
        current_time = time.time() - start_time
        print 'Storing time:\t\t\t\t\t%.5lfs' % current_time
    except Thrift.TException, thrift_exception:
        print '%s' % (thrift_exception.message)
        sys.exit(-1)
def simple_dfe(size, data_in):
    """Simple DFE implementation."""
    try:
        start_time = time.time()
        # Make socket
        socket = TSocket.TSocket('localhost', 9090)
        # Buffering is critical. Raw sockets are very slow
        transport = TTransport.TBufferedTransport(socket)
        # Wrap in a protocol
        protocol = TBinaryProtocol.TBinaryProtocol(transport)
        # Create a client to use the protocol encoder
        client = SimpleService.Client(protocol)
        print('Creating a client:\t\t\t\t%.5lfs' % (time.time() - start_time))

        # Connect!
        start_time = time.time()
        transport.open()
        print('Opening connection:\t\t\t\t%.5lfs' % (time.time() - start_time))

        size_bytes = size * 4

        # Initialize maxfile
        start_time = time.time()
        max_file = client.Simple_init()
        print('Initializing maxfile:\t\t\t\t%.5lfs' % (time.time() - start_time))

        # Load DFE
        start_time = time.time()
        max_engine = client.max_load(max_file, '*')
        print('Loading DFE:\t\t\t\t\t%.5lfs' % (time.time() - start_time))

        # Allocate and send input streams to server
        start_time = time.time()
        address_data_in = client.malloc_float(size)
        client.send_data_float(address_data_in, data_in)
        print('Sending input data:\t\t\t\t%.5lfs' % (time.time() - start_time))

        # Allocate memory for output stream on server
        start_time = time.time()
        address_data_out = client.malloc_float(size)
        print('Allocating memory for output stream on server:\t%.5lfs' %
              (time.time() - start_time))

        # Action default
        start_time = time.time()
        actions = client.max_actions_init(max_file, "default")
        client.max_set_param_uint64t(actions, "N", size)
        client.max_queue_input(actions, "x", address_data_in, size_bytes)
        client.max_queue_output(actions, "y", address_data_out, size_bytes)
        client.max_run(max_engine, actions)
        print('Simple time:\t\t\t\t\t%.5lfs' % (time.time() - start_time))

        # Unload DFE
        start_time = time.time()
        client.max_unload(max_engine)
        print('Unloading DFE:\t\t\t\t\t%.5lfs' % (time.time() - start_time))

        # Get output stream from server
        start_time = time.time()
        data_out = client.receive_data_float(address_data_out, size)
        print('Getting output stream:\t(size = %d bit)\t%.5lfs' %
              ((size * 32), (time.time() - start_time)))

        # Free allocated memory for streams on server
        start_time = time.time()
        client.free(address_data_in)
        client.free(address_data_out)
        client.free(actions)
        print('Freeing allocated memory for streams on server:\t%.5lfs' %
              (time.time() - start_time))

        # Free allocated maxfile data
        start_time = time.time()
        client.Simple_free()
        print('Freeing allocated maxfile data:\t\t\t%.5lfs' %
              (time.time() - start_time))

        # Close!
        start_time = time.time()
        transport.close()
        print('Closing connection:\t\t\t\t%.5lfs' % (time.time() - start_time))
    except Thrift.TException, thrift_exception:
        print '%s' % (thrift_exception.message)
        sys.exit(-1)
class TestChord:
    transport = TSocket.TSocket(sys.argv[1], int(sys.argv[2]))
    transport = TTransport.TBufferedTransport(transport)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    filestore = FileStore.Client(protocol)
    transport.open()

    def __init__(self):
        pass

    def test_write_file(self, rfile):
        node = self.filestore.findSucc(rfile.meta.contentHash)
        print "node id: ", node
        assert node.id == "162f2ef78020a93545457290a21d4ea634d4bca22aff8530e2011209be88ff82", \
            "Test_Write_file - Node returned is wrong"
        print "Test case passed 1 -- get successor node for key: ", rfile.meta.contentHash
        file_store = make_socket(node.ip, node.port)
        file_store.writeFile(rfile)
        read_file = file_store.readFile(rfile.meta.filename, rfile.meta.owner)
        assert str(read_file.meta.filename) == str(rfile.meta.filename), \
            "Test_Write_file - Filename mismatch"
        assert str(read_file.meta.version) == str(rfile.meta.version), \
            "Test_Write_file - version mismatch"
        assert str(read_file.meta.owner) == str(rfile.meta.owner), \
            "Test_Write_file - owner mismatch"
        assert str(read_file.meta.contentHash) == str(rfile.meta.contentHash), \
            "Test_Write_file - contentHash mismatch"
        assert str(read_file.content) == str(rfile.content), \
            "Test_Write_file - Content mismatch"
        print "Test case passed 2 -- Write file: file name - %s, owner - %s" % (
            rfile.meta.filename, rfile.meta.owner)
        """try:
            self.filestore.writeFile(rfile)
            raise AssertionError("Test_Write_file - No Exception on invalid write")
        except SystemException as err:
            assert err.message == "Key is not associated with the node", "Test_Write_file - Message Content Mismatch"
            print "Test case passed 3 -- ", err.message"""

    def test_read_file(self, filename, owner):
        node = self.filestore.findSucc(
            sha256(owner + ":" + filename).hexdigest())
        assert node.id == "162f2ef78020a93545457290a21d4ea634d4bca22aff8530e2011209be88ff82", \
            "Test_Read_File - Node returned is wrong"
        print "Test case passed 4 -- get successor node for key: ", sha256(
            owner + ":" + filename).hexdigest()
        file_store = make_socket(node.ip, node.port)
        # rfile is expected to be a module-level fixture; it is not defined
        # in this snippet.
        rfile.content = "This is a test program"
        file_store.writeFile(rfile)
        read_file = file_store.readFile(filename, owner)
        assert read_file.meta.filename == rfile.meta.filename, "Test_Read_file - Filename mismatch"
        assert str(read_file.meta.version) == str(int(rfile.meta.version) + 1), \
            "Test_Read_file - version mismatch"
        assert read_file.meta.owner == rfile.meta.owner, "Test_Read_file - owner mismatch"
        assert read_file.meta.contentHash == rfile.meta.contentHash, "Test_Read_file - contentHash mismatch"
        assert read_file.content == rfile.content, "Test_Read_file - Content mismatch"
        print "Test case passed 5 -- write file: file name - %s, owner - %s" % (
            filename, owner)

        rfile.meta.filename = "test_p2.txt"
        rfile.meta.contentHash = sha256(owner + ":" + rfile.meta.filename).hexdigest()
        node = self.filestore.findSucc(rfile.meta.contentHash)
        assert node.id == "162f2ef78020a93545457290a21d4ea634d4bca22aff8530e2011209be88ff82", \
            "Test_Read_File - Node returned is wrong"
        print "Test case passed 6 -- get successor node for key: ", rfile.meta.contentHash
        file_store = make_socket(node.ip, node.port)
        file_store.writeFile(rfile)
        read_file = file_store.readFile(rfile.meta.filename, owner)
        assert read_file.meta.filename == rfile.meta.filename, "Test_Read_file - Filename mismatch"
        assert read_file.meta.version == rfile.meta.version, "Test_Read_file - version mismatch"
        assert read_file.meta.owner == rfile.meta.owner, "Test_Read_file - owner mismatch"
        assert read_file.meta.contentHash == rfile.meta.contentHash, "Test_Read_file - contentHash mismatch"
        assert read_file.content == rfile.content, "Test_Read_file - Content mismatch"
        print "Test case passed 7 -- write file: file name - %s, owner - %s" % (
            rfile.meta.filename, owner)

        rfile.meta.filename = "p2.txt"
        rfile.meta.owner = "AravindKumar"
        rfile.meta.contentHash = sha256(rfile.meta.owner + ":" + rfile.meta.filename).hexdigest()
        node = self.filestore.findSucc(rfile.meta.contentHash)
        assert node.id == "162f2ef78020a93545457290a21d4ea634d4bca22aff8530e2011209be88ff82", \
            "Test_Read_File - Node returned is wrong"
        print "Test case passed 8 -- get successor node for key: ", rfile.meta.contentHash
        file_store = make_socket(node.ip, node.port)
        file_store.writeFile(rfile)
        read_file = file_store.readFile(rfile.meta.filename, rfile.meta.owner)
        assert read_file.meta.filename == rfile.meta.filename, "Test_Read_file - Filename mismatch"
        assert read_file.meta.version == rfile.meta.version, "Test_Read_file - version mismatch"
        assert read_file.meta.owner == rfile.meta.owner, "Test_Read_file - owner mismatch"
        assert read_file.meta.contentHash == rfile.meta.contentHash, "Test_Read_file - contentHash mismatch"
        assert read_file.content == rfile.content, "Test_Read_file - Content mismatch"
        print "Test case passed 9 -- write file: file name - %s, owner - %s" % (
            rfile.meta.filename, rfile.meta.owner)

        rfile.meta.filename = "p2_p2.txt"
        rfile.meta.owner = "AravindKumarDhinagaran"
        rfile.meta.contentHash = sha256(rfile.meta.owner + ":" + rfile.meta.filename).hexdigest()
        node = self.filestore.findSucc(rfile.meta.contentHash)
        assert node.id == "162f2ef78020a93545457290a21d4ea634d4bca22aff8530e2011209be88ff82", \
            "Test_Read_File - Node returned is wrong"
        print "Test case passed 10 -- get successor node for key: ", rfile.meta.contentHash
        file_store = make_socket(node.ip, node.port)
        file_store.writeFile(rfile)
        read_file = file_store.readFile(rfile.meta.filename, rfile.meta.owner)
        assert read_file.meta.filename == rfile.meta.filename, "Test_Read_file - Filename mismatch"
        assert read_file.meta.version == rfile.meta.version, "Test_Read_file - version mismatch"
        assert read_file.meta.owner == rfile.meta.owner, "Test_Read_file - owner mismatch"
        assert read_file.meta.contentHash == rfile.meta.contentHash, "Test_Read_file - contentHash mismatch"
        assert read_file.content == rfile.content, "Test_Read_file - Content mismatch"
        print "Test case passed 11 -- write file: file name - %s, owner - %s" % (
            rfile.meta.filename, rfile.meta.owner)

        try:
            file_store.readFile(filename, 'invalid')
            raise AssertionError(
                "Test_Read_file - No Exception on invalid owner name")
        except SystemException as err:
            print "Test case passed 12 -- ", err.message
        try:
            file_store.readFile('invalid', owner)
            raise AssertionError(
                "Test_Read_file - No Exception on invalid filename")
        except SystemException as err:
            print "Test case passed 13 -- ", err.message
        try:
            self.filestore.readFile(filename, owner)
            raise AssertionError(
                "Test_Read_file - No Exception on invalid read")
        except SystemException as err:
            assert err.message == "Key is not associated with the node", \
                "Test_Read_file - Message Content Mismatch"
            print "Test case passed 14 -- ", err.message

    def test_negative_cases(self, filestore, key):
        try:
            filestore.findSucc(key)
            raise AssertionError(
                "Test_Find_Succ - No Exception on empty finger table")
        except SystemException as err:
            assert err.message == "Fingertable not exist for the current node", \
                "Test_Find_Succ - message content mismatch"
            print "Test case passed 15 -- ", err.message
        try:
            filestore.findPred(key)
            raise AssertionError(
                "Test_Find_Pred - No Exception on empty finger table")
        except SystemException as err:
            assert err.message == "Fingertable not exist for the current node", \
                "Test_Find_Pred - message content mismatch"
            print "Test case passed 16 -- ", err.message
        try:
            filestore.getNodeSucc()
            raise AssertionError(
                "Test_Get_Node_Succ - No Exception on empty finger table")
        except SystemException as err:
            assert err.message == "Fingertable not exist for the current node", \
                "Test_Node_Succ - message content mismatch"
            print "Test case passed 17 -- ", err.message
def testBinaryProtocolAcceleratedEof(self):
    """Test that TBinaryProtocolAccelerated throws an EOFError when it
    reaches the end of the stream"""
    self.eofTestHelper(TBinaryProtocol.TBinaryProtocolAcceleratedFactory())
    self.eofTestHelperStress(
        TBinaryProtocol.TBinaryProtocolAcceleratedFactory())
def findNtpAmplifiers(table, today=datetime.date.today(), verbose=False):
    """
    Find NTP amplifiers in the given traffic (table) and store the results
    in the 'ntpamplifiers' Hive table.

    Note: the default today=datetime.date.today() is evaluated once at
    import time, not on each call.
    """
    date = "%d%02d%02d" % (today.year, today.month, today.day)
    table = scrub(table)

    # set some variables regarding the input data
    if table.startswith("netflow"):
        dataType = "netflow"
        req0 = ("select sa, sum(ibyt), sum(ipkt) from %s where sp=123 "
                "and dt='%s' and pr='UDP' and ibyt/ipkt=468 group by sa" %
                (table, date))
    elif table.startswith("sflow"):
        dataType = "sflow"
        req0 = ("select srcip, sum(ipsize), count(*) from %s where "
                "udpsrcport=123 and ipprotocol=17 and ipsize=468 and "
                "dt='%s' group by srcip" % (table, date))
    else:
        sys.stderr.write("Data type unknown!")
        sys.exit(-1)

    cursor = presto.connect('localhost').cursor()
    if verbose:
        sys.stdout.write("Looking for %s NTP amplifiers... (%s)\n" %
                         (date, table))

    # get today's data
    cursor.execute(req0)
    res = cursor.fetchall()
    if len(res) == 0:
        return
    data = pd.DataFrame(res, columns=["srcip", "nbbyt", "nbpkt"])

    # add the confidence score:
    data["confidence"] = "LOW"
    data.loc[data.nbpkt >= 100, "confidence"] = "MED"
    data.loc[data.nbpkt >= 1000, "confidence"] = "HIGH"

    outputFile = open("%s/ntpamplifiers_%s_%s.txt" %
                      (outputDirectory, table, date), "w")
    data.to_csv(outputFile, sep="\t", header=False,
                cols=["srcip", "nbbyt", "nbpkt", "confidence"], index=False)
    outputFile.close()

    # Store results in Hive
    try:
        transport = TSocket.TSocket('localhost', 10000)
        transport = TTransport.TBufferedTransport(transport)
        protocol = TBinaryProtocol.TBinaryProtocol(transport)
        client = ThriftHive.Client(protocol)
        transport.open()
        client.execute(
            "create table if not exists ntpamplifiers (srcip string, "
            "byte bigint, pkt bigint, confidence string) "
            "partitioned by(dt string, dataSrc string) "
            "row format delimited fields terminated by '\t'")
        client.execute(
            "load data local inpath '{dir}/ntpamplifiers_{table}_{date}.txt' "
            "overwrite into table ntpamplifiers "
            "partition (dt='{date}', dataSrc='{table}')"
            .format(table=table, date=date, dir=outputDirectory))
        transport.close()
    except Thrift.TException, tx:
        sys.stderr.write('%s\n' % (tx.message))
def __init__(self):
    self.transport = TSocket.TSocket('192.168.0.242', 9090)
    transport = TTransport.TBufferedTransport(self.transport)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    self.client = Hbase.Client(protocol)
    transport.open()
from thrift import Thrift
from thrift.protocol import TBinaryProtocol
from thrift.transport import THttpClient
from thrift.transport import TTransport
'''
@author: anant bhardwaj
@date: Oct 11, 2013

Sample Python client for DataHub Account Creation
'''

try:
    datahub_transport = THttpClient.THttpClient(
        'http://datahub.csail.mit.edu/service')
    datahub_transport = TTransport.TBufferedTransport(datahub_transport)
    datahub_protocol = TBinaryProtocol.TBinaryProtocol(datahub_transport)
    datahub_client = DataHub.Client(datahub_protocol)

    account_transport = THttpClient.THttpClient(
        'http://datahub.csail.mit.edu/service/account')
    account_transport = TTransport.TBufferedTransport(account_transport)
    account_protocol = TBinaryProtocol.TBinaryProtocol(account_transport)
    account_client = AccountService.Client(account_protocol)

    print "Version: %s" % (datahub_client.get_version())
    try:
        print account_client.remove_account(
            username="******",
            app_id="confer",
            app_token="d089b3ed-1d82-4eae-934a-859d7070d364")
    except Exception as e:
        print e  # minimal handler (assumed; snippet was truncated here)
except Thrift.TException, tx:
    print '%s' % (tx.message)  # minimal handler (assumed; snippet was truncated here)
def testBinaryProtocolAcceleratedEof(self):
    """Test that TBinaryProtocolAccelerated throws a TTransportException
    when it reaches the end of the stream"""
    self.eofTestHelper(TBinaryProtocol.TBinaryProtocolAcceleratedFactory())
    self.eofTestHelperStress(
        TBinaryProtocol.TBinaryProtocolAcceleratedFactory())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys

from thrift.Thrift import TException
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol

from genpy.calc.ICalc import Client

if __name__ == '__main__':
    try:
        TRANSPORT = TSocket.TSocket('localhost', 9090)
        TRANSPORT = TTransport.TBufferedTransport(TRANSPORT)
        PROTOCOL = TBinaryProtocol.TBinaryProtocol(TRANSPORT)
        CLIENT = Client(PROTOCOL)
        TRANSPORT.open()
        num1, num2 = sys.argv[1], sys.argv[2]
        print CLIENT.add(int(num1), int(num2))
        TRANSPORT.close()
    except TException as e:
        print e.message
def execute_hurdle(make_plot=False, result_file="results.json",
                   host='127.0.0.1', port=9090, seed=None,
                   initial_state=None, num_trials=10, num_rounds=30000,
                   scoring_rounds=1000, team_name="team"):
    num_states = 10
    avg_score_threshold = 2.0
    trial_pass_threshold = 6

    expected = expected_random_score(num_states)
    print("expected score of random guesser is {}".format(
        expected * num_rounds))
    print("score required to pass a trial is {}".format(
        avg_score_threshold * num_rounds))
    print("Number of trials passed to pass Hurdle 3 is {} out of {}".format(
        trial_pass_threshold, num_trials))

    # Make socket
    transport = TSocket.TSocket(host, port)
    # Buffering is critical. Raw sockets are very slow
    transport = TTransport.TBufferedTransport(transport)
    # Wrap in a protocol
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    # Create a client to use the protocol encoder
    client = Hurdle3Execution.Client(protocol)
    # Connect!
    transport.open()

    results = {"trials": {}, "team_name": team_name}
    for t in range(num_trials):
        if initial_state is None:
            initial_state = random.randrange(num_states)
        if seed is None:
            seed = random.randint(0, 0xffffffff)
        # create a new probabilistic state machine with potentially a new
        # initial state and seed at the start of each trial
        psm = PSM(num_states, initial_state, seed)
        print("starting trial {} of {}".format(t, num_trials))
        # run the trial and store the trial results
        trial_results = run_trial(t, num_rounds, scoring_rounds,
                                  avg_score_threshold, client, psm)
        # add seed and initial state to trial_results
        trial_results["seed"] = seed
        trial_results["initial_state"] = initial_state
        results["trials"][t] = trial_results

    # count the number of trials that passed
    trials_passed = sum(
        [results["trials"][i]["trial_pass"] for i in range(num_trials)])
    print("Number of trials passed: {} "
          "Number of trials passed required to pass Hurdle 3: {}"
          .format(trials_passed, trial_pass_threshold))
    hurdle_pass = trials_passed >= trial_pass_threshold
    print("Hurdle 3 Passed? {}".format(hurdle_pass))

    results["num_trials"] = num_trials
    results["num_states"] = num_states
    results["trials_passed"] = trials_passed
    results["trial_pass_threshold"] = trial_pass_threshold
    results["hurdle_pass"] = hurdle_pass

    with open(result_file, 'w') as f:
        f.write(json.dumps(results))
        print("Writing results to file: {}".format(result_file))
        #print("Results file: {}".format(results))

    client.stop()
def __init__(self):
    transport = TSocket.TSocket(kw_config.IP, kw_config.PORT)
    self.transport = TTransport.TBufferedTransport(transport)
    protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
    self.client = kwServer.Client(protocol)
    self.transport.open()
class AcceleratedBinaryTest(AbstractTest):
    protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
def serialize(thrift_obj, filename):
    with open(filename, 'wb') as f:
        transport = TTransport.TFileObjectTransport(f)
        protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
        thrift_obj.write(protocol)
        transport.flush()
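# A matching deserialize sketch for the function above, assuming the caller
# passes a freshly constructed Thrift object of the right type, which is
# populated in place by read().
def deserialize(thrift_obj, filename):
    with open(filename, 'rb') as f:
        transport = TTransport.TFileObjectTransport(f)
        protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
        thrift_obj.read(protocol)
    return thrift_obj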
# Handler methods (the enclosing TestFileStoreHandler class definition is
# not included in this snippet).
def kill(self):
    os._exit(0)

def say(self, word):
    print word


if __name__ == '__main__':
    try:
        parser = argparse.ArgumentParser(
            description='Durable File Service Participant.')
        parser.add_argument(dest='port', help='Port')
        args = parser.parse_args()
        handler = TestFileStoreHandler()
        processor = TestFileStore.Processor(handler)
        tsocket = TSocket.TServerSocket('0.0.0.0', args.port)
        transport = TTransport.TBufferedTransportFactory()
        protocol = TBinaryProtocol.TBinaryProtocolFactory()
        server = TServer.TThreadedServer(processor, tsocket, transport,
                                         protocol)
        host = socket.gethostname()
        host += '.cs.binghamton.edu' if host.startswith('remote') else ''
        print('TestFileStore server running on ' + host + ':' + args.port)
        server.serve()
    except Thrift.TException as tx:
        print('%s' % (tx.message))
def handler(event, context):
    # dataset
    data_bucket = event['data_bucket']
    file = event['file']
    dataset_type = event["dataset_type"]
    assert dataset_type == "dense_libsvm"
    n_features = event['n_features']

    # ps setting
    host = event['host']
    port = event['port']

    # hyper-parameter
    n_clusters = event['n_clusters']
    n_epochs = event["n_epochs"]
    threshold = event["threshold"]
    sync_mode = event["sync_mode"]
    n_workers = event["n_workers"]
    worker_index = event['worker_index']
    assert sync_mode.lower() == Synchronization.Reduce

    print('data bucket = {}'.format(data_bucket))
    print("file = {}".format(file))
    print('number of workers = {}'.format(n_workers))
    print('worker index = {}'.format(worker_index))
    print('num clusters = {}'.format(n_clusters))
    print('host = {}'.format(host))
    print('port = {}'.format(port))

    # Set thrift connection
    # Make socket
    transport = TSocket.TSocket(host, port)
    # Buffering is critical. Raw sockets are very slow
    transport = TTransport.TBufferedTransport(transport)
    # Wrap in a protocol
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    # Create a client to use the protocol encoder
    t_client = ParameterServer.Client(protocol)
    # Connect!
    transport.open()

    # test thrift connection
    ps_client.ping(t_client)
    print("create and ping thrift server >>> HOST = {}, PORT = {}".format(
        host, port))

    # Reading data from S3
    read_start = time.time()
    storage = S3Storage()
    lines = storage.load(file, data_bucket).read().decode('utf-8').split("\n")
    print("read data cost {} s".format(time.time() - read_start))

    parse_start = time.time()
    dataset = libsvm_dataset.from_lines(lines, n_features, dataset_type).ins_np
    data_type = dataset.dtype
    centroid_shape = (n_clusters, dataset.shape[1])
    print("parse data cost {} s".format(time.time() - parse_start))
    print("dataset type: {}, dtype: {}, Centroids shape: {}, num_features: {}"
          .format(dataset_type, data_type, centroid_shape, n_features))

    # register model
    model_name = Prefix.KMeans_Cent
    model_length = centroid_shape[0] * centroid_shape[1] + 1
    ps_client.register_model(t_client, worker_index, model_name, model_length,
                             n_workers)
    ps_client.exist_model(t_client, model_name)
    print("register and check model >>> name = {}, length = {}".format(
        model_name, model_length))

    init_centroids_start = time.time()
    ps_client.can_pull(t_client, model_name, 0, worker_index)
    ps_model = ps_client.pull_model(t_client, model_name, 0, worker_index)
    if worker_index == 0:
        centroids = dataset[0:n_clusters].flatten()
        ps_client.can_push(t_client, model_name, 0, worker_index)
        ps_client.push_grad(
            t_client, model_name,
            np.append(centroids.flatten(), 1000.).astype(np.double) -
            np.asarray(ps_model).astype(np.double), 1.0, 0, worker_index)
    else:
        centroids = np.zeros(centroid_shape)
        ps_client.can_push(t_client, model_name, 0, worker_index)
        ps_client.push_grad(
            t_client, model_name,
            np.append(centroids.flatten(), 0).astype(np.double), 0, 0,
            worker_index)
    ps_client.can_pull(t_client, model_name, 1, worker_index)
    ps_model = ps_client.pull_model(t_client, model_name, 1, worker_index)
    cur_centroids = np.array(ps_model[0:-1]).astype(
        np.float32).reshape(centroid_shape)
    cur_error = float(ps_model[-1])
    #print("init centroids = {}, error = {}".format(cur_centroids, cur_error))
    print("initial centroids cost {} s".format(
        time.time() - init_centroids_start))

    model = cluster_models.get_model(dataset, cur_centroids, dataset_type,
                                     n_features, n_clusters)

    train_start = time.time()
    cal_time = 0
    comm_time = 0
    for epoch in range(1, n_epochs + 1):
        epoch_start = time.time()
        # local computation
        model.find_nearest_cluster()
        local_cent = model.get_centroids("numpy").reshape(-1)
        local_cent_error = np.concatenate(
            (local_cent.astype(np.double).flatten(),
             np.array([model.error], dtype=np.double)))
        epoch_cal_time = time.time() - epoch_start
        print("error after local update = {}".format(model.error))

        # push updates
        epoch_comm_start = time.time()
        last_cent_error = np.concatenate(
            (cur_centroids.astype(np.double).flatten(),
             np.array([cur_error], dtype=np.double)))
        ps_model_inc = local_cent_error - last_cent_error
        ps_client.can_push(t_client, model_name, epoch, worker_index)
        ps_client.push_grad(t_client, model_name, ps_model_inc,
                            1.0 / n_workers, epoch, worker_index)

        # pull new model
        epoch_pull_start = time.time()
        # sync all workers
        ps_client.can_pull(t_client, model_name, epoch + 1, worker_index)
        ps_model = ps_client.pull_model(t_client, model_name, epoch + 1,
                                        worker_index)
        model.centroids = np.array(ps_model[0:-1]).astype(
            np.float32).reshape(centroid_shape)
        model.error = float(ps_model[-1])
        cur_centroids = model.get_centroids("numpy").reshape(-1)
        cur_error = model.error
        epoch_comm_time = time.time() - epoch_comm_start
        print("Epoch[{}] Worker[{}], error = {}, cost {} s, "
              "cal cost {} s, sync cost {} s"
              .format(epoch, worker_index, model.error,
                      time.time() - epoch_start, epoch_cal_time,
                      epoch_comm_time))
        if model.error < threshold:
            break

    print("Worker[{}] finishes training: Error = {}, cost {} s".format(
        worker_index, model.error, time.time() - train_start))
    return
#!/usr/bin/env python
import sys
sys.path.append('./gen-py')

from scribe import *
from scribe.ttypes import *
from thrift.transport import TTransport, TSocket
from thrift.protocol import TBinaryProtocol

category = 'ncs_index_err'
message = 'test_message'
host = 'localhost'
port = 9999

log_entry = LogEntry(category=category, message=message)
socket = TSocket.TSocket(host=host, port=port)
transport = TTransport.TFramedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(trans=transport, strictRead=False,
                                           strictWrite=False)
client = scribe.Client(iprot=protocol, oprot=protocol)

for i in range(10000):
    transport.open()
    result = client.Log(messages=[log_entry])
    transport.close()
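# Opening and closing the framed transport on every iteration, as above, pays
# the TCP setup cost 10000 times. A sketch of the cheaper pattern, reusing one
# connection for the whole batch:
transport.open()
try:
    for i in range(10000):
        client.Log(messages=[log_entry])
finally:
    transport.close()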
def create_sharing_client(transport):
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    return SharingRegistryService.Client(protocol)
if __name__ == '__main__':
    if os.path.exists('../../service_log'):
        shutil.rmtree('../../service_log')
    os.makedirs('../../service_log/info_log')
    os.makedirs('../../service_log/error_log')
    info_logger = get_logger(name=__name__ + '_info',
                             filename='../../service_log/info_log/info.log',
                             level='info')
    error_logger = get_logger(name=__name__ + '_error',
                              filename='../../service_log/error_log/error.log',
                              level='error')
    # both model paths are required by the handler below
    conf_path_1 = '/Users/lix/Desktop/fasic_cv_sdk/pretrained_models/2stems'
    conf_path_2 = '/Users/lix/Desktop/fasic_cv_sdk/inaSpeechSegmenter'
    handler = VisionServicesHandler(conf_path_1, conf_path_2,
                                    info_logger=info_logger,
                                    error_logger=error_logger)
    processor = VisionServices.Processor(handler)
    port, worker = get_args()
    transport = TSocket.TServerSocket(port=port)
    tfactory = TTransport.TBufferedTransportFactory()
    pfactory = TBinaryProtocol.TBinaryProtocolFactory(strictRead=True,
                                                      strictWrite=False)
    server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
    print('server starting...')
    server.serve()
    # The original snippet begins mid-handler; the method name and its
    # enclosing try are reconstructed here (assumed, for syntactic
    # completeness).
    def get_user_list(self, req):
        try:
            uid_list = req.uidList
            if not uid_list:
                raise Exception('parameter error', ErrCodeEnum.PARAM_ERROR)
            user_list_all = self.get_user_by_uid_list(uid_list)
            if not user_list_all:
                raise Exception('server error', ErrCodeEnum.SERVER_ERROR)
            user_list_resp = UserListResp(user_list_all)
            return user_list_resp
        except Exception as e:
            print e
            raise ApiErrorException(e.getCode(), e.getMessage())


if __name__ == '__main__':
    handler = UserInfoHandler()
    processor = UserService.Processor(handler)
    transport = TSocket.TServerSocket(host='127.0.0.1', port=9090)
    tfactory = TTransport.TBufferedTransportFactory()
    pfactory = TBinaryProtocol.TBinaryProtocolFactory()
    # simple single-threaded server mode
    server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
    # threaded mode:
    # server = TServer.TThreadedServer(
    #     processor, transport, tfactory, pfactory)
    print "Starting thrift server in python..."
    server.serve()
    print "done!"
def start_extension(name="<unknown>", version="0.0.0", sdk_version="3.0.7", min_sdk_version="1.8.0"): """Start your extension by communicating with osquery core and starting a thrift server. Keyword arguments: name -- the name of your extension version -- the version of your extension sdk_version -- the version of the osquery SDK used to build this extension min_sdk_version -- the minimum version of the osquery SDK that you can use """ args = parse_cli_params() # Disable logging for the thrift module (can be loud). logging.getLogger('thrift').addHandler(logging.NullHandler()) client = ExtensionClient(path=args.socket) if not client.open(args.timeout): if args.verbose: message = "Could not open socket %s" % args.socket raise ExtensionException( code=1, message=message, ) return ext_manager = ExtensionManager() # try connecting to the desired osquery core extension manager socket try: status = client.extension_manager_client().registerExtension( info=InternalExtensionInfo( name=name, version=version, sdk_version=sdk_version, min_sdk_version=min_sdk_version, ), registry=ext_manager.registry(), ) except socket.error: message = "Could not connect to %s" % args.socket raise ExtensionException( code=1, message=message, ) if status.code != 0: raise ExtensionException( code=1, message=status.message, ) # Start a watchdog thread to monitor the osquery process. rt = threading.Thread(target=start_watcher, args=(client, args.interval)) rt.daemon = True rt.start() # start a thrift server listening at the path dictated by the uuid returned # by the osquery core extension manager ext_manager.uuid = status.uuid processor = Processor(ext_manager) transport = None if sys.platform == 'win32': transport = TPipeServer(pipe_name="{}.{}".format(args.socket, status.uuid)) else: transport = TSocket.TServerSocket( unix_socket=args.socket + "." + str(status.uuid)) tfactory = TTransport.TBufferedTransportFactory() pfactory = TBinaryProtocol.TBinaryProtocolFactory() server = TServer.TSimpleServer(processor, transport, tfactory, pfactory) server.serve()
class NormalBinaryTest(AbstractTest):
    protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
def start(self):
    # Set up thrift client and contact server
    thrift_server = 'localhost'
    self.transport = TSocket.TSocket(thrift_server, 9090)
    self.transport = TTransport.TBufferedTransport(self.transport)
    bprotocol = TBinaryProtocol.TBinaryProtocol(self.transport)

    # And the pltfm server as well
    self.transport_pltfm = None
    if self.pltfm_pm_client_module or self.pltfm_mgr_client_module:
        thrift_server = 'localhost'
        self.transport_pltfm = TSocket.TSocket(thrift_server, 9095)
        self.transport_pltfm = TTransport.TBufferedTransport(
            self.transport_pltfm)
        bprotocol_pltfm = TBinaryProtocol.TBinaryProtocol(self.transport_pltfm)

    # And the diag server as well
    self.transport_diag = None
    if self.diag_client_module:
        thrift_server = 'localhost'
        self.transport_diag = TSocket.TSocket(thrift_server, 9096)
        self.transport_diag = TTransport.TBufferedTransport(
            self.transport_diag)
        #bprotocol_diag = TBinaryProtocol.TBinaryProtocol(self.transport_diag)

    self.mc_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
        bprotocol, "mc")
    if self.mirror_client_module:
        self.mirror_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
            bprotocol, "mirror")
    if self.sd_client_module:
        self.sd_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
            bprotocol, "sd")
    if self.plcmt_client_module:
        self.plcmt_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
            bprotocol, "plcmt")
    if self.devport_mgr_client_module:
        self.devport_mgr_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
            bprotocol, "devport_mgr")
    else:
        self.devport_mgr_protocol = None
    if self.port_mgr_client_module:
        self.port_mgr_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
            bprotocol, "port_mgr")
    else:
        self.port_mgr_protocol = None
    self.conn_mgr_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
        bprotocol, "conn_mgr")
    if self.pkt_client_module:
        self.pkt_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
            bprotocol, "pkt")
    else:
        self.pkt_protocol = None
    if self.pltfm_pm_client_module and self.transport_pltfm:
        self.pltfm_pm_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
            bprotocol_pltfm, "pltfm_pm_rpc")
    else:
        self.pltfm_pm_protocol = None
    if self.pltfm_mgr_client_module and self.transport_pltfm:
        self.pltfm_mgr_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
            bprotocol_pltfm, "pltfm_mgr_rpc")
    else:
        self.pltfm_mgr_protocol = None
    if self.diag_client_module and self.transport_diag:
        self.diag_protocol = TBinaryProtocol.TBinaryProtocol(
            self.transport_diag)
    else:
        self.diag_protocol = None

    self.p4_protocols = {}
    self.clients = {}
    self.client = None
    for p4_name, p4_prefix in zip(self.p4_names, self.p4_prefixes):
        p4_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
            bprotocol, p4_prefix)
        self.p4_protocols[p4_name] = p4_protocol
        self.clients[p4_name] = self.p4_client_modules[p4_name].Client(
            p4_protocol)
    if len(self.clients) == 1:
        self.client = self.clients.values()[0]

    self.mc = self.mc_client_module.Client(self.mc_protocol)
    if self.mirror_client_module:
        self.mirror = self.mirror_client_module.Client(self.mirror_protocol)
    else:
        self.mirror = None
    if self.sd_client_module:
        self.sd = self.sd_client_module.Client(self.sd_protocol)
    else:
        self.sd = None
    if self.plcmt_client_module:
        self.plcmt = self.plcmt_client_module.Client(self.plcmt_protocol)
    else:
        self.plcmt = None
    if self.devport_mgr_client_module:
        self.devport_mgr = self.devport_mgr_client_module.Client(
            self.devport_mgr_protocol)
    else:
        self.devport_mgr = None
    if self.port_mgr_client_module:
        self.port_mgr = self.port_mgr_client_module.Client(
            self.port_mgr_protocol)
    else:
        self.port_mgr = None
    self.conn_mgr = self.conn_mgr_client_module.Client(self.conn_mgr_protocol)
    if self.pkt_client_module:
        self.pkt = self.pkt_client_module.Client(self.pkt_protocol)
    else:
        self.pkt = None
    if self.pltfm_pm_client_module and self.transport_pltfm:
        self.pltfm_pm = self.pltfm_pm_client_module.Client(
            self.pltfm_pm_protocol)
    else:
        self.pltfm_pm = None
    if self.pltfm_mgr_client_module and self.transport_pltfm:
        self.pltfm_mgr = self.pltfm_mgr_client_module.Client(
            self.pltfm_mgr_protocol)
    else:
        self.pltfm_mgr = None
    if self.diag_client_module and self.transport_diag:
        self.diag = self.diag_client_module.Client(self.diag_protocol)
    else:
        self.diag = None

    self.transport.open()
    if self.transport_pltfm:
        try:
            self.transport_pltfm.open()
        except:
            print "Did not connect to pltfm thrift server"
            self.transport_pltfm = None
            self.pltfm_mgr = None
            self.pltfm_pm = None
    if self.transport_diag:
        try:
            self.transport_diag.open()
        except:
            print "Did not connect to diag thrift server"
            self.transport_diag = None
            self.diag = None

    self.sess_hdl = self.conn_mgr.client_init()
    self.dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
    self.mc_sess_hdl = self.mc.mc_create_session()

    self.platform_type = "mavericks"
    board_type = self.pltfm_pm.pltfm_pm_board_type_get()
    if re.search("0x0234|0x1234|0x4234", hex(board_type)):
        self.platform_type = "mavericks"
    elif re.search("0x2234|0x3234", hex(board_type)):
        self.platform_type = "montara"
    print("platform type: %s" % self.platform_type)
    return self.client