def writeToEdge(self, writable, microbatchID, streamId, data, EDGE_ID,
                    index, sizeChoice, metaData, metaKeyValueMap):
        device = ""
        if (writable.edgeInfo != None):
            device = "local"
        else:
            device = str(writable.node.nodeId)
        localTime = repr(time.time())
        timestamp_record = str(microbatchID) + "," + str(
            index
        ) + "," + device + "," + "write req,starttime = " + localTime + ","
        #print "got the writable ",writable," the microbatchID is ",microbatchID

        nodeInfo = writable.node  #NodeInfoData
        writePreference = writable.preference
        reliability = writable.reliability
        edgeInfoData = writable.edgeInfo  # this is an optional field

        #Write to the edge
        if edgeInfoData is not None:

            #print "ip ",edgeInfoData.nodeIp," port",edgeInfoData.port
            # Make socket
            transport = TSocket.TSocket(edgeInfoData.nodeIp, edgeInfoData.port)

            # Buffering is critical. Raw sockets are very slow
            transport = TTransport.TFramedTransport(transport)

            # Wrap in a protocol
            protocol = TBinaryProtocol.TBinaryProtocol(transport)

            # Create a client to use the protocol encoder
            client = EdgeService.Client(protocol)

            # Connect!
            transport.open()

            #byte write(1:string mbId, 2:Metadata mbMetadata, 3:binary mbData)

            #local write -150
            timestamp_record_local = str(microbatchID) + "," + str(
                -150) + ",local,write req,starttime = " + localTime + ","
            #the response is not a byte anymore, its a WriteResponse
            response = client.write(microbatchID, metaData, data)
            timestamp_record_local = timestamp_record_local + "endtime = " + repr(
                time.time()) + " , " + str(sizeChoice) + '\n'

            #print "response from the edge ",response.status

            transport.close()

            #update the metadata structures for the Fog
            client, transport = self.openSocketConnection(
                nodeInfo.NodeIP, nodeInfo.port, FOG_SERVICE)

            #byte insertMetadata(1: Metadata mbMetadata, 2: EdgeInfoData edgeInfoData);

            #this was valid as per previous implementation in which we assumed that a local
            #write means that the client will be writing to itself. However as per the new
            #implementation, it is not necessary and a local write means a write that is written
            #to itself or any other edge managed by the same Fog i.e. a neighbor edge of the client
            #edgeInfoData = EdgeInfoData()
            #edgeInfoData.nodeId = EDGE_ID
            #edgeInfoData.nodeIp = EDGE_IP
            #edgeInfoData.port = EDGE_PORT
            #edgeInfoData.reliability = EDGE_RELIABILITY
            #edgeInfoData.storage = 12 #This value is not useful for computation

            #update the metadata with the checksum
            #hash_md5 = hashlib.md5()
            #hash_md5.update(data)
            #metaData.checksum = hash_md5.hexdigest()

            #metadata insert to fog -50
            timeMetadata = str(microbatchID) + "," + str(
                -50) + ",local ,metadata req,starttime = " + repr(
                    time.time()) + ","
            response = client.insertMetadata(metaData, edgeInfoData,
                                             metaKeyValueMap)
            timeMetadata = timeMetadata + " endTime = " + repr(
                time.time()) + " , " + str(sizeChoice) + '\n'

            with open(BASE_LOG + 'logs.txt', 'a') as myLogs:
                myLogs.write(timestamp_record_local)
                myLogs.write(timeMetadata)
            #print "The response from the fog is ", response

            self.closeSocket(transport)

        else:

            #print "Have to talk to a fog with preference "
            # byte write(1:Metadata mbMetadata, 2:binary data, 3:WritePreference preference);
            client, transport = self.openSocketConnection(
                nodeInfo.NodeIP, nodeInfo.port, FOG_SERVICE)

            #response is now a WriteResponse and not a byte
            response = client.putNext(metaData, data, writable.preference,
                                      metaKeyValueMap)

            #print "the response from the fog for write ",response.status
            self.closeSocket(transport)

        timestamp_record = timestamp_record + "endtime = " + repr(
            time.time()) + " , " + str(sizeChoice) + '\n'
        #print "the time stamp for write request is ",timestamp_record

        with open(BASE_LOG + 'logs.txt', 'a') as myLogs:
            myLogs.write(timestamp_record)
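Every snippet in this file repeats the same four-step Thrift setup (socket, transport, protocol, client). For reference, a small helper of the following shape could fold that boilerplate into one place; this is a sketch of the pattern, not the openSocketConnection helper the class actually calls:

from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol

def open_framed_client(client_cls, host, port):
    # Open a framed, binary-protocol Thrift client against host:port.
    # Returns (client, transport); the caller is responsible for closing
    # the transport, mirroring the setup repeated in writeToEdge above.
    transport = TTransport.TFramedTransport(TSocket.TSocket(host, port))
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = client_cls(protocol)
    transport.open()
    return client, transport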
def handler(event, context):
    # dataset
    data_bucket = event['data_bucket']
    file = event['file']
    dataset_type = event["dataset_type"]
    assert dataset_type == "sparse_libsvm"
    n_features = event['n_features']

    # ps setting
    host = event['host']
    port = event['port']

    # hyper-parameter
    n_clusters = event['n_clusters']
    n_epochs = event["n_epochs"]
    threshold = event["threshold"]
    sync_mode = event["sync_mode"]
    n_workers = event["n_workers"]
    worker_index = event['worker_index']
    assert sync_mode.lower() == Synchronization.Reduce

    print('data bucket = {}'.format(data_bucket))
    print("file = {}".format(file))
    print('number of workers = {}'.format(n_workers))
    print('worker index = {}'.format(worker_index))
    print('num clusters = {}'.format(n_clusters))
    print('host = {}'.format(host))
    print('port = {}'.format(port))

    # Set thrift connection
    # Make socket
    transport = TSocket.TSocket(host, port)
    # Buffering is critical. Raw sockets are very slow
    transport = TTransport.TBufferedTransport(transport)
    # Wrap in a protocol
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    # Create a client to use the protocol encoder
    t_client = ParameterServer.Client(protocol)
    # Connect!
    transport.open()
    # test thrift connection
    ps_client.ping(t_client)
    print("create and ping thrift server >>> HOST = {}, PORT = {}".format(host, port))

    # Reading data from S3
    read_start = time.time()
    storage = S3Storage()
    lines = storage.load(file, data_bucket).read().decode('utf-8').split("\n")
    print("read data cost {} s".format(time.time() - read_start))

    parse_start = time.time()
    dataset = libsvm_dataset.from_lines(lines, n_features, dataset_type)
    train_set = dataset.ins_list
    np_dtype = train_set[0].to_dense().numpy().dtype
    centroid_shape = (n_clusters, n_features)
    print("parse data cost {} s".format(time.time() - parse_start))
    print("dataset type: {}, data type: {}, centroids shape: {}"
          .format(dataset_type, np_dtype, centroid_shape))

    # register model
    model_name = Prefix.KMeans_Cent
    model_length = centroid_shape[0] * centroid_shape[1] + 1
    ps_client.register_model(t_client, worker_index, model_name, model_length, n_workers)
    ps_client.exist_model(t_client, model_name)
    print("register and check model >>> name = {}, length = {}".format(model_name, model_length))

    init_centroids_start = time.time()
    ps_client.can_pull(t_client, model_name, 0, worker_index)
    ps_model = ps_client.pull_model(t_client, model_name, 0, worker_index)
    if worker_index == 0:
        centroids_np = sparse_centroid_to_numpy(train_set[0:n_clusters], n_clusters)
        ps_client.can_push(t_client, model_name, 0, worker_index)
        ps_client.push_grad(t_client, model_name,
                            np.append(centroids_np.flatten(), 1000.).astype(np.double) - np.asarray(ps_model).astype(np.double),
                            1., 0, worker_index)
    else:
        centroids_np = np.zeros(centroid_shape)
        ps_client.can_push(t_client, model_name, 0, worker_index)
        ps_client.push_grad(t_client, model_name,
                            np.append(centroids_np.flatten(), 0).astype(np.double),
                            0, 0, worker_index)
    ps_client.can_pull(t_client, model_name, 1, worker_index)
    ps_model = ps_client.pull_model(t_client, model_name, 1, worker_index)
    cur_centroids = np.array(ps_model[0:-1]).astype(np.float32).reshape(centroid_shape)
    cur_error = float(ps_model[-1])
    print("initial centroids cost {} s".format(time.time() - init_centroids_start))

    model = cluster_models.get_model(train_set, torch.from_numpy(cur_centroids), dataset_type,
                                     n_features, n_clusters)

    train_start = time.time()
    for epoch in range(1, n_epochs + 1):
        epoch_start = time.time()

        # local computation
        model.find_nearest_cluster()
        local_cent = model.get_centroids("numpy").reshape(-1)
        local_cent_error = np.concatenate((local_cent.astype(np.double).flatten(),
                                           np.array([model.error], dtype=np.double)))
        epoch_cal_time = time.time() - epoch_start

        # push updates
        epoch_comm_start = time.time()
        last_cent_error = np.concatenate((cur_centroids.astype(np.double).flatten(),
                                          np.array([cur_error], dtype=np.double)))
        ps_model_inc = local_cent_error - last_cent_error
        ps_client.can_push(t_client, model_name, epoch, worker_index)
        ps_client.push_grad(t_client, model_name,
                            ps_model_inc, 1. / n_workers, epoch, worker_index)

        # pull new model
        ps_client.can_pull(t_client, model_name, epoch + 1, worker_index)   # sync all workers
        ps_model = ps_client.pull_model(t_client, model_name, epoch + 1, worker_index)
        model.centroids = [torch.from_numpy(c).reshape(1, n_features).to_sparse()
                           for c in np.array(ps_model[0:-1]).astype(np.float32).reshape(centroid_shape)]

        model.error = float(ps_model[-1])
        cur_centroids = model.get_centroids("numpy")
        cur_error = model.error

        epoch_comm_time = time.time() - epoch_comm_start

        print("Epoch[{}] Worker[{}], error = {}, cost {} s, cal cost {} s, sync cost {} s"
              .format(epoch, worker_index, model.error,
                      time.time() - epoch_start, epoch_cal_time, epoch_comm_time))

        if model.error < threshold:
            break

    print("Worker[{}] finishes training: Error = {}, cost {} s"
          .format(worker_index, model.error, time.time() - train_start))
    return
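For reference, the handler only consumes the event keys read above; a minimal invoking event might look like the following sketch (the bucket, file, and host values are placeholders, not taken from the source):

event = {
    "data_bucket": "my-training-bucket",  # placeholder bucket name
    "file": "part-0.libsvm",              # placeholder object key
    "dataset_type": "sparse_libsvm",      # asserted by the handler
    "n_features": 128,
    "host": "172.31.0.10",                # parameter-server host (placeholder)
    "port": 27000,
    "n_clusters": 10,
    "n_epochs": 20,
    "threshold": 0.02,
    "sync_mode": "reduce",                # must equal Synchronization.Reduce once lower-cased
    "n_workers": 4,
    "worker_index": 0,
}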
Example #3
def start_client(block_port, serverport, fname, dirf, op):
    try:
        f = open(os.devnull, 'w')
        old = sys.stderr
        sys.stderr = f
        #print "Connecting to Metadata server...",
        transport = TSocket.TSocket('localhost', serverport)
        transport = TTransport.TBufferedTransport(transport)
        protocol = TBinaryProtocol.TBinaryProtocol(transport)
        client = MetadataServerService.Client(protocol)
        #print "SUCCESS"

        #print "Connecting to Block Server...",
        b_trans = TSocket.TSocket('localhost', block_port)
        b_trans = TTransport.TBufferedTransport(b_trans)
        b_proto = TBinaryProtocol.TBinaryProtocol(b_trans)
        b_client = BlockServerService.Client(b_proto)
        #print "SUCCESS"
        #b_trans.open()
        #b_client.hasBlock("test client")
        #b_trans.close()
        #client.ping()

        #print "Creating file"
        aFile = file()
        aFile.filename = fname
        aFile.version = 1
        aFile.status = responseType.OK
        #print "Mode:	", op

        if op == "upload":
            tmp = separate_file(dirf, fname)
            aFile.hashList = tmp[0]
            #print "	Sending file metadata..."
            transport.open()
            res = client.storeFile(aFile)
            transport.close()
            #print "	The following blocks will be sent"
            b_trans.open()
            for b_hash in res.hashList:
                #print b_hash
                out = hashBlock()
                out.hash = b_hash
                out.block = (tmp[2])[b_hash]
                b_client.storeBlock(out)
            b_trans.close()
            # store the file metadata again now that all blocks are uploaded
            transport.open()
            res = client.storeFile(aFile)
            transport.close()
            if res.status != uploadResponseType.OK:
                print("ERROR")
                return 2

        # if op is download
        if op == "download":
            transport.open()
            hashBlockList = list()
            res = client.getFile(fname)
            transport.close()
            if res.status == responseType.ERROR:
                print("ERROR")
                return 2
            #print "The following blocks will be fetched:"
            #for hb in res.hashList:
            #    print "	", hb
            b_trans.open()
            for hb in res.hashList:
                # check whether the block server has the block
                res2 = b_client.hasBlock(hb)
                #print "	Checking block ", hb
                if res2.status == hasBlockResponseType.OK:
                    #print "		OK - appending to hashBlock list"
                    tmp = b_client.getBlock(hb)
                    hashBlockList.append(tmp)
                else:
                    #print "Block not in server"
                    print("ERROR")
                    return 2
            b_trans.close()
            # reassemble the file from the fetched blocks once all are present
            bl = join(hashBlockList, dirf, fname)
        if op == "delete":
            transport.open()
            res = client.deleteFile(aFile)
            if res.message != responseType.OK:
                print "ERROR"
                return 2
            transport.close()

#close transport
        print "OK"
        sys.stderr = old
    except Exception:
        sys.stderr = old
        return 3
    return 1
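A call like the sketch below would drive this function; the ports and paths are illustrative only. Note the return convention: 1 on success, 2 on a server-reported error, 3 on any exception.

rc = start_client(block_port=9091, serverport=9090,
                  fname="report.pdf", dirf="/tmp/files", op="upload")
if rc != 1:
    print("transfer failed with code", rc)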
 def __init__(self):
     transport = TSocket.TSocket(classify_config.IP, classify_config.PORT)
     self.transport = TTransport.TBufferedTransport(transport)
     protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
     self.client = classifyServer.Client(protocol)
     self.transport.open()
Example #5
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 16:26:25 2019

@author: Brianzhu
"""
from thrift.transport import TSocket,TTransport
from thrift.protocol import TCompactProtocol
from hbase import Hbase
from hbase.ttypes import ColumnDescriptor, Mutation, BatchMutation, TRegionInfo
from hbase.ttypes import IOError, AlreadyExists

# The Thrift server's default port is 9090
socket = TSocket.TSocket('113.107.166.14',13100)
socket.setTimeout(5000)

transport = TTransport.TBufferedTransport(socket)
protocol = TCompactProtocol.TCompactProtocol(transport)

client = Hbase.Client(protocol)
transport.open()

print(client.getTableNames())
# print(client.get('test', 'row1', 'cf:a'))

Example #6
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol

from mamba import description, context, it
from expects import expect, equal

import json

# ThriftKVService is the Thrift-generated client module; its import
# (from the project's gen-py output) is omitted in this snippet.
test_doc_path = './data/test_doc.json'

try:
    with description('ThriftKV') as self:
        doc_id = ''
        with it('the create method is called and returns a mongo object id'):
            transport = TSocket.TSocket('0.0.0.0', 9090)
            transport = TTransport.TBufferedTransport(transport)
            protocol = TBinaryProtocol.TBinaryProtocol(transport)
            client = ThriftKVService.Client(protocol)
            transport.open()

            # Get thrift client and open transport protocol
            with open(test_doc_path) as f:
                global doc_id
                doc = f.read()
                doc_id = json.loads(client.create(doc))

                expect('$oid' in doc_id.keys())

            transport.close()
Example #7
# -*- coding=utf-8 -*-

__authors__ = [
    '"zhaobo" <*****@*****.**>',
]

from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
from RPC_Practice.empService import empService
from RPC_Practice.empService.empService import Client

__HOST = '127.0.0.1'
__PORT = 8080

tsoct = TSocket.TSocket(__HOST, __PORT)
trans = TTransport.TBufferedTransport(tsoct)
proc = TBinaryProtocol.TBinaryProtocol(trans)
client = Client(proc)

name = 'zhaobo'
trans.open()

print(client.getEmpByName(name).text)
Example #8
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol

from hbase import Hbase
from hbase.ttypes import *

transport = TSocket.TSocket('localhost', 9090)

transport = TTransport.TBufferedTransport(transport)

protocol = TBinaryProtocol.TBinaryProtocol(transport)

client = Hbase.Client(protocol)
transport.open()

contents = ColumnDescriptor(name='cf:', maxVersions=1)
client.createTable('test', [contents])

# List all HBase tables
print(client.getTableNames())
Example #9
 def connect(self):
     self.transport = TSocket.TSocket(self.server, self.port)
     self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
     self.client = OrderReplay.Client(self.protocol)
     self.transport.open()
Example #10
    def __init__(self,
                 uri=None,
                 user=None,
                 password=None,
                 host=None,
                 port=6274,
                 dbname=None,
                 protocol='binary',
                 sessionid=None,
                 ):
        if sessionid is not None:
            if any([user, password, uri, dbname]):
                raise TypeError("Cannot specify sessionid with user, password,"
                                " dbname, or uri")
        if uri is not None:
            if not all([user is None,
                        password is None,
                        host is None,
                        port == 6274,
                        dbname is None,
                        protocol == 'binary']):
                raise TypeError("Cannot specify both URI and other arguments")
            user, password, host, port, dbname, protocol = _parse_uri(uri)
        if host is None:
            raise TypeError("`host` parameter is required.")
        if protocol in ("http", "https"):
            if not host.startswith(protocol):
                # the THttpClient expects http[s]://localhost
                host = protocol + '://' + host
            transport = THttpClient.THttpClient("{}:{}".format(host, port))
            proto = TJSONProtocol.TJSONProtocol(transport)
            socket = None
        elif protocol == "binary":
            socket = TSocket.TSocket(host, port)
            transport = TTransport.TBufferedTransport(socket)
            proto = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
        else:
            raise ValueError("`protocol` should be one of"
                             " ['http', 'https', 'binary'],"
                             " got {} instead".format(protocol))
        self._user = user
        self._password = password
        self._host = host
        self._port = port
        self._dbname = dbname
        self._transport = transport
        self._protocol = protocol
        self._socket = socket
        self._closed = 0
        self._tdf = None
        try:
            self._transport.open()
        except TTransportException as e:
            if e.type == TTransportException.NOT_OPEN:
                err = OperationalError("Could not connect to database")
                raise err from e
            else:
                raise
        self._client = Client(proto)
        try:
            # If a sessionid was passed, we should validate it
            if sessionid:
                self._session = sessionid
                self.get_tables()
            else:
                self._session = self._client.connect(user, password, dbname)
        except TMapDException as e:
            raise _translate_exception(e) from e
        except TTransportException:
            raise ValueError(f"Connection failed with port {port} and "
                             f"protocol '{protocol}'. Try port 6274 for "
                             "protocol == binary or 6273, 6278 or 443 for "
                             "http[s]"
                             )

        # if OmniSci version <4.6, raise RuntimeError, as data import can be
        # incorrect for columnar date loads
        # Caused by https://github.com/omnisci/pymapd/pull/188
        semver = self._client.get_version()
        if Version(semver.split("-")[0]) < Version("4.6"):
            raise RuntimeError(f"Version {semver} of OmniSci detected. "
                               "Please use pymapd <0.11. See release notes "
                               "for more details.")
Example #11
from com.python.thrift import PersonService
from com.python.thrift import ttypes

from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TCompactProtocol
# Work around garbled text (mojibake) under Python 2
import sys

reload(sys)
sys.setdefaultencoding('utf8')

try:
    tSocket = TSocket.TSocket('localhost', 9999)
    tSocket.setTimeout(600)
    transport = TTransport.TFramedTransport(tSocket)
    protocol = TCompactProtocol.TCompactProtocol(transport)
    client = PersonService.Client(protocol)
    transport.open()

    person = client.getPersonByUsername("张三")
    print person.username
    print person.age
    print person.married
    print '-----------------'
    newPerson = ttypes.Person()
    newPerson.username = "******"
    newPerson.age = 18
    newPerson.married = True
Example #12
    def __init__(self, port):
        self.transport = TSocket.TSocket('localhost', port)
        self.transport = TTransport.TBufferedTransport(self.transport)
        protocol = TBinaryProtocol.TBinaryProtocol(self.transport)

        self.client = SessionSegment.Client(protocol)
        # Note: self.transport.open() must still be called before issuing RPCs.
Example #13
from thrift import Thrift
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hbase import Hbase
transport = TSocket.TSocket('62.234.212.250', 9090)
transport.setTimeout(5000)
# Choose the transport (TFramedTransport or TBufferedTransport)
trans = TTransport.TBufferedTransport(transport)
# Choose the wire protocol
protocol = TBinaryProtocol.TBinaryProtocol(trans)
# Create the client
client = Hbase.Client(protocol)
# Open the connection
transport.open()

from hbase.ttypes import ColumnDescriptor, Mutation, BatchMutation, TRegionInfo
from hbase.ttypes import IOError, AlreadyExists

tableName = "test"
rowkey = "1"

# Get all table names

tableNames = client.getTableNames()
print('tableNames:', tableNames)
Example #14
from thrift.protocol import TMultiplexedProtocol
from thrift.protocol import TBinaryProtocol
from thrift.transport import TSocket
from thrift.transport import TTransport
import conn_mgr_pd_rpc.conn_mgr
from res_pd_rpc.ttypes import *
from ptf.thriftutils import *
from AHashFlow.p4_pd_rpc.ttypes import *
import AHashFlow.p4_pd_rpc.AHashFlow as AHashFlow
import json

thrift_server = "localhost"
transport = TSocket.TSocket(thrift_server, 9090)
transport = TTransport.TBufferedTransport(transport)
transport.open()
bprotocol = TBinaryProtocol.TBinaryProtocol(transport)
conn_mgr_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
    bprotocol, "conn_mgr")
conn_mgr = conn_mgr_pd_rpc.conn_mgr.Client(conn_mgr_protocol)
sess_hdl = conn_mgr.client_init()
dev = 0
dev_tgt = DevTarget_t(dev, hex_to_i16(0xFFFF))

p4_prefix = "AHashFlow"
p4_protocol = TMultiplexedProtocol.TMultiplexedProtocol(bprotocol, p4_prefix)
client = AHashFlow.Client(p4_protocol)
flag = AHashFlow_register_flags_t(read_hw_sync=True)
#res = client.register_read_pktcnt(sess_hdl, dev_tgt, 0, flag)
#client.register_write_main_table_1_value(sess_hdl, dev_tgt, 0, 5)
#for idx in range(100):
Example #15
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TCompactProtocol
from thrift.server import TServer

# Annotator is the Thrift-generated service module; its import path
# depends on the project's gen-py layout and is omitted here.

import re

if __name__ == "__main__":

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--port", dest="port", type=int, default=9090)
    parser.add_argument("-H", "--host", dest="host", default="localhost")
    options = parser.parse_args()

    # Make socket
    transport = TSocket.TSocket(options.host, options.port)

    # Buffering is critical. Raw sockets are very slow
    transport = TTransport.TBufferedTransport(transport)

    # Wrap in a protocol
    protocol = TCompactProtocol.TCompactProtocol(transport)

    # Create a client to use the protocol encoder
    client = Annotator.Client(protocol)
    
    # Connect!
    transport.open()

    while True:
        s = input("Write some text > ")
Example #16
# coding: utf-8
"""
thrift_client.py
"""

import sys
from keyphrase import KeyphraseModel
from keyphrase import ttypes
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol

transport = TSocket.TSocket('192.168.101.4', 8084)
transport = TTransport.TBufferedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = KeyphraseModel.Client(protocol)
transport.open()

# x = {"id": "1", "title": "s3", "text": "txt"}
x = {
    "text":
    "社会经济的发展,使基于计算机互联网的信息化技术普及程度越来越高,高校媒体肩负着新闻信息传播的重要使命,成为了当前引领高校思想的重要平台。为了促进教育事业的发展,推动和谐社会的建设,国家为宣传发展战略拟定出新的高度,旨在为高校宣传工作提供更好的发展空间。当前,大学生思想文化的多元化发展态势已成为新常态,传媒发展与时俱进也成为大势所趋。不过唯有如此,才能够有效发挥传媒的力量,守"旧"创"新",切实履行好传媒的社会责任,致力于为当代社会思潮发展提供正确的导向。本文将以此出发,以视网融合为背景,浅谈纸媒新闻报道的新方向。",
    "id": "viezhong_for_test",
    "title": "探析视网融合下的高校纸媒新闻报道策略"
}
msg = client.predict([ttypes.Article(x["id"], x["title"], x["text"])])
print(msg[0].keyphrases)

transport.close()
                    value_bytes = value_bytes[4:]
                    size = struct.unpack('>i', size)[0]
                    records[j].append(value_bytes[:size])
                    value_bytes = value_bytes[size:]
                value_index += 1

    return records


if __name__ == '__main__':
    ip = "localhost"
    port = "6667"
    username = '******'
    password = '******'
    # Make socket
    transport = TSocket.TSocket(ip, port)

    # Buffering is critical. Raw sockets are very slow
    transport = TTransport.TBufferedTransport(transport)

    # Wrap in a protocol
    protocol = TBinaryProtocol.TBinaryProtocol(transport)

    # Create a client to use the protocol encoder
    client = Client(protocol)

    # Connect!
    transport.open()

    # Authentication
    clientProtocol = TSProtocolVersion.IOTDB_SERVICE_PROTOCOL_V2
Example #18
import sys
sys.path.append('gen-py')

from gen.server import server
from gen.server.ttypes import *

from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol

try:

    transport = TSocket.TSocket('127.0.0.1', 9090)
    transport = TTransport.TBufferedTransport(transport)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = server.Client(protocol)
    transport.open()

    print(client.get_methods())

    # client.set_fault(['flush', 'fsync', 'fsyncdir'], False, 0, 100000, "", True, 500000)
    client.set_fault(['flush', 'fsync', 'fsyncdir'], False, 0, 99000, "", True,
                     500000)
    # client.clear_all_faults()

except Thrift.TException as tx:
    print('%s' % tx.message)
Example #19
 def __init__(self, cls, host="localhost", port=9090):
     transport_ = TSocket.TSocket(host, port)
     self.transport = TTransport.TFramedTransport(transport_)
     self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
     self.cls = cls
Example #20
def signext_dfe():
    """Sign Extension DFE implementation."""
    try:
        # Make socket
        socket = TSocket.TSocket('localhost', 9090)

        # Buffering is critical. Raw sockets are very slow
        transport = TTransport.TBufferedTransport(socket)

        # Wrap in a protocol
        protocol = TBinaryProtocol.TBinaryProtocol(transport)

        # Create a client to use the protocol encoder
        client = SignExtService.Client(protocol)

        # Connect!
        transport.open()

        if len(sys.argv) != 3:
            print('Usage: signext.py dfe_ip remote_ip')
            sys.exit(1)

        dfe_ip_address = client.malloc_int64_t(5)
        client.inet_aton(sys.argv[1], dfe_ip_address)

        remote_ip_address = client.malloc_int64_t(5)
        client.inet_aton(sys.argv[2], remote_ip_address)

        netmask_address = client.malloc_int64_t(5)
        client.inet_aton('255.255.255.0', netmask_address)

        port = 2000

        maxfile = client.SignExt_init()
        engine = client.max_load(maxfile, '*')

        enum = max_config_key_bool_t_struct(
            SignExtService.max_config_key_bool_t_enum.
            MAX_CONFIG_PRINTF_TO_STDOUT)
        client.max_config_set_bool(enum, True)

        actions = client.max_actions_init(maxfile, 'default')

        client.max_run(engine, actions)
        client.max_actions_free(actions)

        my_buffer_address = client.malloc_int64_t(1)
        my_buffer_size = 4096 * 512
        client.posix_memalign(my_buffer_address, 4096, my_buffer_size)

        my_buffer = client.receive_data_int64_t(my_buffer_address, 1)[0]

        to_cpu = client.max_framed_stream_setup(engine, 'toCPU', my_buffer,
                                                my_buffer_size, -1)

        enum = max_config_key_bool_t_struct(
            SignExtService.max_net_connection_t_enum.
            MAX_NET_CONNECTION_QSFP_TOP_10G_PORT1)
        client.max_ip_config(engine, enum, dfe_ip_address, netmask_address)
        dfe_socket = client.max_udp_create_socket(engine, 'udpTopPort1')
        client.max_udp_bind(dfe_socket, port)
        client.max_udp_connect(dfe_socket, remote_ip_address, 0)

        print('Listening on %s port %d' % (sys.argv[1], port))

        print('Waiting for kernel response...')
        sys.stdout.flush()

        frame_address = client.malloc_int64_t(1)
        fsz_address = client.malloc_int64_t(1)
        num_message_rx = 0
        cond = True
        while cond:
            if client.max_framed_stream_read(to_cpu, 1, frame_address,
                                             fsz_address) == 1:
                num_message_rx += 1

                fsz = client.receive_data_int64_t(fsz_address, 1)[0]
                print('CPU: Got output frame %d - size %d bytes' %
                      (num_message_rx, fsz))

                frame = client.receive_data_int64_t(frame_address, 1)[0]

                word = client.receive_data_int64_t(frame, 3)
                for i in range(3):
                    print('Frame [%d] Word[%d]: 0x%lx' %
                          (num_message_rx, i,
                           (word[i] + 2**64) if word[i] < 0 else word[i]))

                client.max_framed_stream_discard(to_cpu, 1)

                if word[0] == 0 & word[1] == 0 & word[2] == 0:
                    cond = False

            else:
                time.sleep(1 / 100000.0)

        client.max_udp_close(dfe_socket)
        client.max_framed_stream_release(to_cpu)
        client.max_unload(engine)
        client.max_file_free(maxfile)
        client.free(dfe_ip_address)
        client.free(remote_ip_address)
        client.free(netmask_address)
        client.free(my_buffer_address)
        client.free(frame_address)
        client.free(fsz_address)
        client.SignExt_free()

        print('Done.')
        sys.stdout.flush()

        # Close!
        transport.close()

    except Thrift.TException as thrift_exception:
        print('%s' % thrift_exception.message)
Example #21
def main():
     socket = TSocket.TSocket("192.168.10.2", 9160)
Example #22
# coding=utf-8

from py.thrift.generated import PersonService
from py.thrift.generated import ttypes

from thrift import Thrift
from thrift.transport import TTransport
from thrift.protocol import TCompactProtocol
from thrift.transport import TSocket


try:
    tsocket = TSocket.TSocket('localhost', 8899)
    tsocket.setTimeout(600)
    transport = TTransport.TFramedTransport(tsocket)
    protocol = TCompactProtocol.TCompactProtocol(transport)
    client = PersonService.Client(protocol)

    transport.open()

    person = client.getPersonByUserName('张三')
    print(person.married)
    print(person.age)
    print(person.username)

    newperson = ttypes.Person()
    newperson.username = '******'
    newperson.age = 24
    newperson.married = True

    client.savePerson(newperson)
Example #23
#!/usr/bin/python

from common import *
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
from hbase import Hbase

# Connect to HBase Thrift server
transport = TTransport.TBufferedTransport(TSocket.TSocket(host, port))
protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)

# Create and open the client connection
client = Hbase.Client(protocol)
transport.open()

rows = client.getRow(tablename, "shakespeare-comedies-000001")

# Do a pull on a single row
for row in rows:
    # Pull out values in cell
    message = row.columns.get(messagecolumncf).value
    username = row.columns.get(usernamecolumncf).value
    linenumber = decode(row.columns.get(linenumbercolumncf).value)

    rowKey = row.row

    print("Got row: " + rowKey + ":" + str(linenumber) + ":" + username + ":" +
          message)

# Open a scan over all comedy rows in Shakespeare
Example #24
def create_client(port):
    socket = TSocket.TSocket("localhost", port)
    protocol = THeaderProtocol.THeaderProtocol(socket)
    protocol.trans.set_max_frame_size(MAX_BIG_FRAME_SIZE)
    protocol.trans.open()
    return ThriftTest.Client(protocol)
Example #25
def main(cfg, reqhandle, resphandle):
    if cfg.unix:
        if cfg.addr == "":
            sys.exit("invalid unix domain socket: {}".format(cfg.addr))
        socket = TSocket.TSocket(unix_socket=cfg.addr)
    else:
        try:
            (host, port) = cfg.addr.rsplit(":", 1)
            if host == "":
                host = "localhost"
            socket = TSocket.TSocket(host=host, port=int(port))
        except ValueError:
            sys.exit("invalid address: {}".format(cfg.addr))

    transport = TRecordingTransport(socket, reqhandle, resphandle)

    if cfg.transport == "framed":
        transport = TTransport.TFramedTransport(transport)
    elif cfg.transport == "unframed":
        transport = TTransport.TBufferedTransport(transport)
    elif cfg.transport == "header":
        transport = THeaderTransport.THeaderTransport(
            transport,
            client_type=THeaderTransport.CLIENT_TYPE.HEADER,
        )
    else:
        sys.exit("unknown transport {0}".format(cfg.transport))

    transport.open()

    if cfg.protocol == "binary":
        protocol = TBinaryProtocol.TBinaryProtocol(transport)
    elif cfg.protocol == "compact":
        protocol = TCompactProtocol.TCompactProtocol(transport)
    elif cfg.protocol == "json":
        protocol = TJSONProtocol.TJSONProtocol(transport)
    elif cfg.protocol == "finagle":
        protocol = TFinagleProtocol(transport, client_id="thrift-playground")
    else:
        sys.exit("unknown protocol {0}".format(cfg.protocol))

    if cfg.service is not None:
        protocol = TMultiplexedProtocol.TMultiplexedProtocol(
            protocol, cfg.service)

    client = Example.Client(protocol)

    try:
        if cfg.method == "ping":
            client.ping()
            print("client: pinged")
        elif cfg.method == "poke":
            client.poke()
            print("client: poked")
        elif cfg.method == "add":
            if len(cfg.params) != 2:
                sys.exit("add takes 2 arguments, got: {0}".format(cfg.params))

            a = int(cfg.params[0])
            b = int(cfg.params[1])
            v = client.add(a, b)
            print("client: added {0} + {1} = {2}".format(a, b, v))
        elif cfg.method == "execute":
            param = Param(return_fields=cfg.params,
                          the_works=TheWorks(
                              field_1=True,
                              field_2=0x7f,
                              field_3=0x7fff,
                              field_4=0x7fffffff,
                              field_5=0x7fffffffffffffff,
                              field_6=-1.5,
                              field_7=u"string is UTF-8: \U0001f60e",
                              field_8=b"binary is bytes: \x80\x7f\x00\x01",
                              field_9={
                                  1: "one",
                                  2: "two",
                                  3: "three"
                              },
                              field_10=[1, 2, 4, 8],
                              field_11=set(["a", "b", "c"]),
                              field_12=False,
                          ))

            try:
                result = client.execute(param)
                print("client: executed {0}: {1}".format(param, result))
            except AppException as e:
                print("client: execute failed with IDL Exception: {0}".format(
                    e.why))
        else:
            sys.exit("unknown method {0}".format(cfg.method))
    except Thrift.TApplicationException as e:
        print("client exception: {0}: {1}".format(e.type, e.message))

    if cfg.request is None:
        # bytearray() iteration yields ints under both Python 2 and 3
        req = "".join("%02X " % x for x in bytearray(reqhandle.getvalue())).strip()
        print("request: {}".format(req))
    if cfg.response is None:
        resp = "".join("%02X " % x
                       for x in bytearray(resphandle.getvalue())).strip()
        print("response: {}".format(resp))

    transport.close()
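main() expects a cfg object exposing addr/unix/transport/protocol/service/method/params plus request/response flags; a hedged driver using argparse.Namespace and in-memory recording handles could look like this (field values are placeholders inferred from the dispatch above):

import io
from argparse import Namespace

cfg = Namespace(addr="localhost:9090", unix=False,
                transport="framed", protocol="binary",
                service=None, method="add", params=["2", "3"],
                request=None, response=None)
reqhandle, resphandle = io.BytesIO(), io.BytesIO()
main(cfg, reqhandle, resphandle)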
Example #26
"""
Create an HBase table. Reference: https://cloud.tencent.com/document/product/589/12309
"""
from thrift import Thrift
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hbase import Hbase
from hbase.ttypes import ColumnDescriptor, Mutation, BatchMutation, TRegionInfo
from hbase.ttypes import IOError, AlreadyExists

socket = TSocket.TSocket(host='192.168.40.188', port=9090)
socket.setTimeout(5000)

transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)

client = Hbase.Client(protocol)
transport.open()

new_table = ColumnDescriptor(name='cf:', maxVersions=1)
client.createTable('thrift_test_1', [new_table])

tables = client.getTableNames()
transport.close()

print(tables)
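The example imports Mutation and BatchMutation but never uses them; writing and reading back one cell in the new table would look roughly like the sketch below (row key and value are arbitrary, and these calls belong before the transport is closed). Older generated bindings omit the trailing attributes argument.

mutations = [Mutation(column='cf:greeting', value='hello')]
client.mutateRow('thrift_test_1', 'row-1', mutations, {})

cells = client.get('thrift_test_1', 'row-1', 'cf:greeting', {})
print(cells[0].value if cells else None)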
Example #27
def handler(event, context):
    start_time = time.time()
    bucket = event['data_bucket']
    worker_index = event['rank']
    num_workers = event['num_workers']
    key = 'training_{}.pt'.format(worker_index)

    print('data_bucket = {}\n worker_index:{}\n num_worker:{}\n key:{}'.format(bucket, worker_index, num_workers, key))

    # Set thrift connection
    # Make socket
    transport = TSocket.TSocket(constants.HOST, constants.PORT)
    # Buffering is critical. Raw sockets are very slow
    transport = TTransport.TBufferedTransport(transport)
    # Wrap in a protocol
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    # Create a client to use the protocol encoder
    t_client = ParameterServer.Client(protocol)
    # Connect!
    transport.open()

    # test thrift connection
    ps_client.ping(t_client)
    print("create and ping thrift server >>> HOST = {}, PORT = {}"
          .format(constants.HOST, constants.PORT))

    # read file from s3
    readS3_start = time.time()
    s3.Bucket(bucket).download_file(key, os.path.join(local_dir, training_file))
    s3.Bucket(bucket).download_file(test_file, os.path.join(local_dir, test_file))
    print("read data cost {} s".format(time.time() - readS3_start))


    # preprocess dataset
    preprocess_start = time.time()

    trainset = torch.load(os.path.join(local_dir, training_file))
    testset = torch.load(os.path.join(local_dir, test_file))
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
    testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False)
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    print("preprocess data cost {} s".format(time.time() - preprocess_start))

    device = 'cpu'
    torch.manual_seed(1234)

    #Model
    print('==> Building model..')
    # net = VGG('VGG19')
    # net = ResNet18()
    # net = ResNet50()
    # net = PreActResNet18()
    # net = GoogLeNet()
    # net = DenseNet121()
    # net = ResNeXt29_2x64d()
    net = MobileNet()
    # net = MobileNetV2()
    # net = DPN92()
    # net = ShuffleNetG2()
    # net = SENet18()
    # net = ShuffleNetV2(1)
    # net = EfficientNetB0()

    net = net.to(device)

    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=5e-4)


    # Loss and Optimizer
    # Softmax is internally computed.
    # Set parameters to be updated.
    # criterion = torch.nn.CrossEntropyLoss()
    # optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)

    # register model
    model_name = "dnn"
    weight = [param.data.numpy() for param in net.parameters()]
    weight_shape = [layer.shape for layer in weight]
    weight_size = [layer.size for layer in weight]
    weight_length = sum(weight_size)

    ps_client.register_model(t_client, worker_index, model_name, weight_length, num_workers)
    ps_client.exist_model(t_client, model_name)

    print("register and check model >>> name = {}, length = {}".format(model_name, weight_length))

    # Training the Model
    train_start = time.time()
    iter_counter = 0

    for epoch in range(NUM_EPOCHS):

        epoch_start = time.time()

        net.train()
        num_batch = 0
        train_acc = Accuracy()
        train_loss = Average()

        for batch_idx, (inputs, targets) in enumerate(trainloader):

            # print("------worker {} epoch {} batch {}------".format(worker_index, epoch+1, batch_idx+1))
            batch_start = time.time()
            
            # pull latest model
            pull_start = time.time()
            ps_client.can_pull(t_client, model_name, iter_counter, worker_index)
            latest_model = ps_client.pull_model(t_client, model_name, iter_counter, worker_index)
            latest_model = np.asarray(latest_model,dtype=np.float32)
            pull_time = time.time() - pull_start

            # update the model
            offset = 0
            for layer_index, param in enumerate(net.parameters()):

                layer_value = latest_model[offset : offset + weight_size[layer_index]].reshape(weight_shape[layer_index])
                param.data = torch.from_numpy(layer_value)

                offset += weight_size[layer_index]
    
            # Forward + Backward + Optimize
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            loss = F.cross_entropy(outputs, targets)
            
            optimizer.zero_grad()
            loss.backward()
            
            train_acc.update(outputs, targets)
            train_loss.update(loss.item(), inputs.size(0))
            
            # flatten and concat gradients of weight and bias
            for index, param in enumerate(net.parameters()):
                if index == 0:
                    flattened_grad = param.grad.data.numpy().flatten()
                else:
                    flattened_grad = np.concatenate((flattened_grad, param.grad.data.numpy().flatten()))
    
            flattened_grad = flattened_grad * -1
            
            # push gradient to PS
            push_start = time.time()
            ps_client.can_push(t_client, model_name, iter_counter, worker_index)
            ps_client.push_grad(t_client, model_name, flattened_grad, learning_rate, iter_counter, worker_index)
            ps_client.can_pull(t_client, model_name, iter_counter+1, worker_index)      # sync all workers
            push_time = time.time() - push_start

            iter_counter += 1
            num_batch += 1
            
            step_time = time.time() - batch_start
            
            print("Epoch:[{}/{}], Step:[{}/{}];\n Training Loss:{}, Training accuracy:{};\n Step Time:{}, Calculation Time:{}, Communication Time:{}".format(
                epoch, NUM_EPOCHS, num_batch, len(trainloader), train_loss, train_acc, step_time, step_time - (pull_time + push_time), pull_time + push_time))

        # Test the Model
        net.eval()
        test_loss = Average()
        test_acc = Accuracy()
        
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(testloader):
                
                inputs, targets = inputs.to(device), targets.to(device)
                
                outputs = net(inputs)
                
                loss = F.cross_entropy(outputs, targets)
                
                test_loss.update(loss.item(), inputs.size(0))
                test_acc.update(outputs, targets)
        # correct = 0
        # total = 0
        # test_loss = 0
        # for items, labels in validation_loader:
        #     items = Variable(items.view(-1, NUM_FEATURES))
        #     labels = Variable(labels)
        #     outputs = model(items)
        #     test_loss += criterion(outputs, labels).data
        #     _, predicted = torch.max(outputs.data, 1)
        #     total += labels.size(0)
        #     correct += (predicted == labels).sum()

        print('Time = {:.4f}, accuracy of the model on test set: {}, loss = {}'
              .format(time.time() - train_start, test_acc, test_loss))

    end_time = time.time()
    print("Elapsed time = {} s".format(end_time - start_time))
Example #28
 def get_client_transport(self, service):
     host, port = service.get_host_port()
     transport = TTransport.TFramedTransport(TSocket.TSocket(host, port))
     protocol = TBinaryProtocol.TBinaryProtocol(transport)
     transport.open()
     return LucidaService.Client(protocol), transport
Example #29
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol

from h2oai_scoring import ScoringService
from h2oai_scoring.ttypes import Row

# ------------------------------------------------------------
# Name        Type      Range
# ------------------------------------------------------------
# sepal_len   float32   [4.400000095367432, 7.699999809265137]
# sepal_wid   float32   [2.0, 4.199999809265137]
# petal_len   float32   [1.0, 6.699999809265137]
# petal_wid   float32   [0.10000000149011612, 2.5]
# ------------------------------------------------------------

socket = TSocket.TSocket('localhost', 9090)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = ScoringService.Client(protocol)
transport.open()

server_hash = client.get_hash()
print('Scoring server hash: {}'.format(server_hash))

print('Scoring individual rows...')
row1 = Row()
row1.sepalLen = 5.199999809265137  # sepal_len (float32 per the table above)
row1.sepalWid = 2.0  # sepal_wid
row1.petalLen = 1.2999999523162842  # petal_len
row1.petalWid = 1.5  # petal_wid
    def writeRequestToFog(self, microbatchID, streamId, filePath, data,
                          fogReplicaMap, yetAnotherMap, sizeChoice, setLease,
                          erasureCode, metaKeyValueMap):

        #/home/swamiji/eclipse-workspace/edgefs_Europar/EdgeServer/Audio_02_06_2019_20_57_02.mp3

        #Read data and send it
        # path = '/home/swamiji/phd/myFile.txt'
        #path = filePath
        #file = open(path,'r')
        #data = file.read() # add try catch here
        print("read the file ", len(data))

        #statinfo = os.stat(path)
        #dataLength = statinfo.st_size
        encodedSpace, util = self.encodeFreeSpace(
            len(data))  #this is the length of the file

        #lets see if there is an actual need to renew lease though we will be calling renew method anyways

        print("fog ip, fog port , talking to fog for replica locations ",
              FOG_IP, FOG_PORT)
        #write request going to a fog
        # client,transport = self.openSocketConnection(FOG_IP,FOG_PORT,FOG_SERVICE) #127.0.0.1 9091
        transport = TSocket.TSocket(FOG_IP, FOG_PORT)

        # Buffering is critical. Raw sockets are very slow
        transport = TTransport.TFramedTransport(transport)

        # Wrap in a protocol
        protocol = TBinaryProtocol.TBinaryProtocol(transport)

        # Create a client to use the protocol encoder
        myClient = FogService.Client(protocol)

        # Connect!
        transport.open()

        #
        blackListedFogs = []
        #EdgeInfoData(nodeId,nodeIp,port,reliability,storage)getWriteLocations
        # list<WritableFogData> getWriteLocations(1: byte dataLength, 2: Metadata metadata,
        #                                     3: list<i16> blackListedFogs, 4:EdgeInfoData selfInfo)
        global STREAM_ID
        global CLIENT_ID
        global SESSION_SECRET
        metaData = Metadata()
        metaData.clientId = CLIENT_ID
        metaData.sessionSecret = SESSION_SECRET
        metaData.mbId = microbatchID
        metaData.streamId = streamId
        metaData.timestamp = int(time.time() * 1000)
        additional_prop = {}
        additional_prop["Name"] = "Sheshadri"
        metaData.properties = json.dumps(additional_prop)
        metaData.compFormat = COMP_FORMAT
        metaData.uncompSize = len(data)
        metaData.isErasureCoded = False

        #print EDGE_ID,EDGE_IP,EDGE_PORT,EDGE_RELIABILITY,encodedSpace
        #edgeInfo = EdgeInfoData(EDGE_ID,EDGE_IP,EDGE_PORT,EDGE_RELIABILITY,encodedSpace)
        #print "here also ",edgeInfo
        print("encodedSpace ", encodedSpace)

        if erasureCode == "1":
            metaData.isErasureCoded = True

            self.renew_lease(metaData.streamId, metaData.clientId,
                             metaData.sessionSecret, EXPECTED_LEASE,
                             ESTIMATE_PUT_NEXT, metaData.mbId, setLease)

            timestamp_record_getWrite = str(
                microbatchID) + "," + "writeErasureCoded,starttime=" + repr(
                    time.time()) + ","
            result = myClient.writeErasureCoded(metaData, data)
            timestamp_record_getWrite = timestamp_record_getWrite + "endtime=" + repr(
                time.time()) + "," + str(sizeChoice) + ",writeStatus=" + str(
                    result.status) + '\n'

            self.closeSocket(transport)

            with open(BASE_LOG + 'logs.txt', 'a') as myLogs:
                myLogs.write(timestamp_record_getWrite)

            #the response type is BlockMetadataUpdateResponse; the original code
            #returned result.status before these checks, leaving them unreachable
            response = self.increment_block_count(metaData, setLease)
            if response.code == -1:
                #this blockId is already written
                #In our designed experiment, different clients are writing to different regions so this
                #issue will not occur. However this is kept to indicate that such a scenario can occur
                #with concurrent clients
                print("BlockId : " + str(microbatchID) +
                      " is already written, failing the write")
                return -1
            elif response.code == -2:
                #lease expired
                print("Lease expired, should renew the lease before trying again")
                return -2
            elif response.code == -3:
                #client does not hold the lock
                print("Client does not hold the lock, should open the stream before writing")
                return -3
            else:
                return int(result.status)

        timestamp_record_getWrite = str(microbatchID) + "," + str(
            -100) + ", local, " + "getWriteLocations ,starttime = " + repr(
                time.time()) + ","
        #result = myClient.getWriteLocations(encodedSpace,metaData,blackListedFogs,edgeInfo) #datalength,
        result = myClient.getWriteLocations(encodedSpace, metaData,
                                            blackListedFogs, True)
        timestamp_record_getWrite = timestamp_record_getWrite + "endtime = " + repr(
            time.time()) + " , " + str(sizeChoice) + '\n'

        #we are calculating replicas using getWriteLocations()
        yetAnotherMap[microbatchID] = {}

        insideDict = {}

        for w in result:
            if w.edgeInfo is not None:
                fogReplicaMap["local"] = fogReplicaMap.get("local", 0) + 1
                insideDict["local"] = insideDict.get("local", 0) + 1
            else:
                nodeId = str(w.node.nodeId)
                fogReplicaMap[nodeId] = fogReplicaMap.get(nodeId, 0) + 1
                insideDict[nodeId] = insideDict.get(nodeId, 0) + 1

        #util map
        yetAnotherMap[microbatchID] = insideDict

        #before sending the actual writes, lets add the checksum now as there is no point
        #in sending the checksum while identifying replicas
        hash_md5 = hashlib.md5()
        hash_md5.update(data)
        metaData.checksum = hash_md5.hexdigest()

        print("the write locations are ", result)

        timestamp_record = str(
            microbatchID) + ",-1, local ,write req,starttime = " + repr(
                time.time()) + ","

        #lets renew the lease. The behaviour should adhere with the policy of the lease time
        #left in comparison to the time taken to complete the operation
        print("Lets first issue request to renew the lease for putNext()")
        self.renew_lease(metaData.streamId, metaData.clientId,
                         metaData.sessionSecret, EXPECTED_LEASE,
                         ESTIMATE_PUT_NEXT, metaData.mbId, setLease)

        #ISSUE ALERT:: metaData is prepared above, so clientId and sessionSecret may initially hold the
        #dummy global values; renew_lease() runs afterwards, and only once it returns does the client hold
        #the proper sessionSecret for the first write. The two assignments below therefore refresh these
        #fields, which cannot simply be dropped since both are required (thrift) fields.
        metaData.clientId = CLIENT_ID
        metaData.sessionSecret = SESSION_SECRET

        index = 1
        processes = []
        # loop is for different fogs(and edges) returned WRITING STARTS HERE : ISHAN
        for writable in result:
            writeProcess = multiprocessing.Process(
                target=self.writeToEdge,
                args=(writable, microbatchID, streamId, data, EDGE_ID, index,
                      sizeChoice, metaData, metaKeyValueMap))
            processes.append(writeProcess)
            writeProcess.start()
            index = index + 1

        for p in processes:
            p.join()

        print("all writes to replicas finished ")
        self.closeSocket(transport)

        timestamp_record = timestamp_record + "endtime = " + repr(
            time.time()) + " , " + str(sizeChoice) + '\n'
        with open(BASE_LOG + 'logs.txt', 'a') as myLogs:
            myLogs.write(timestamp_record)
            myLogs.write(timestamp_record_getWrite)  #timestamp for getWriteLocations

        #the response type is BlockMetadataUpdateResponse
        response = self.increment_block_count(metaData, setLease)
        if response.code == -1:
            #this blockId is already written
            #In our designed experiment, different clients are writing to different regions so this
            #issue will not occur. However this is kept to indicate that such a scenario can occur
            #with concurrent clients
            print("BlockId : " + str(microbatchID) +
                  " is already written, failing the write")
            return -1
        elif response.code == -2:
            #lease expired
            print("Lease expired, should renew the lease before trying again")
            return -2
        elif response.code == -3:
            #client does not hold the lock
            print(
                "Client does not hold the lock, should open the stream before writing"
            )
            return -3
        else:
            return response.code