Example #1
    def __init__(self, logger, host, port):
        cfg = SyncObjConf()
        cfg.fullDumpFile = 'raft.bin'
        cfg.logCompactionMinTime = 10
        cfg.useFork = True

        self.serv = ThreadXMLRPCServer(
            (host, port),
            logRequests=True)

        for name in self._rpc_methods:
            self.serv.register_function(getattr(self, name))

        self.logger = logger
        self.host = host
        self.port = port

        self.lock = RWLock()

        self.act_vol_serv = dict()
        self.writable_vid = ReplList() # writable volume ids

        self.vid = ReplCounter()
        self.fkey = ReplCounter()
        self.db = ReplDict()

        super(Master, self).__init__(config.addr, config.clusters, cfg, consumers=[self.vid, self.fkey, self.db, self.writable_vid])
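ThreadXMLRPCServer used above is not part of the standard library; a minimal sketch of how such a class is commonly defined (an assumption, not taken from this code base) mixes ThreadingMixIn into SimpleXMLRPCServer:

from socketserver import ThreadingMixIn
from xmlrpc.server import SimpleXMLRPCServer

class ThreadXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer):
    """XML-RPC server that handles each request in its own thread."""
    pass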
Example #2
def test_ReplList():
    l = ReplList()
    l.reset([1, 2, 3], _doApply=True)
    assert l.rawData() == [1, 2, 3]
    l.set(1, 10, _doApply=True)
    assert l.rawData() == [1, 10, 3]
    l.append(42, _doApply=True)
    assert l.rawData() == [1, 10, 3, 42]
    l.extend([5, 6], _doApply=True)
    assert l.rawData() == [1, 10, 3, 42, 5, 6]
    l.insert(2, 66, _doApply=True)
    assert l.rawData() == [1, 10, 66, 3, 42, 5, 6]
    l.remove(66, _doApply=True)
    assert l.rawData() == [1, 10, 3, 42, 5, 6]
    l.pop(1, _doApply=True)
    assert l.rawData() == [1, 3, 42, 5, 6]
    l.sort(reverse=True, _doApply=True)
    assert l.rawData() == [42, 6, 5, 3, 1]
    assert l.index(6) == 1
    assert l.count(42) == 1
    assert l.get(2) == 5
    assert l[4] == 1
    assert len(l) == 5
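In the test above, _doApply=True applies each operation to the local data structure immediately, bypassing the Raft log, which is why no cluster is needed. A minimal sketch of the replicated path (addresses and ports are arbitrary assumptions) looks like this: two SyncObj nodes share a ReplList, and a change made through one node with sync=True becomes visible on the other once it is committed.

import time

from pysyncobj import SyncObj
from pysyncobj.batteries import ReplList

list_a, list_b = ReplList(), ReplList()
node_a = SyncObj('localhost:4321', ['localhost:4322'], consumers=[list_a])
node_b = SyncObj('localhost:4322', ['localhost:4321'], consumers=[list_b])

# wait until both nodes have joined the cluster
while not (node_a.isReady() and node_b.isReady()):
    time.sleep(0.1)

list_a.append('hello', sync=True)   # blocks until the entry is committed
time.sleep(1.0)                     # give node_b a moment to apply the log
print(list_b.rawData())             # expected: ['hello']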
Example #3
class Master(SyncObj):

    _rpc_methods = ['assign_volumn', 'assign_fid', 'find_volumn', 'find_writable_volumn',
            'volumn_status', 'node_status']

    def __init__(self, logger, host, port):
        cfg = SyncObjConf()
        cfg.fullDumpFile = 'raft.bin'
        cfg.logCompactionMinTime = 10
        cfg.useFork = True

        self.serv = ThreadXMLRPCServer(
            (host, port),
            logRequests=True)

        for name in self._rpc_methods:
            self.serv.register_function(getattr(self, name))

        self.logger = logger
        self.host = host
        self.port = port

        self.lock = RWLock()

        self.act_vol_serv = dict()
        self.writable_vid = ReplList() # writable volume ids

        self.vid = ReplCounter()
        self.fkey = ReplCounter()
        self.db = ReplDict()

        super(Master, self).__init__(config.addr, config.clusters, cfg, consumers=[self.vid, self.fkey, self.db, self.writable_vid])


    def update_master(self, masters):
        pass

    def _recover(self, vid, dead_vid, from_vid, to_vid):
        from_proxy = ServerProxy(self.act_vol_serv[from_vid])
        to_addr = self.act_vol_serv[to_vid]

        self.logger.info('Start migrating volumn %d from %s to %s ...' % (vid, from_vid, to_vid))
        from_proxy.migrate_volumn_to(vid, to_addr)
        self.logger.info('Migrating volumn %d from %s to %s succeeded' % (vid, from_vid, to_vid))
        vids = self.db[vid]
        vids.remove(dead_vid)
        vids.append(to_vid)
        self.db.set(vid, vids, sync=True)
        self.update_writable_volumn()
        self.logger.info('Remove %s, append %s' % (dead_vid, to_vid))

    def _check(self, dead_vid):
        self.logger.info('Monitoring dead volumn server %s ...' % dead_vid)

        t = 60

        while t > 0:
            time.sleep(1)
            if dead_vid in self.act_vol_serv.keys():
                self.logger.info('Volumn server %s came back online; stopping recovery' % dead_vid)
                _thread.exit()
            t -= 1

        for vid, vvids in self.db.items():
            if dead_vid in vvids:
                for recov_vid in vvids:
                    if recov_vid != dead_vid and recov_vid in self.act_vol_serv.keys():
                        from_vid = recov_vid
                        avl_vids = list(set(self.act_vol_serv.keys()) - set(vvids))
                        if avl_vids:
                            to_vid = random.choice(avl_vids)
                            _thread.start_new_thread(self._recover, (vid, dead_vid, from_vid, to_vid))
                        else:
                            self.logger.warning('No available volumn servers to migrate to')
                        break

    def update_writable_volumn(self, checkLeader=True):
        if checkLeader and not self._isLeader():
            return

        writable_vid = list()

        for vid, vvids in self.db.items():
            flag = True
            for vvid in vvids:
                if vvid not in self.act_vol_serv.keys():
                    flag = False
                    break
            if flag:
                writable_vid.append(vid)

        self.writable_vid.reset(writable_vid, sync=True)


    # check for volume servers that went offline and migrate their data
    def update_volumn(self, volumns):
        if self._isLeader():
            old_volumns = set(self.act_vol_serv.keys())
            new_volumns = set([volumn[0] for volumn in volumns])

            off_volumns = list(old_volumns - new_volumns)

            if off_volumns:
                self.logger.info('{} volumn servers went offline'.format(off_volumns))

            for off_volumn in off_volumns:
                _thread.start_new_thread(self._check, (off_volumn,))

        self.act_vol_serv.clear()
        for volumn in volumns:
            self.act_vol_serv[volumn[0]] = volumn[1]

        while not self._isReady():
            time.sleep(1)

        self.update_writable_volumn()

    def assign_volumn(self, size):
        vid = self.vid.inc(sync=True)

        # random.sample needs a sequence in Python 3, so materialise the keys
        vids = random.sample(list(self.act_vol_serv.keys()), 2)

        for vvid in vids:
            s = ServerProxy(self.act_vol_serv[vvid])
            s.assign_volumn(vid, size)

        self.db.set(vid, vids, sync=True)
        self.update_writable_volumn(False)

        return vid

    def assign_fid(self):
        if not self.writable_vid:
            return ''

        vid = random.choice(list(self.writable_vid))
        fkey = self.fkey.inc(sync=True)

        fid = '%d,%d' % (vid, fkey)
        return fid

    def find_volumn(self, vid):
        vids = self.db[vid]
        addrs = []

        for vid in vids:
            if vid in self.act_vol_serv:
                addrs.append(self.act_vol_serv[vid])

        return addrs

    def find_writable_volumn(self, vid):
        if vid in self.writable_vid:
            return self.find_volumn(vid)
        else:
            return []

    def volumn_status(self):
        res = dict()

        vol_status = dict()

        for vol_serv_id, vol_serv in self.act_vol_serv.items():
            try:
                s = ServerProxy(vol_serv)
                vv = s.status()
                vol_status[vol_serv_id] = vv
            except:
                pass


        for vid, vvids in self.db.items():
            sdoc = dict()
            ava_nodes = list(set(vol_status.keys()) & set(vvids))
            sdoc['tat_node_num'] = len(vvids)
            sdoc['ava_node_num'] = len(ava_nodes)

            if ava_nodes:
                vol_sdoc = vol_status[ava_nodes[0]]
                vdoc = vol_sdoc['vdb'][str(vid)]
                sdoc['total_size'] = vdoc['size']
                sdoc['used_size'] = vdoc['counter']
                sdoc['free_size'] = sdoc['total_size'] - sdoc['used_size']
            else:
                sdoc['total_size'] = 0
                sdoc['used_size'] = 0
                sdoc['free_size'] = 0

            res[str(vid)] = sdoc

        return res

    def node_status(self):
        res = dict()

        for vol_serv_id, vol_serv in self.act_vol_serv.items():
            try:
                s = ServerProxy(vol_serv)
                vv = s.status()

                vol_status = dict()
                vol_status['addr'] = vol_serv
                vol_status['total'] = vv['total']
                vol_status['used'] = vv['used']
                vol_status['free'] = vv['free']
                vol_status['nodes'] = list(vv['vdb'].keys())

                # iterate over a copy: removing from the list while iterating it
                # directly would skip elements
                for node in list(vol_status['nodes']):
                    vids = self.db.get(int(node), [])
                    if vol_serv_id not in vids:
                        vol_status['nodes'].remove(node)

                res[vol_serv_id] = vol_status
            except:
                self.logger.exception('Got an exception')

        return res


    def start(self):
        self.logger.info('Start serving at %s:%d' % (self.host, self.port))
        self.serv.serve_forever()
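A quick way to exercise the Master above is a plain XML-RPC client; the address below is an arbitrary assumption matching whatever host and port were passed to __init__:

from xmlrpc.client import ServerProxy

master = ServerProxy('http://localhost:9333')   # assumed master address
vid = master.assign_volumn(64 * 1024 * 1024)    # create a volume replicated on two servers
fid = master.assign_fid()                       # 'vid,fkey', or '' if no volume is writable
print(vid, fid)
print(master.volumn_status())
print(master.node_status())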
Example #4
from pysyncobj.batteries import ReplDict, ReplList

from pywebio import start_server
from pywebio.input import *
from pywebio.output import *
from pywebio.session import *
from pywebio import session
from raft_server import join_cluster, get_node_info

# maximum number of chat messages to keep
MAX_MESSAGES_CNT = 10**4

# admin account name
ADMIN_USER = '******'

chat_msgs = ReplList()  # chat history, entries are (name, msg)
node_user_cnt = ReplDict()  # number of users on each node
node_webui_addr = ReplDict()  # web chat UI address of each node

local_online_users = set()  # users currently online on this node

raft_server = None


def onStateChanged(oldState, newState, node):
    """Callback invoked when the node's Raft role changes."""
    states = ["follower", "candidate", "leader"]
    send_msg(ADMIN_USER,
             'Node `%s` changed role: `%s` -> `%s`' %
             (node, states[oldState], states[newState]),
             instant_output=False)
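send_msg itself is not part of this snippet; a plausible sketch (an assumption, not the original implementation) appends the message to the replicated list, trims it to MAX_MESSAGES_CNT, and optionally renders it right away with PyWebIO:

def send_msg(name, msg, instant_output=True):
    chat_msgs.append((name, msg))
    # keep only the newest MAX_MESSAGES_CNT entries in the replicated log
    if len(chat_msgs) > MAX_MESSAGES_CNT:
        chat_msgs.reset(chat_msgs.rawData()[-MAX_MESSAGES_CNT:])
    if instant_output:
        put_markdown('`%s`: %s' % (name, msg))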
Example #5
import sys
import time

from pysyncobj import SyncObj
from pysyncobj.batteries import ReplList


def create(res, err, label):
    print("Created a queue for label {}".format(label), res, err)


if __name__ == '__main__':
    if len(sys.argv) < 3:
        print('Usage: %s self_port partner1_port partner2_port ...' %
              sys.argv[0])
        sys.exit(-1)

    port = int(sys.argv[1])
    partners = ['localhost:%d' % int(p) for p in sys.argv[2:]]
    q = ReplList()
    sync = SyncObj('localhost:%d' % port, partners, consumers=[q])
    # for item in range(5):
    #     q.append(item)
    while True:
        q.insert(0, 10)          # replicated insert; applied once the cluster commits it
        print(q.rawData())
        time.sleep(10)

    # q.remove(4)
    # print(q.rawData())

    # q = FTQueue('localhost:{}'.format(port),partners)
    # num_clients = 3
    # label = 0
    # # while num_clients > 0:
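To try this example you would start one process per cluster node, e.g. (the file name and ports are arbitrary assumptions):

    python repl_list_demo.py 4321 4322 4323
    python repl_list_demo.py 4322 4321 4323
    python repl_list_demo.py 4323 4321 4322

Each process keeps inserting 10 at the head of the shared list, and every node prints the same replicated contents.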
Example #6
def main():
    # log everything to stderr because compose containers for some reason aren't logging stdout
    logging.basicConfig(level=logging.DEBUG,
                        filename='/proc/self/fd/2',
                        filemode='w')

    peers = (os.environ["NBDD_PEERS"].split(",")
             if "NBDD_PEERS" in os.environ else None)
    hostname = os.environ.get("NBDD_HOSTNAME")
    # contains all blocks for all devices as a contiguous list of bytes
    blocks = []
    # a list of all devices so we know the starting offset of a given device in `blocks`
    # (all devices are fixed size)
    volumes = []

    tracer = jaeger_client.Config(
        config={
            'sampler': {
                'type': 'const',
                'param': 1,
            },
            'logging': True,
        },
        service_name='nbd',
    ).initialize_tracer()

    if peers:
        LocalState.f = open('/tmp/blocks', 'r+b')
        write_cache = LoglessCache()
        LocalState.write_sharer = WriteSharer(peers, write_cache)
        _thread.start_new_thread(LocalState.write_sharer.listen_for_asks, ())
        LocalState.lock = threading.Lock()
        LocalState.hostname = hostname
        LocalState.write_count = 0
        blocks = ReplFile()
        volumes = ReplList()
        health_counter = ReplCounter()
        HealthHandler.counter = health_counter
        self_address = "{}:2001".format(hostname)
        peer_addresses = ["{}:2001".format(peer) for peer in peers]
        syncObj = SyncObj(self_address,
                          peer_addresses,
                          consumers=[blocks, volumes, health_counter])

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('0.0.0.0', 2000))
    sock.setblocking(True)
    sock.listen(1)

    httpd = HTTPServer(('0.0.0.0', 8080), HealthHandler)
    _thread.start_new_thread(httpd.serve_forever, ())

    # Prototype will listen to one client at a time
    # -- can be made concurrent without much extra work
    logging.info("NBD Server '{}' Starting with peers {}...".format(
        hostname, peers))
    while True:
        cxn, client = sock.accept()
        logging.info("Connection accepted from client {}".format(client))
        _thread.start_new_thread(handle_cxn, (cxn, blocks, volumes, tracer))
        logging.info(
            "Handed connection from client {} to a worker thread -- listening for next client"
            .format(client))
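One thing the example above skips is waiting for the Raft log to catch up before serving clients; a minimal sketch (reusing the names from main() above and assuming time is imported) polls pysyncobj's public readiness flag right after constructing the SyncObj:

    if peers:
        # block until this node has joined the Raft cluster and replayed the log
        while not syncObj.isReady():
            time.sleep(0.5)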