Example no. 1
def refresh_machine_resource():
    Redis.psetex("volumeOperate", 1000, "ll")
    success, cluster_list = get_cluster_list()
    # print cluster_list
    resource_list = list()
    disks_dict = dict()
    if not success:
        logging.warning("Can't get cluster list.")
        return
    total = 0
    used = 0
    for machine in cluster_list:
        resource, memory_usage, cpu_usages, pure_disks, total_size, used_size = monitor_resource(machine)
        total += total_size
        used += used_size
        # print resource
        # Log an error if any machine's SNMP service is down.
        if none(resource, memory_usage, cpu_usages, pure_disks):
            logging.error('Please start the snmp service on machine: ' + machine + '.')
            return
        resource_list.append(resource)
        disks_dict[machine] = pure_disks
        Redis.lpush(MEMORY_USAGE_PREFIX + machine, memory_usage)
        for i in range(len(resource['cpus'])):
            Redis.lpush(CPU_USAGE_PREFIX + machine + ':' + str(i), cpu_usages[i])
    Redis.hmset(OVERALL_CAPACITY, {'total': total, 'used': used, 'available': (total - used)})
    Redis.set(CLUSTER_RESOURCE, json.dumps(resource_list))
    Redis.set(CLUSTER_DISKS, json.dumps(disks_dict))
    Redis.psetex("volumeOperate", 1, "ll")
Example no. 2
def delete_volume():
    try:
        Redis.psetex("Refresh", 10000, "ll")
        time = Redis.get("volumeOperate")
        if time:
            time.sleep(0.5)
        volume_name = request.args.get('name')
        logging.warning(volume_name)
        if none(volume_name):
            return jsonify(success=False, message=INVALID_PARAM)
        volume_name = str(volume_name)

        # check that the volume exists and has already been stopped.
        volume_names = list(Redis.sget(VOLUME_NAMES))
        volume_status = Redis.hget(VOLUME_PREFIX + volume_name, VOLUME_STATUS)
        if volume_name not in volume_names or volume_status != VOLUME_STATUS_STOPPED:
            return jsonify(success=False, message=INVALID_PARAM)

        success, out = volume_delete(volume_name)

        # clear volume information in redis
        if success:
            # remove data from redis
            Redis.srem(VOLUME_NAMES, volume_name)
            Redis.delete(BRICK_PREFIX + volume_name)
            Redis.delete(VOLUME_PREFIX + volume_name)

            # remove data from samba config file
            volume_samba(volume_name, enable=False)
            Redis.psetex("Refresh", 1, "ll")
        return jsonify(success=success, message=out)

    except TypeError:
        return jsonify(success=False, message=NO_INFORMATION)
Example no. 3
def stop_samba():
    volume_name = request.args.get('volume_name')
    if none(volume_name):
        return jsonify(success=False, message=INVALID_PARAM)
    volume_name = str(volume_name)

    volume_samba(volume_name, enable=False)
    Redis.hset(VOLUME_PREFIX + volume_name, VOLUME_SAMBA, 'off')
    return jsonify(success=True, message=None)
Example no. 4
def stop_nfs():
    volume_name = request.args.get('volume_name')
    if none(volume_name):
        return jsonify(success=False, message=INVALID_PARAM)
    volume_name = str(volume_name)

    success, out = volume_nfs(volume_name, enable=False)
    if success:
        Redis.hset(VOLUME_PREFIX + volume_name, VOLUME_NFS, 'off')
    return jsonify(success=success, message=out)
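
These handlers read request.args and return jsonify responses, so they appear to be Flask view functions. A hedged sketch of how stop_nfs might be wired to a route; the app object and the URL path are assumptions, not taken from the source:

from flask import Flask

app = Flask(__name__)

# Hypothetical registration, e.g. GET /volume/nfs/stop?volume_name=vol0
app.add_url_rule('/volume/nfs/stop', 'stop_nfs', stop_nfs, methods=['GET'])
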
Example no. 5
def start_volume():
    Redis.psetex("Refresh", 20000, "ll")
    volume_operating = Redis.get("volumeOperate")
    if volume_operating:
        time.sleep(0.5)
    volume_name = request.args.get('volume_name')
    if none(volume_name):
        return jsonify(success=False, message=INVALID_PARAM)
    volume_name = str(volume_name)
    success, out = volume_start(volume_name)
    Redis.psetex("Refresh", 1, "ll")
    return jsonify(success=success, message=out)
Example no. 6
def restart_volume():
    Redis.psetex("Refresh", 20000, "ll")
    volume_operating = Redis.get("volumeOperate")
    if volume_operating:
        time.sleep(0.5)
    volume_name = request.args.get('volume_name')

    if none(volume_name):
        return jsonify(success=False, message=INVALID_PARAM)
    volume_name = str(volume_name)
    volume_status = Redis.hget(VOLUME_PREFIX + volume_name, VOLUME_STATUS)
    if none(volume_status) or volume_status == VOLUME_STATUS_STOPPED:
        return jsonify(success=False, message=INVALID_PARAM)

    stop_success, stop_out = volume_stop(volume_name)
    if stop_success:
        start_success, start_out = volume_start(volume_name)
        if start_success:
            Redis.psetex("Refresh", 1, "ll")
            return jsonify(success=True, message=start_out)
        else:
            return jsonify(success=False, message=start_out)
    else:
        return jsonify(success=False, message=stop_out)
Example no. 7
def refresh_network_io():
    success, cluster_list = get_cluster_list()
    if not success:
        return
    list_network_in_speed = list()
    list_network_out_speed = list()
    old_time = Redis.get(TIMESTAMP)
    timestamp = time.time()
    timeStamp = timestamp * 1000
    Redis.set(TIMESTAMP, timestamp)
    timeperiod = float(timestamp) - float(old_time)
    for machine in cluster_list:
        network_in_pre = Redis.get(NETWORKIO_IN+machine)
        network_out_pre = Redis.get(NETWORKIO_OUT+machine)
        # print network_in_pre
        network_io_in, network_io_out = network_io(machine)
        if none(network_io_in, network_io_out):
            return
        Redis.set(NETWORKIO_IN+machine, network_io_in)
        Redis.set(NETWORKIO_OUT+machine, network_io_out)
        network_io_in_speed = int(network_io_in) - int(network_in_pre)
        network_io_out_speed = int(network_io_out) - int(network_out_pre)
        if network_io_out_speed <= 0:
            network_io_out_speed = 0
        if network_io_in_speed <= 0:
            network_io_in_speed = 0
        network_io_in_speed_throughput = network_io_in_speed/timeperiod
        network_io_out_speed_throughput = network_io_out_speed/timeperiod
        list_network_in_speed.append(network_io_in_speed_throughput)
        list_network_out_speed.append(network_io_out_speed_throughput)
        Redis.lpush(NETWORKIO_NAME_IN_INIT + machine, {DATA: network_io_in_speed_throughput, TIME: timeStamp})
        Redis.lpush(NETWORKIO_NAME_OUT_INIT + machine, {DATA: network_io_out_speed_throughput, TIME: timeStamp})

    sum_network_in_speed = sum(list_network_in_speed)
    sum_network_out_speed = sum(list_network_out_speed)
    Redis.lpush(NETWORKIO_IN_SUM_INIT, {DATA: sum_network_in_speed, TIME: timeStamp})
    Redis.lpush(NETWORKIO_OUT_SUM_INIT, {DATA: sum_network_out_speed, TIME: timeStamp})
Example no. 8
def refresh_disk_io():
    success, cluster_list = get_cluster_list()
    # print cluster_list
    if not success:
        logging.warning("Can't get cluster list.")
        return
    sum_diskio_writes = list()
    sum_diskio_reads = list()
    old_time = Redis.get(TIMESTAMP)

    timestamp = time.time()
    timeStamp = timestamp*1000
    Redis.set(TIMESTAMP, timestamp)
    timeperiod = float(timestamp) - float(old_time)
    ISOTIMEFORMAT = "%Y-%m-%d %X"
    CurrentTime = time.strftime(ISOTIMEFORMAT, time.localtime())
    for machine in cluster_list:
        # print machine
        diskio_pre = Redis.get(DISKWRITE+machine)
        # print diskio_pre
        diskio_read_pre = Redis.get(DISKREAD+machine)
        disk_infmat, diskio_read_infmat = disk_io(machine)
        if none(disk_infmat, diskio_read_infmat):
            return
        Redis.set(DISKWRITE+machine, disk_infmat)
        Redis.set(DISKREAD+machine, diskio_read_infmat)

        # diskwrite: convert the stored string back into a list
        diskio_pre_list = diskio_pre.replace("[", "").replace("]", "").replace("'", "").strip().split(",")

        # diskread: convert the stored string back into a list
        diskio_read_pre_list = diskio_read_pre.replace("[", "").replace("]", "").replace("'", "").strip().split(",")

        # per-disk write deltas since the previous sample
        diskio_pre_list_num = [int(item) for item in diskio_pre_list]
        disk_infmat_num = [int(item) for item in disk_infmat]
        write_D_value = [cur - prev for cur, prev in zip(disk_infmat_num, diskio_pre_list_num)]
        sum_diskio_writes.append(write_D_value)

        sum_name_write_throughput = sum(write_D_value) / timeperiod
        Redis.lpush(DISK_NAME_WRITE + machine, {DATA: sum_name_write_throughput, TIME: timeStamp})


        # per-disk read deltas since the previous sample
        diskio_read_pre_list_num = [int(item) for item in diskio_read_pre_list]
        diskio_read_infmat_num = [int(item) for item in diskio_read_infmat]
        read_D_value = [cur - prev for cur, prev in zip(diskio_read_infmat_num, diskio_read_pre_list_num)]
        sum_diskio_reads.append(read_D_value)

        sum_name_read_throughput = sum(read_D_value) / timeperiod
        Redis.lpush(DISK_NAME_READ + machine, {DATA: sum_name_read_throughput, TIME: timeStamp})
    # per-machine totals converted to throughput, then cluster-wide sums
    sum_diskio_write = [sum(writes) / timeperiod for writes in sum_diskio_writes]
    sum_diskio_read = [sum(reads) / timeperiod for reads in sum_diskio_reads]
    sum_writes = sum(sum_diskio_write)
    sum_reads = sum(sum_diskio_read)

    Redis.lpush(DISKWRITEALL, {DATA: sum_writes, TIME: timeStamp})
    Redis.lpush(DISKREADALL, {DATA: sum_reads, TIME: timeStamp})
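
refresh_disk_io (like refresh_network_io above) computes rates from the previous counters stored under DISKWRITE/DISKREAD and TIMESTAMP, so it only yields meaningful numbers when run at a regular interval. A minimal scheduling sketch, assuming a plain daemon thread and a five-second period; the project's actual scheduler is not shown in these examples:

import threading
import time

def run_disk_io_refresher(interval=5):
    # Hypothetical scheduler: call refresh_disk_io roughly every `interval` seconds.
    # The interval and the use of a daemon thread are assumptions.
    while True:
        refresh_disk_io()
        time.sleep(interval)

worker = threading.Thread(target=run_disk_io_refresher)
worker.daemon = True
worker.start()
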
Example no. 9
def volume_read_perf():
    volume_name = request.args.get('volume_name')
    if none(volume_name):
        return jsonify(success=False, message=INVALID_PARAM)
    # placeholder figures; real read-performance measurement is not implemented here
    perf = [random.randint(1, 10), random.randint(1, 10)]
    return jsonify(success=True, message=perf)
Example no. 10
def add_volume():
    try:
        Redis.psetex("Refresh", 25000, "ll")
        volume_operating = Redis.get("volumeOperate")
        if volume_operating:
            time.sleep(0.5)
        # parse request parameters
        volume_name = request.args.get('name')
        capacity = request.args.get('capacity')
        redundancy_ratio = request.args.get('redundancy_ratio')
        if none(volume_name, capacity, redundancy_ratio):
            return jsonify(success=False,
                           message='Request form lacks parameters.')

        volume_name = str(volume_name)
        capacity = str(capacity)
        redundancy_ratio = str(redundancy_ratio)
        logging.warning(redundancy_ratio)

        # query nodes and disks, then record the max disk index per node
        cluster_disks = json.loads(Redis.get(CLUSTER_DISKS))
        success, cluster_list = get_cluster_list()
        max_dict_idx = dict()
        if not success:
            return jsonify(success=False, message=NO_INFORMATION)
        keys = cluster_disks.keys()
        for node in cluster_list:
            if node not in keys:
                return jsonify(success=False, message=DIS_MATCH_INFORMATION)
            else:
                max_dict_idx[node] = len(cluster_disks[node])
        # record the create parameters for debugging before issuing the call
        message1 = {
            "tag": "volume_create",
            "cluster_list": cluster_list,
            "max_dict_idx": max_dict_idx,
            "volume_name": volume_name,
            "capacity": capacity,
            "redundancy_ratio": redundancy_ratio
        }
        logging.warning(message1)
        Redis.lpush("test1", message1)

        success, out, actual_capacity = volume_create(cluster_list,
                                                      max_dict_idx,
                                                      volume_name, capacity,
                                                      redundancy_ratio)

        if success:
            volume_start(volume_name)
            # related configuration: enable quota and set redis
            enable_volume_quota(volume_name)
            set_volume_quota(volume_name, actual_capacity)
            Redis.hmset(
                VOLUME_PREFIX + volume_name, {
                    VOLUME_CAPACITY: actual_capacity,
                    VOLUME_USAGE: 0,
                    VOLUME_NFS: 'on',
                    VOLUME_SAMBA: 'off',
                    VOLUME_ISCSI: 'off',
                    VOLUME_SWIFT: 'off'
                })
            refresh_createvolume_status(volume_name)
            volume_info = get_volume_info(volume_name)
            Redis.psetex("Refresh", 1, "ll")
            return jsonify(success=True, message=volume_info)
        else:
            return jsonify(success=False, message=out)
    except (KeyError, TypeError) as e:
        return jsonify(success=False, message=str(e))
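
add_volume reads name, capacity and redundancy_ratio from the query string. A hedged example of calling it over HTTP; the host, port and URL path are hypothetical, only the parameter names come from the handler above:

import requests

# Hypothetical endpoint; adjust the URL to wherever add_volume is actually routed.
resp = requests.get('http://localhost:5000/volume/add', params={
    'name': 'vol0',
    'capacity': '10',
    'redundancy_ratio': '2',
})
print(resp.json())  # expected shape: {"success": true/false, "message": ...}
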