Example #1
import csv

from logger_util import get_logger


def write_to_csv(data, file, headers, write_header=True, mode="w+"):
    """Write an iterable of dicts to a CSV file with the given column headers."""
    logger = get_logger()
    with open(file, mode) as f:
        try:
            f_csv = csv.DictWriter(f, headers)
            if write_header:
                f_csv.writeheader()
            f_csv.writerows(data)
        except Exception:
            logger.exception("Failed to write data to {0}".format(file))
            raise
        else:
            logger.info("Wrote data to {0} successfully".format(file))
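A quick usage sketch; the rows and file name below are made up for illustration:

rows = [
    {"name": "alice", "status": "pass"},
    {"name": "bob", "status": "fail"},
]
write_to_csv(rows, "results.csv", headers=["name", "status"])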
Example #2
# from create_nfs_maps import *
# from create_build_area_tree import setup_cluster, change_ownership, change_group, chg_per_755, chg_per_777, cluster_files_with_644, change_ownership_perfect, change_group_perfect
import argparse

import logger_util

# cluster_zfs() is assumed to be provided by one of the project modules referenced above.

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Cluster file system creation on cluster server')
    parser.add_argument('-f', '--fileserver', type=str, required=True, help="Enter the cluster file server name, e.g. 'batfs2502'")
    parser.add_argument('-c', '--cluster', type=str, required=True, help="Enter the cluster name, e.g. 'Bfoo'")
    parser.add_argument('-q', '--quota', type=str, required=True, help="Enter the cluster quota, e.g. '100G'")
    parser.add_argument('-r', '--refquota', type=str, required=True, help="Enter the cluster refquota, e.g. '50G'")
    args = parser.parse_args()

    host_name = args.fileserver
    cluster_name = args.cluster
    quota = args.quota
    refquota = args.refquota
    logger = logger_util.get_logger()

    try:
        # cluster_zfs is expected to return a (return_code, stdout, stderr) tuple.
        ret, out, err = cluster_zfs(host_name, cluster_name)
        if ret != 0:
            logger.error("Failed to create the file system")
            print("############################################################")
            logger.error(''.join(err))
            # sys.exit()
        else:
            logger.info("Cluster file system was created successfully")
            print("############################################################")
    except Exception as err:
        logger.error("Something went wrong: {0}".format(err))
Example #3
from flask import Flask, request
from filterUtil import getSahiReportURL
from logger_util import get_logger

app = Flask(__name__)
logger = get_logger()


@app.route("/sahifailedresult", methods=['POST'])
def _sahi_filter():
    # Look up the Sahi report URL(s) for the posted failed-result payload.
    request_values = request.json
    data = getSahiReportURL(request_values)
    return data


if __name__ == "__main__":
    app.run('0.0.0.0', port=7778, debug=True)
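A minimal client-side sketch for exercising this endpoint, assuming the service is running locally on port 7778; the payload keys are placeholders, since the fields getSahiReportURL expects are not shown here:

import requests

# Hypothetical payload; the real keys depend on what getSahiReportURL expects.
payload = {"suite": "regression_suite", "status": "FAILURE"}
response = requests.post("http://localhost:7778/sahifailedresult", json=payload)
print(response.status_code, response.text)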

Example #4
import os
import argparse
import datetime

from urllib2 import urlopen
from urllib2 import URLError as urlerror

from pandas.core.frame import DataFrame

from config_util import ConfigUtil
from time_util import TimeUtil
from logger_util import get_logger
from file_operator import FileOperator

PWD = os.getcwd()
CONFIG_FILE = PWD + "/conf/properties.conf"
FLAGS = None
logger = get_logger(refresh=True)
hadoop_util = None


class HadoopUtil(object):
    def __init__(self, file_path):
        # Load the Hadoop/YARN endpoints and job settings from the shared config file.
        self.config_util = ConfigUtil(CONFIG_FILE)
        self.hadoop_url = self.config_util.get_options("url", "hadoop_url")
        self.file_path = file_path
        self.application_url = self.config_util.get_options(
            "url", "application_url")
        self.job_metrics = self.config_util.get_options("job", "job_metrices")
        self.job_url = self.config_util.get_options("url", "job_url")
        self.memcpu_info = {}

    def get_cluster_information(self):
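        # A minimal sketch, assuming hadoop_url points at the YARN
        # ResourceManager cluster-metrics endpoint, e.g.
        # "http://<rm-host>:8088/ws/v1/cluster/metrics".
        import json  # local import keeps this sketch self-contained
        try:
            response = urlopen(self.hadoop_url, timeout=30)
            metrics = json.loads(response.read()).get("clusterMetrics", {})
        except urlerror as err:
            logger.error("Failed to fetch cluster metrics from %s: %s",
                         self.hadoop_url, err)
            return None
        # Keep the memory/CPU figures around for later reporting.
        self.memcpu_info = {
            "total_memory_mb": metrics.get("totalMB"),
            "available_memory_mb": metrics.get("availableMB"),
            "total_vcores": metrics.get("totalVirtualCores"),
            "available_vcores": metrics.get("availableVirtualCores"),
        }
        return self.memcpu_info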
Example #5
from kafka import KafkaConsumer
from kafka import TopicPartition

import paramiko
from enginedao.host import HostDao
from enginedao.workerprofile import WorkerProfileDao
from enginedao import host

from cloudriver import instance
from cloudriver import vpc

import config
import logger_util
import notify

logger = logger_util.get_logger(name='chworker.py')

KAFKA_HOST = config.KAFKA_HOST
KAFKA_TOPIC = config.KAFKA_TOPIC

try:
    KAFKA_PORT = int(config.KAFKA_PORT)
except (TypeError, ValueError) as e:
    logger.error("Invalid KAFKA_PORT in config: %s", e)
    KAFKA_PORT = None

INSTANCE_NOT_RESOURCE = 'not_resource'

# Subscribe to the configured topic as part of the 'g_scaling' consumer group.
k_consumer = KafkaConsumer(
    KAFKA_TOPIC,
    group_id='g_scaling',