Example #1
from logging import getLogger as logging_getLogger


def messages(message, level="info", raised=None):
    #
    # Initialize the variables expected by the rest of the routine
    logger = logging_getLogger()
    levels = {"debug": 10, "info": 20, "warn": 30, "err": 40, "crit": 50}
    argsdict = dict()
    #
    # Normalize the input parameters to simplify the downstream steps
    message = ['{0}="{1}"'.format(x, message[x]) for x in sorted(message)]
    if raised is not None:
        argsdict.update({"exc_info": raised})
    #
    # Handle standard messages generated by various procedures in the software
    logger.log(levels[level], " ".join(message), **argsdict)
    #
    # Return a default boolean indicating the overall success of the routine
    return True
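
A quick usage sketch for the helper above; the event dict and the exception are illustrative. The helper renders the event as sorted key="value" pairs and attaches the exception info when one is passed:

try:
    raise RuntimeError("simulated failure")
except RuntimeError as exc:
    messages({"action": "upload", "status": "failed"}, level="err", raised=exc)

messages({"action": "upload", "status": "ok"})  # logs: action="upload" status="ok"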
Example #2
def color_logging_setup():
    #
    # Set logging to INFO by default (log everything except DEBUG).
    #
    # Also try to add colors to the logging output if the output goes to a
    # capable device (a terminal that supports colors, not a file).
    #
    # Embedding the ANSI escape codes in the logging level name is admittedly
    # an ugly hack, but it is the easiest way (fewest changes).
    #
    # A more elegant approach is described here:
    #  http://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output
    #
    proc_name = current_process().name
    proc_pid = os.getpid()
    proc_info = " {}/{}:".format(proc_name, proc_pid) if proc_name != 'MainProcess' else ''
    fmt_str = "%(asctime)s %(levelname)s:{} %(message)s".format(proc_info)
    out_dev_istty = getattr(sys.stdout, 'isatty', None)
    term = os.environ.get('TERM', '')  # avoid a KeyError when TERM is unset

    if out_dev_istty is not None and out_dev_istty():
        if '256color' in term:
            for lvl in LOGGING_LEVELS:
                logging_addLevelName(LOGGING_LEVELS[lvl]['level'],
                                     "\033[{0}{1}".format(
                                         LOGGING_LEVELS[lvl]['256color'],
                                         LOGGING_LEVELS[lvl]['name']))
            fmt_str = "\033[38;5;250m%(asctime)s\033[0m %(levelname)s:{} " \
                      "%(message)s\033[0m".format(proc_info)
        elif 'xterm' in term:
            for lvl in LOGGING_LEVELS:
                logging_addLevelName(LOGGING_LEVELS[lvl]['level'],
                                     "\033[{0}{1}".format(
                                         LOGGING_LEVELS[lvl]['xterm'],
                                         LOGGING_LEVELS[lvl]['name']))
            fmt_str = "\033[37m%(asctime)s\033[0m %(levelname)s:{} " \
                      "%(message)s\033[0m".format(proc_info)
        else:
            logging_addLevelName(LOGGING_LEVELS['NORMAL']['level'],
                                 LOGGING_LEVELS['NORMAL']['name'])

    logging_basicConfig(format=fmt_str, level=logging_level_INFO,
                        stream=sys.stdout)
    logger = logging_getLogger()

    return logger
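
The function assumes a project-level LOGGING_LEVELS table defined elsewhere. A minimal sketch of a shape that satisfies the lookups above (the keys 'level', 'name', '256color' and 'xterm' are inferred from the code; the concrete escape codes and the 'NORMAL' alias for INFO are illustrative assumptions):

from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL

LOGGING_LEVELS = {
    'DEBUG':    {'level': DEBUG,    'name': 'DEBUG',    '256color': '38;5;244m', 'xterm': '36m'},
    'NORMAL':   {'level': INFO,     'name': 'INFO',     '256color': '38;5;122m', 'xterm': '32m'},
    'WARNING':  {'level': WARNING,  'name': 'WARNING',  '256color': '38;5;214m', 'xterm': '33m'},
    'ERROR':    {'level': ERROR,    'name': 'ERROR',    '256color': '38;5;202m', 'xterm': '31m'},
    'CRITICAL': {'level': CRITICAL, 'name': 'CRITICAL', '256color': '38;5;196m', 'xterm': '31;1m'},
}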
Example #3
    def __init__(self, db='file', print_infos=False):

        # given by Thomas
        self.logger = logging_getLogger(__name__)
        self.logger.info('Started instance of rpCache')

        self.store_mode = db

        self.dirname = os.path.dirname(os.path.abspath(__file__)) + "/.."
        # input_cache
        self.input_cache_dir = self.dirname + '/input_cache/'

        if self.store_mode != 'file':
            self.redis = StrictRedis(host=self.store_mode,
                                     port=6379,
                                     db=0,
                                     decode_responses=True)
            if not wait_for_redis(self.redis, 30):
                self.logger.critical("Database " + self.store_mode +
                                     " is not reachable")
                exit()
            self.deprecatedMNXM_mnxm = CRedisDict('deprecatedMNXM_mnxm',
                                                  self.redis)
            self.deprecatedMNXR_mnxr = CRedisDict('deprecatedMNXR_mnxr',
                                                  self.redis)
            self.mnxm_strc = CRedisDict('mnxm_strc', self.redis)
            self.chemXref = CRedisDict('chemXref', self.redis)
            self.rr_reactions = CRedisDict('rr_reactions', self.redis)
            self.chebi_mnxm = CRedisDict('chebi_mnxm', self.redis)
            # rpReader attributes
            self.inchikey_mnxm = CRedisDict('inchikey_mnxm', self.redis)
            self.compXref = CRedisDict('compXref', self.redis)
            self.name_compXref = CRedisDict('name_compXref', self.redis)
            # rpCofactors attributes
            self.full_reactions = CRedisDict('full_reactions', self.redis)
        else:
            # cache
            self.cache_dir = self.dirname + '/cache/'
            if not os.path.isdir(self.cache_dir):
                os.mkdir(self.cache_dir)
            self.deprecatedMNXM_mnxm = None
            self.deprecatedMNXR_mnxr = None
            self.mnxm_strc = None
            self.chemXref = None
            self.rr_reactions = None
            self.chebi_mnxm = None
            # rpReader attributes
            self.inchikey_mnxm = None
            self.compXref = None
            self.name_compXref = None
            # rpCofactors attributes
            self.full_reactions = None

        self.print = print_infos

        # Common attributes
        self.convertMNXM = {
            'MNXM162231': 'MNXM6',
            'MNXM84': 'MNXM15',
            'MNXM96410': 'MNXM14',
            'MNXM114062': 'MNXM3',
            'MNXM145523': 'MNXM57',
            'MNXM57425': 'MNXM9',
            'MNXM137': 'MNXM588022'
        }

        if not self.loadCache():
            raise ValueError('rpCache: could not load the cache')
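
A hedged construction sketch for the class above (the class name rpCache comes from its own log message; 'localhost' stands in for any reachable Redis host). Note that the constructor raises ValueError when the cache cannot be loaded:

# File-backed cache (the default): data is read from the local cache directory
cache = rpCache(db='file')

# Redis-backed cache: pass the Redis host name instead of 'file'
# cache = rpCache(db='localhost')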
Example #4
from logging import getLogger as logging_getLogger
from os.path import isdir as os_path_isdir
from os.path import join as os_path_join

from scipy.io import loadmat
from torch import float64 as torch_float64  # pylint: disable=E0611
from torch import stack as torch_stack  # pylint: disable=E0611
from torch import from_numpy as torch_from_numpy  # pylint: disable=E0611

# from lib.stft import stft
# from lib.non_iter_ls_inv_stft import non_iter_ls_inv_stft
from lib.stft_wrapper import stft
from lib.istft_wrapper import istft

CHANDAT_FNAME = 'chandat.mat'
NEW_STFT_FNAME = 'new_stft.mat'
LEN_EACH_SECTION = 16
FRAC_OVERLAP = 0.9
PADDING = 16
CHANDAT_DNN_SAVE_FNAME = 'chandat_dnn.mat'
LOGGER = logging_getLogger()


def r4_dnn_istft(target_dirname,
                 chandat_obj=None,
                 new_stft_object=None,
                 is_saving_chandat_dnn=True):
    LOGGER.info(
        '{}: r4: Doing istft on denoised stft...'.format(target_dirname))
    assert os_path_isdir(target_dirname)
    if chandat_obj is None:
        chandat_obj = loadmat(os_path_join(target_dirname, CHANDAT_FNAME))
    chandat_data = chandat_obj['chandat']
    num_rows, num_elements, num_beams = chandat_data.shape
    beam_position_x = chandat_obj['beam_position_x']
    if 'depth' in chandat_obj:
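
A hedged usage sketch for the entry point above (the directory path is illustrative). When chandat_obj is None the function loads chandat.mat from the target directory itself, so preloading is optional:

from scipy.io import loadmat

chandat_obj = loadmat('path/to/target_dir/chandat.mat')  # optional preload
r4_dnn_istft('path/to/target_dir', chandat_obj=chandat_obj)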
Example #5
        nargs='?',
        default=-1,
        help=
        'The maximum number of models to evaluate, regardless of how many matched folders.'
    )
    parser.add_argument('-v',
                        '--verbose',
                        help='increase output verbosity',
                        action='store_true')
    args = parser.parse_args()

    identifier = args.identifier
    max_to_evaluate = args.max_to_evaluate
    verbose = args.verbose

    logger = logging_getLogger('evaluate_keras')
    if verbose:
        logger.setLevel(logging_DEBUG)
    else:
        logger.setLevel(logging_INFO)

    model_search_path = os_path_join(MODELS_DIRNAME,
                                     str(identifier) + '_trained')
    models = glob_glob(model_search_path)
    num_models = len(models)

    if num_models == 0:
        raise ValueError('evaluate_models: given identifier ' +
                         str(identifier) + ', expanded to ' +
                         str(model_search_path) + ', matched no model.')
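
Like the other listings here, this snippet leans on flat import aliases instead of dotted stdlib names. A sketch of the imports it assumes (MODELS_DIRNAME is a project constant; the value shown is hypothetical):

from glob import glob as glob_glob
from logging import DEBUG as logging_DEBUG
from logging import INFO as logging_INFO
from logging import getLogger as logging_getLogger
from os.path import join as os_path_join

MODELS_DIRNAME = 'models'  # hypothetical value; the project defines its own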
Example #6

def main():
    """
    Command line program to configure and deploy CN-Series
    """
    try:
        fmt_str = '%(asctime)s %(levelname)s: %(message)s'

        logging_basicConfig(
            format=fmt_str, level=logging_level_INFO,
            stream=sys.stdout)

        logging_getLogger("paramiko").setLevel(logging_level_WARN)

        #
        # The default signal handler for SIGINT / CTRL-C raises a KeyboardInterrupt
        # exception which prints a possibly very long traceback. To avoid it we
        # install a custom signal handler
        #
        signal_set_handler(signal_SIGINT, custom_signal_handler)

        args = get_args()
        # Panorama info:
        pan_hostname = args.pn_ip
        pan_username = args.pn_user
        pan_password = args.pn_pass
        pan_template_stack = args.pn_tmpl
        pan_dg = args.pn_dg
        pan_cg = args.pn_cg
        cn_auth_code = args.auth_code
        cn_tokens = args.tokens
        cn_bundle = args.cn_bnd

        # Kubernetes info:
        k8s_ip = args.k8s_ip
        ctl_ip = args.ctl_ip
        k8s_username = args.k8s_user
        k8s_password = args.k8s_pass
        k8s_port = args.k8s_port
        if args.k8s_mode in ('lite', 'full'):
            k8s_mode = args.k8s_mode
        else:
            error("Sorry, I don't support this mode. Only lite or full are supported.")
            sys.exit()
        k8s_name = args.k8s_name

        pv_type = args.pv_type

        cn_pin_id = args.cn_pin_id
        cn_pin_value = args.cn_pin_value

        if not cn_pin_id or not cn_pin_value:
            if k8s_mode == 'full':
                error("You selected full mode. A CN-Series registration pin id and value are required.")
                sys.exit()

        if args.k8s_type == 'native':
            k8s_type = 'Native-Kubernetes'
        elif args.k8s_type == 'openshift':
            k8s_type = 'OpenShift'
        else:
            error("Sorry I don't support this type yet. only native or openshift is supported.")
            sys.exit()

        if k8s_type == 'Native-Kubernetes':
            yaml_base_url = BASE_URL + "native/"
            if not pv_type:
                error("PV Type is required for Native deployment.")
                sys.exit()
        elif k8s_type == 'OpenShift':
            yaml_base_url = BASE_URL + "openshift/"

        ctl = 'kubectl' if k8s_type == 'Native-Kubernetes' else 'oc'

        cn_images_dict = {
            'cn_mgmt_image': args.cn_mgmt_image,
            'cn_ngfw_image': args.cn_ngfw_image,
            'cn_init_image': args.cn_init_image,
            'cn_cni_image': args.cn_cni_image
        }

        panorama_dict = {
            'pan_hostname': pan_hostname,
            'pan_username': pan_username,
            'pan_password': pan_password,
            'device_group': pan_dg,
            'template_stack': pan_template_stack,
            'cn_auth_code': cn_auth_code,
            'cn_tokens': cn_tokens,
            'c_group': pan_cg,
            'cn_bundle': cn_bundle,
            'auth_key': ''
        }

        k8s_dict = {
            'k8s_cluster_name': k8s_name,
            'ctl_ip': ctl_ip,
            'k8s_cluster_ip': k8s_ip,
            'k8s_port': k8s_port,
            'k8s_type': k8s_type,
            'svc_account_b64': '',
            'yaml_base_url' : yaml_base_url,
            'k8s_mode': k8s_mode,
            'pv_type': pv_type,
            'cn_pin_id': cn_pin_id,
            'cn_pin_value': cn_pin_value,
            'ctl': ctl
        }

        try:
            info("Establishing API connection with Panorama.")
            pn_api_conn = create_panos_device(pan_hostname, pan_username, pan_password)
            info("Establishing SSH connection with Panorama.")
            pn_ssh_conn = ssh_login(pan_hostname, pan_username, pan_password)
            info("Establishing SSH connection with k8s master.")
            k8s_ssh_conn = ssh_login(ctl_ip, k8s_username, k8s_password)
            if not (pn_api_conn and pn_ssh_conn and k8s_ssh_conn):
                info("Without connection to both the kubernetes cluster and Panorama I can not work.")
                sys.exit()
        except Exception:
            error("Something went wrong while establishing the connections, exiting...")
            sys.exit()

        panorama_version = check_panos_version(pn_api_conn)

        if int(panorama_version.split('.')[0]) >= 10:
            info("Panorama PAN-OS version is {}".format(panorama_version))
        else:
            error("Panorama PAN-OS version is {}. I need Panorama that running PAN-OS 10.0 or later, Exiting....".format(panorama_version))
            sys.exit()

        commit_required = False

        info("checking for Kubernetes plugin.")
        k8s_plugin_version = check_k8s_plugin(pn_api_conn)
        if k8s_plugin_version:
            info("Kubernetes plugin version is {}".format(k8s_plugin_version.split('-')[1]))
        else:
            error("Kubernetes plugin is not installed, I will install the latest plugin")
            info("Updating plugin list")
            update_plugin_list(pn_api_conn)

            for p in range(3):
                latest_k8s = find_latest_k8s_plugin(pn_api_conn)
                if latest_k8s['name']:
                    if latest_k8s['downloaded'] == 'no':
                        download_plugin(pn_ssh_conn, latest_k8s['name'])
                    else:
                        info("Kubernetes plugin {} Downloaded.".format(latest_k8s['name']))
                        break
                    if not wait_for_panos(pn_api_conn, time.time() + 60 * 5):
                        error("Download job is taking longer than expected, exiting...")
                        sys.exit()
                    # Give the download some time
                    time.sleep(10)
                else:
                    error("No Kubernetes plugin found. Check Panorama connection or install the plugin manually.")
                    sys.exit()
                info("Checking if plugin is downloaded properly.")

            for p in range(3):
                if latest_k8s['downloaded'] != 'no':
                    info("Installing kubernetes plugin.")
                    install_k8s_plugin(pn_ssh_conn, latest_k8s['name'])
                    commit_required = True
                    if not wait_for_panos(pn_api_conn, time.time() + 60 * 5):
                        error("Install job is taking longer than expected, exiting...")
                        sys.exit()
                    info("Installation complete. I will check again if the plugin is installed properly.")
                    # Give the install some time
                    time.sleep(10)
                    k8s_plugin_version = check_k8s_plugin(pn_api_conn)
                    if k8s_plugin_version:
                        info("Kubernetes plugin version is {}".format(k8s_plugin_version.split('-')[1]))
                        break
                    else:
                        info("Plugin installation was not successful I will try again.")
                else:
                    info("Plugin is not installed, exiting.")
                    sys.exit()

        if commit_required:
            info("Committing configuration")
            panorama_commit(pn_api_conn)

        if check_device_group(pn_api_conn, pan_dg):
            info("Device group {} Found.".format(pan_dg))
        else:
            error("Device Group {} was not found in Panorama. "
                  "I will add the device group to Panorama config.".format(pan_dg))
            configure_device_group(pn_ssh_conn, pan_dg)

        if check_template_stack(pn_api_conn, pan_template_stack):
            info("Template Stack {} Found.".format(pan_template_stack))
        else:
            error("Template Stack {} was not found in Panorama. "
                  "I will add a Template and Template Stack to Panorama config.".format(pan_template_stack))
            configure_template(pn_ssh_conn, pan_template_stack + "-tmp")
            configure_template_stack(pn_ssh_conn, pan_template_stack)

        if check_collector_group(pn_api_conn, pan_cg):
            info("Collector group {} found.".format(pan_cg))
        else:
            info("Collector group {} not found. "
                 "I will add a dummy one you can add log collector to it later.".format(pan_cg))
            configure_collector_group(pn_ssh_conn, pan_cg)

        info("Applying CN-Series License.")

        activate_license(pn_ssh_conn, panorama_dict['cn_auth_code'], panorama_dict['cn_tokens'])

        info("Creating k8s service account for Panorama Plugin.")
        k8s_dict['svc_account_b64'] = create_k8s_plugin_svc_account(k8s_ssh_conn, yaml_base_url, ctl)
        info("Configure Panorama Plugin")
        configure_panorama(pn_ssh_conn, panorama_dict, k8s_dict)

        info("Creating bootstrapping authentication key")
        panorama_dict['auth_key'] = create_auth_key(pn_ssh_conn)

        # Committing changes to Panorama.
        panorama_commit(pn_api_conn)

        info("Deploying CN-Series")
        if create_cn_series(k8s_ssh_conn, yaml_base_url, cn_images_dict, panorama_dict, k8s_dict):
            info("CN-Series is deployed successfully.")
            info("Depending on the image download speed, it will take some time to pull images and finish deployment.")
            info("")
            info("=======================================================================================================")
            info("")
            info("I AM DONE! You can now monitor the CN-Series deployment using the following command from the k8s master")
            info("")
            info("kubectl get pods -n kube-system")
            info("")
            info("")
            info("The script will keep checking for the pods status every 5 min. Installation will take about 15 min.")
            info("You can exit now and monitor manually if you prefer")
            info("=======================================================================================================")
            info("")
            info("")

        info("I will sleep for 5 min then I will start checking the pods status.")
        time.sleep(300)

        success = False
        for c_pod in range(6):
            if check_pods_status(k8s_ssh_conn, ctl):
                info("All pods are running. I will now check if all containers are ready.")
                for c_c in range(6):
                    if check_container_status(k8s_ssh_conn, ctl):
                        info("All containers are ready.")
                        success = True
                        break
                    else:
                        info("Not all containers are ready. I will check again after 5 min.")
                        time.sleep(300)
                break
            else:
                info("Not all pods are running. I will check again after 5 min.")
                time.sleep(300)

        if success:
            info("*******************************************************************************************************")
            info("")
            info("")
            info("Installation done successfully.")
            info("")
            info("")
            info("*******************************************************************************************************")
        else:
            error("Seem like there is some errors during deployment. Please log in the k8s cluster and check the status.")

        pn_ssh_conn.close()
        k8s_ssh_conn.close()
    except Exception:
        error("An error occurred that I couldn't handle!")
Example #7
from logging import getLogger as logging_getLogger


def getLogger(*args, **kwargs):
    return logging_getLogger(*args, **kwargs)
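
A usage sketch for the thin wrapper above (the module name is illustrative); the wrapper simply delegates to the aliased stdlib logging.getLogger, so the result is an ordinary Logger:

log = getLogger('my_module')
log.warning('messages reach the standard logging machinery')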