Example #1
def schedule_logger(job_id=None, delete=False):
    if not job_id:
        return getLogger("fate_flow_schedule")
    else:
        if delete:
            with LoggerFactory.lock:
                try:
                    # Iterate over a copy of the keys: deleting from the dict
                    # while iterating over it directly raises a RuntimeError.
                    for key in list(LoggerFactory.schedule_logger_dict.keys()):
                        if job_id in key:
                            del LoggerFactory.schedule_logger_dict[key]
                except Exception:
                    # Best-effort cleanup; a missing key is not an error here.
                    pass
            return True
        key = job_id + 'schedule'
        if key in LoggerFactory.schedule_logger_dict:
            return LoggerFactory.schedule_logger_dict[key]
        return get_job_logger(job_id, "schedule")
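A quick usage sketch, assuming schedule_logger is in scope as defined above (the job id is a made-up value):

# Illustrative only: the job id is invented for the example.
logger = schedule_logger(job_id="202101011200000000001")  # cached per-job logger
logger.info("job scheduled")
schedule_logger(job_id="202101011200000000001", delete=True)  # drop it when the job ends
schedule_logger()  # no job id: falls back to the shared "fate_flow_schedule" logger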
Example #2
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import numpy as np

from fate_arch import storage
from fate_arch.abc import StorageTableABC, StorageTableMetaABC, AddressABC
from fate_arch.common import log, EngineType
from fate_arch.computing import ComputingEngine
from fate_arch.storage import StorageTableMeta, StorageEngine, Relationship
from fate_flow.entity.metric import MetricMeta
from fate_flow.utils import job_utils, data_utils
from fate_flow.components.component_base import ComponentBase

LOGGER = log.getLogger()
MAX_NUM = 10000


class Reader(ComponentBase):
    def __init__(self):
        super(Reader, self).__init__()
        self.parameters = None

    def run(self, component_parameters=None, args=None):
        self.parameters = component_parameters["ReaderParam"]
        output_storage_address = args["job_parameters"].engines_address[
            EngineType.STORAGE]
        table_key = next(iter(self.parameters))  # first configured table key
        computing_engine = args["job_parameters"].computing_engine
        output_table_namespace, output_table_name = data_utils.default_output_table_info(
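The snippet is cut off, but the shape of the inputs run() reads can be sketched; everything below (table name, namespace, the job-parameters stand-in) is a made-up illustration, not the FATE API:

# Hypothetical shape of the arguments Reader.run() consumes; values invented.
component_parameters = {
    "ReaderParam": {"table": {"name": "breast_hetero_guest", "namespace": "experiment"}}
}

class _JobParameters:  # stand-in for the real job_parameters object
    computing_engine = ComputingEngine.STANDALONE
    engines_address = {EngineType.STORAGE: {}}  # per-engine address dicts

args = {"job_parameters": _JobParameters()}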
Example #3
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import io
import os
from typing import Iterable

from pyarrow import fs

from fate_arch.common import hdfs_utils
from fate_arch.common.log import getLogger
from fate_arch.storage import StorageEngine, LocalFSStoreType
from fate_arch.storage import StorageTableBase

LOGGER = getLogger()


class StorageTable(StorageTableBase):
    def __init__(
        self,
        address=None,
        name: str = None,
        namespace: str = None,
        partitions: int = 1,
        storage_type: LocalFSStoreType = LocalFSStoreType.DISK,
        options=None,
    ):
        super(StorageTable, self).__init__(
            name=name,
            namespace=namespace,
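The constructor is truncated above; still, a hedged construction sketch (the concrete values, and passing address=None, are assumptions for illustration):

# Illustrative construction only; real callers pass a fate_arch address object.
table = StorageTable(
    address=None,
    name="breast_hetero_guest",
    namespace="experiment",
    partitions=4,
    storage_type=LocalFSStoreType.DISK,
)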
Example #4
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import hashlib
import time
import typing

import beautifultable

from fate_arch.common.log import getLogger
import inspect
from functools import wraps
from fate_arch.abc import CTableABC

profile_logger = getLogger("PROFILING")
_PROFILE_LOG_ENABLED = False
_START_TIME = None
_END_TIME = None


class _TimerItem(object):
    def __init__(self):
        self.count = 0
        self.total_time = 0.0
        self.max_time = 0.0

    def union(self, other: '_TimerItem'):
        self.count += other.count
        self.total_time += other.total_time
        if self.max_time < other.max_time:
            self.max_time = other.max_time
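A small sketch of how two timer items fold together (values chosen to be exact in binary floating point, so the assert holds):

item_a, item_b = _TimerItem(), _TimerItem()
item_a.count, item_a.total_time, item_a.max_time = 3, 0.75, 0.5
item_b.count, item_b.total_time, item_b.max_time = 1, 0.25, 0.25
item_a.union(item_b)
assert (item_a.count, item_a.total_time, item_a.max_time) == (4, 1.0, 0.5)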
Example #5
JOB_START_TIMEOUT = 60 * 1000  # ms
END_STATUS_JOB_SCHEDULING_TIME_LIMIT = 5 * 60 * 1000  # ms
END_STATUS_JOB_SCHEDULING_UPDATES = 1

# Endpoint
FATE_FLOW_MODEL_TRANSFER_ENDPOINT = "/v1/model/transfer"
FATE_MANAGER_GET_NODE_INFO_ENDPOINT = "/fate-manager/api/site/secretinfo"
FATE_MANAGER_NODE_CHECK_ENDPOINT = "/fate-manager/api/site/checksite"
FATE_BOARD_DASHBOARD_ENDPOINT = "/index.html#/dashboard?job_id={}&role={}&party_id={}"

# Logger
log.LoggerFactory.LEVEL = 10
# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
log.LoggerFactory.set_directory(
    os.path.join(file_utils.get_project_base_directory(), "logs", "fate_flow"))
stat_logger = log.getLogger("fate_flow_stat")
detect_logger = log.getLogger("fate_flow_detect")
access_logger = log.getLogger("fate_flow_access")
data_manager_logger = log.getLogger("fate_flow_data_manager")
peewee_logger = log.getLogger("peewee")

# Switch
UPLOAD_DATA_FROM_CLIENT = True
USE_AUTHENTICATION = False
PRIVILEGE_COMMAND_WHITELIST = []
CHECK_NODES_IDENTITY = False
DEFAULT_FEDERATED_STATUS_COLLECT_TYPE = get_base_config(
    FATEFLOW_SERVICE_NAME, {}).get("default_federated_status_collect_type",
                                   "PUSH")

# Init
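FATE_BOARD_DASHBOARD_ENDPOINT above is a plain str.format template; filling it looks like this (the job id, role and party id are made-up values):

# Illustrative only; the ids are invented.
url = FATE_BOARD_DASHBOARD_ENDPOINT.format("202101011200000000001", "guest", 9999)
# -> "/index.html#/dashboard?job_id=202101011200000000001&role=guest&party_id=9999"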
Example #6
########################################################
# Copyright 2019-2021 program was created VMware, Inc. #
# SPDX-License-Identifier: Apache-2.0                  #
########################################################

import logging
import json
import requests

from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from fate_arch.common.log import getLogger
from fate_arch.federation.pulsar._mq_channel import DEFAULT_SUBSCRIPTION_NAME

logger = getLogger()

MAX_RETRIES = 10
MAX_REDIRECT = 5
BACKOFF_FACTOR = 1

# sleep time equals {BACKOFF_FACTOR} * (2 ** ({NUMBER_OF_TOTAL_RETRIES} - 1))

CLUSTER = 'clusters/{}'
TENANT = 'tenants/{}'

# APIs refer to https://pulsar.apache.org/admin-rest-api/?version=2.7.0&apiversion=v2


class PulsarManager:
    def __init__(self, host: str, port: str, runtime_config: dict = None):
        # Avoid the mutable-default-argument pitfall; default to an empty dict.
        self.runtime_config = runtime_config if runtime_config is not None else {}
        self.service_url = "http://{}:{}/admin/v2/".format(host, port)
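The retry constants and imports above suggest a requests session wired with urllib3's Retry; a minimal sketch, assuming a helper like this exists somewhere in the truncated class:

# Assumed helper, not shown in the snippet: mounts a retrying HTTP adapter.
def _build_session() -> requests.Session:
    retry = Retry(total=MAX_RETRIES,
                  redirect=MAX_REDIRECT,
                  backoff_factor=BACKOFF_FACTOR)
    adapter = HTTPAdapter(max_retries=retry)
    session = requests.Session()
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    return session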