Code Example #1
File: agent.py, Project: onisimchukv/supervisely
    def __init__(self):
        self.logger = get_task_logger('agent')
        sly.change_formatters_default_values(self.logger, 'service_type',
                                             ServiceType.AGENT)
        sly.change_formatters_default_values(self.logger, 'event_type',
                                             EventType.LOGJ)
        self.log_queue = LogQueue()
        add_task_handler(self.logger, self.log_queue)
        sly.add_default_logging_into_file(self.logger, constants.AGENT_LOG_DIR)

        self.task_pool_lock = threading.Lock()
        self.task_pool = {}  # task_id -> task_manager (process_id)

        self.thread_pool = ThreadPoolExecutor(max_workers=10)
        self.thread_list = []

        self.api = AgentAPI(constants.TOKEN, constants.SERVER_ADDRESS,
                            self.logger)
        self.agent_connect_initially()

        sly.clean_dir(constants.AGENT_TMP_DIR)
        self._stop_missed_containers()

        self.docker_api = docker.from_env(version='auto')
        self._docker_login()

        self.logger.info('Agent is ready to get tasks.',
                         extra={'event_type': EventType.AGENT_READY_FOR_TASKS})
Code Example #2
    def __init__(self):
        self.logger = sly.get_task_logger('agent')
        sly.change_formatters_default_values(self.logger, 'service_type',
                                             sly.ServiceType.AGENT)
        sly.change_formatters_default_values(self.logger, 'event_type',
                                             sly.EventType.LOGJ)
        self.log_queue = LogQueue()
        add_task_handler(self.logger, self.log_queue)
        sly.add_default_logging_into_file(self.logger,
                                          constants.AGENT_LOG_DIR())

        self.logger.info('Agent comes back...')

        self.task_pool_lock = threading.Lock()
        self.task_pool = {}  # task_id -> task_manager (process_id)

        self.thread_pool = ThreadPoolExecutor(max_workers=10)
        self.thread_list = []
        self.daemons_list = []

        sly.fs.clean_dir(constants.AGENT_TMP_DIR())
        self._stop_missed_containers()

        self.docker_api = docker.from_env(version='auto')
        self._docker_login()

        self.logger.info('Agent is ready to get tasks.')
        self.api = sly.AgentAPI(constants.TOKEN(), constants.SERVER_ADDRESS(),
                                self.logger, constants.TIMEOUT_CONFIG_PATH())
        self.agent_connect_initially()
        self.logger.info('Agent connected to server.')
Code Example #3
    def init_logger(self, loglevel=None):
        self.logger = sly.get_task_logger(self.info['task_id'], loglevel=loglevel)
        sly.change_formatters_default_values(self.logger, 'service_type', sly.ServiceType.AGENT)
        sly.change_formatters_default_values(self.logger, 'event_type', sly.EventType.LOGJ)

        self.log_queue = LogQueue()
        add_task_handler(self.logger, self.log_queue)
        sly.add_default_logging_into_file(self.logger, self.dir_logs)
        self.executor_log = concurrent.futures.ThreadPoolExecutor(max_workers=1)
Code Example #4
    def __init__(self):
        self.logger = sly.get_task_logger('agent')
        sly.change_formatters_default_values(self.logger, 'service_type',
                                             sly.ServiceType.AGENT)
        sly.change_formatters_default_values(self.logger, 'event_type',
                                             sly.EventType.LOGJ)
        self.log_queue = LogQueue()
        add_task_handler(self.logger, self.log_queue)
        sly.add_default_logging_into_file(self.logger,
                                          constants.AGENT_LOG_DIR())

        self._stop_log_event = threading.Event()
        self.executor_log = ThreadPoolExecutor(max_workers=1)
        self.future_log = None

        self.logger.info('Agent comes back...')

        self.task_pool_lock = threading.Lock()
        self.task_pool = {}  # task_id -> task_manager (process_id)

        self.thread_pool = ThreadPoolExecutor(max_workers=10)
        self.thread_list = []
        self.daemons_list = []

        self._remove_old_agent()
        self._validate_duplicated_agents()

        sly.fs.clean_dir(constants.AGENT_TMP_DIR())
        self._stop_missed_containers(constants.TASKS_DOCKER_LABEL())
        # for compatibility with old plugins
        self._stop_missed_containers(constants.TASKS_DOCKER_LABEL_LEGACY())

        self.docker_api = docker.from_env(
            version='auto', timeout=constants.DOCKER_API_CALL_TIMEOUT())
        self._docker_login()

        self.logger.info('Agent is ready to get tasks.')
        self.api = sly.AgentAPI(constants.TOKEN(), constants.SERVER_ADDRESS(),
                                self.logger, constants.TIMEOUT_CONFIG_PATH())
        self.agent_connect_initially()
        self.logger.info('Agent connected to server.')
Code Example #5
                init_fn = slim.assign_from_checkpoint_fn(
                    join(self.helper.paths.model_dir, 'model_weights',
                         'model.ckpt'),
                    variables_to_restore,
                    ignore_missing_vars=ignore_missing_vars)
                init_fn(sess)

        input_shape_hw = (self.input_size_wh[1], self.input_size_wh[0])
        train(data_dicts=self.tf_data_dicts,
              class_num=len(self.out_classes),
              input_size=input_shape_hw,
              lr=self.config['lr'],
              n_epochs=self.config['epochs'],
              num_clones=len(device_ids),
              iters_cnt=self.iters_cnt,
              val_every=self.config['val_every'],
              model_init_fn=init_model_fn,
              save_cback=dump_model)


def main():
    cv2.setNumThreads(0)
    x = DeepLabTrainer()
    x.train()


if __name__ == '__main__':
    if os.getenv('DEBUG_LOG_TO_FILE', None):
        sly.add_default_logging_into_file(logger, sly.TaskPaths().debug_dir)
    sly.main_wrapper('DEEPLAB_TRAIN', main)
Code Example #6
File: main.py, Project: sunnielyu/supervisely
        'AGENT_HOST_DIR',
        'SERVER_ADDRESS',
        'ACCESS_TOKEN',
        'DOCKER_LOGIN',
        'DOCKER_PASSWORD',
        'DOCKER_REGISTRY',
    ]}
    args_opt = {x: os.getenv(x, def_val) for x, def_val in [
        ('WITH_LOCAL_STORAGE', 'true'),
        ('UPLOAD_RESULT_IMAGES', 'false'),
        ('PULL_ALWAYS', 'true'),
        ('DEFAULT_TIMEOUTS', 'true'),
        ('DELETE_TASK_DIR_ON_FINISH', 'true'),
        ('DELETE_TASK_DIR_ON_FAILURE', 'false'),
        ('CHECK_VERSION_COMPATIBILITY', 'false')
    ]}
    args = {**args_opt, **args_req}
    return args


def main(args):
    sly.logger.info('ENVS', extra={**args, 'DOCKER_PASSWORD': '******'})
    agent = Agent()
    agent.inf_loop()
    agent.wait_all()


if __name__ == '__main__':
    sly.add_default_logging_into_file(sly.logger, constants.AGENT_LOG_DIR())
    sly.main_wrapper('agent', main, parse_envs())
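
The excerpt above begins partway through parse_envs(): the comprehension that collects the required variables is cut off just before 'AGENT_HOST_DIR'. Judging from the closing ]} and the parallel args_opt comprehension, the missing opening most likely looks like the sketch below; the exact accessor used in the original file may differ.

import os

def parse_envs():
    # Assumed reconstruction of the truncated opening: required settings are
    # read straight from the environment, so a missing variable fails fast
    # with a KeyError instead of silently falling back to a default.
    args_req = {x: os.environ[x] for x in [
        'AGENT_HOST_DIR', 'SERVER_ADDRESS', 'ACCESS_TOKEN',
        'DOCKER_LOGIN', 'DOCKER_PASSWORD', 'DOCKER_REGISTRY',
    ]}
    # ...continues as shown above with args_opt and the merged args dict.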
Code Example #7
import os

import supervisely_lib as sly
from supervisely_lib.metric.precision_recall_metric import PrecisionRecallMetric
from supervisely_lib.metric.iou_metric import IOU
from supervisely_lib.metric.common import check_class_mapping, CLASSES_MAPPING
from supervisely_lib.io.json import load_json_file


def main():
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    sly.logger.info('Input settings:', extra={'config': settings})

    if IOU not in settings:
        raise RuntimeError(
            '"{}" field is missing. Please set Intersection over Union threshold'
            .format(IOU))

    metric = PrecisionRecallMetric(settings[CLASSES_MAPPING], settings[IOU])
    applier = sly.MetricProjectsApplier(metric, settings)
    check_class_mapping(applier.project_gt, applier.project_pred,
                        settings[CLASSES_MAPPING])
    applier.run_evaluation()
    metric.log_total_metrics()


if __name__ == '__main__':
    if os.getenv('DEBUG_LOG_TO_FILE', None):
        sly.add_default_logging_into_file(sly.logger, sly.TaskPaths.DEBUG_DIR)
    sly.main_wrapper('METRIC_EVALUATION', main)
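
For context, the settings dict loaded from sly.TaskPaths.SETTINGS_PATH has to provide the class mapping and the IoU threshold that the snippet reads via settings[CLASSES_MAPPING] and settings[IOU]. A minimal sketch, assuming those constants resolve to the keys 'classes_mapping' and 'iou' (check the constant values in your supervisely_lib version):

# Hypothetical settings payload for the metric example above; the key names
# are assumptions based on the CLASSES_MAPPING and IOU constants.
settings_example = {
    'classes_mapping': {
        'person_pred': 'person_gt',  # prediction class -> ground-truth class
    },
    'iou': 0.5,                      # Intersection over Union threshold
}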