Example #1
    def sync_manual(self, computer: Computer, provider: ComputerProvider):
        """
        button sync was clicked manually
        """
        if not computer.meta:
            return

        meta = yaml_load(computer.meta)
        if 'manual_sync' not in meta:
            return

        manual_sync = meta['manual_sync']

        project_provider = ProjectProvider(self.session)
        docker_provider = DockerProvider(self.session)

        dockers = docker_provider.get_online()
        project = project_provider.by_id(manual_sync['project'])

        for docker in dockers:
            if docker.computer == computer.name:
                continue

            source = provider.by_name(docker.computer)
            ignore_folders = [
                [join('models', project.name), []]
            ]
            sync_directed(self.session, target=computer, source=source,
                          ignore_folders=ignore_folders)

        del meta['manual_sync']
        computer.meta = yaml_dump(meta)
        provider.update()
Example #2
def sync(project: str, computer: str, only_from: bool, only_to: bool):
    _create_computer()

    computer = computer or socket.gethostname()
    provider = ComputerProvider(_session)
    project_provider = ProjectProvider(_session)
    computer = provider.by_name(computer)
    computers = provider.all()
    folders_excluded = []
    p = project_provider.by_name(project)
    assert p, f'Project={project} is not found'

    ignore = yaml_load(p.ignore_folders)
    excluded = []
    for f in ignore:
        excluded.append(str(f))

    folders_excluded.append([join('data', p.name), excluded])
    folders_excluded.append([join('models', p.name), []])

    for c in computers:
        if c.name != computer.name:
            if not only_from:
                sync_directed(_session, computer, c, folders_excluded)
            if not only_to:
                sync_directed(_session, c, computer, folders_excluded)
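
For reference, the folders_excluded value built above is a list of [folder, ignored_subfolders] pairs; a minimal sketch for a hypothetical project named 'mnist' whose ignore_folders contains 'raw':

# illustrative shape only; the real values come from the project record
folders_excluded = [
    ['data/mnist', ['raw']],   # sync data/mnist, skipping 'raw'
    ['models/mnist', []],      # sync models/mnist with no exclusions
]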
Example #3
    def sync(self):
        hostname = socket.gethostname()
        try:
            provider = ComputerProvider(self.session)
            task_synced_provider = TaskSyncedProvider(self.session)

            computer = provider.by_name(hostname)
            sync_start = now()

            if FILE_SYNC_INTERVAL == 0:
                time.sleep(1)
            else:
                computers = provider.all_with_last_activtiy()
                computers = [
                    c for c in computers
                    if (now() - c.last_activity).total_seconds() < 10
                ]
                computers_names = {c.name for c in computers}

                for c, project, tasks in task_synced_provider.for_computer(
                        computer.name):
                    if c.name not in computers_names:
                        self.logger.info(
                            f'Computer = {c.name} '
                            f'is offline. Cannot sync',
                            ComponentType.WorkerSupervisor, hostname)
                        continue

                    if c.syncing_computer:
                        continue

                    excluded = list(map(str,
                                        yaml_load(project.ignore_folders)))
                    folders_excluded = [[join('data', project.name), excluded],
                                        [join('models', project.name), []]]

                    computer.syncing_computer = c.name
                    provider.update()
                    sync_directed(self.session, c, computer, folders_excluded)

                    for t in tasks:
                        task_synced_provider.add(
                            TaskSynced(computer=computer.name, task=t.id))

                    time.sleep(FILE_SYNC_INTERVAL)

            computer.last_synced = sync_start
            computer.syncing_computer = None
            provider.update()
        except Exception as e:
            if Session.sqlalchemy_error(e):
                Session.cleanup('FileSync')
                self.session = Session.create_session(key='FileSync')
                self.logger = create_logger(self.session, 'FileSync')

            self.logger.error(traceback.format_exc(),
                              ComponentType.WorkerSupervisor, hostname)
Example #4
def copy_remote(session: Session, computer_from: str, path_from: str,
                path_to: str):
    provider = ComputerProvider(session)
    src = provider.by_name(computer_from)
    host = socket.gethostname()
    if host != computer_from:
        # pull over scp from the remote machine
        c = f'scp -P {src.port} {src.user}@{src.ip}:{path_from} {path_to}'
    else:
        # same machine: a plain local copy suffices
        c = f'cp {path_from} {path_to}'
    subprocess.check_output(c, shell=True)
    return os.path.exists(path_to)
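
A hypothetical invocation (the computer name and paths are made up); copy_remote reports success by checking that the destination file exists:

# hypothetical usage: fetch a checkpoint file from another registered computer
ok = copy_remote(session, computer_from='worker1',
                 path_from='/tasks/42/log/checkpoints/best_full.pth',
                 path_to='/tasks/42/log/checkpoints/best_full.pth')
assert ok, 'checkpoint copy failed'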
Example #5
    def sync_manual(self, computer: Computer, provider: ComputerProvider):
        """
        button sync was clicked manually
        """
        if not computer.meta:
            return

        meta = yaml_load(computer.meta)
        if 'manual_sync' not in meta:
            return

        manual_sync = meta['manual_sync']

        project_provider = ProjectProvider(self.session)
        docker_provider = DockerProvider(self.session)

        dockers = docker_provider.get_online()
        project = project_provider.by_id(manual_sync['project'])
        sync_folders = manual_sync['sync_folders']
        ignore_folders = manual_sync['ignore_folders']

        sync_folders = correct_folders(sync_folders, project.name)
        ignore_folders = correct_folders(ignore_folders, project.name)

        if not isinstance(sync_folders, list):
            sync_folders = []
        if not isinstance(ignore_folders, list):
            ignore_folders = []

        for docker in dockers:
            if docker.computer == computer.name:
                continue

            source = provider.by_name(docker.computer)
            folders = [[s, ignore_folders] for s in sync_folders]

            computer.syncing_computer = source.name
            provider.update()

            try:
                sync_directed(
                    self.session,
                    target=computer,
                    source=source,
                    folders=folders
                )
            except Exception as e:
                self.process_error(e)
        del meta['manual_sync']
        computer.meta = yaml_dump(meta)
        provider.update()
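
For orientation, a sketch of the structure sync_manual reads from the computer's meta after yaml_load; the key names come from the code above, while the values are purely illustrative:

# assumed shape of meta while a manual sync is pending
meta = {
    'manual_sync': {
        'project': 1,                          # looked up via by_id
        'sync_folders': ['data/mnist'],        # folders to copy
        'ignore_folders': ['data/mnist/raw'],  # subfolders to skip
    }
}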
Example #6
def sync(project: str, computer: str, only_from: bool, only_to: bool,
         online: bool):
    """
    Syncs the specified project on this computer with other computers
    """
    check_statuses()

    _create_computer()
    _create_docker()

    computer = computer or socket.gethostname()
    provider = ComputerProvider(_session)
    project_provider = ProjectProvider(_session)
    computer = provider.by_name(computer)
    computers = provider.all_with_last_activtiy()
    p = project_provider.by_name(project)
    assert p, f'Project={project} is not found'

    sync_folders = yaml_load(p.sync_folders)
    ignore_folders = yaml_load(p.ignore_folders)

    sync_folders = correct_folders(sync_folders, p.name)
    ignore_folders = correct_folders(ignore_folders, p.name)

    if not isinstance(sync_folders, list):
        sync_folders = []
    if not isinstance(ignore_folders, list):
        ignore_folders = []

    folders = [[s, ignore_folders] for s in sync_folders]

    for c in computers:
        if c.name != computer.name:
            if online and (now() - c.last_activity).total_seconds() > 100:
                continue

            if not only_from:
                sync_directed(_session, computer, c, folders)
            if not only_to:
                sync_directed(_session, c, computer, folders)
Example #7
class Catalyst(Executor, Callback):
    def __init__(self, args: Args, report: ReportLayoutInfo, distr_info: dict,
                 resume: dict, grid_config: dict, trace: str, params: dict):
        super().__init__(order=0)

        self.resume = resume
        self.distr_info = distr_info
        self.args = args
        self.report = report
        self.experiment = None
        self.runner = None
        self.series_provider = ReportSeriesProvider(self.session)
        self.computer_provider = ComputerProvider(self.session)
        self.grid_config = grid_config
        self.master = True
        self.checkpoint_resume = False
        self.checkpoint_stage_epoch = 0
        self.trace = trace
        self.params = params

    def callbacks(self):
        result = OrderedDict()
        if self.master:
            result['catalyst'] = self

        return result

    def on_epoch_start(self, state: RunnerState):
        if self.checkpoint_resume and state.stage_epoch == 0:
            state.epoch += 1

        state.stage_epoch = state.stage_epoch + self.checkpoint_stage_epoch
        state.checkpoint_data = {'stage_epoch': state.stage_epoch}
        if self.master:
            if state.stage_epoch == 0:
                self.step.start(1, name=state.stage)

            self.step.start(2,
                            name=f'epoch {state.stage_epoch}',
                            index=state.stage_epoch)

    def on_epoch_end(self, state: RunnerState):
        self.step.end(2)

        for s in self.report.series:
            train = state.metrics.epoch_values['train'][s.key]
            val = state.metrics.epoch_values['valid'][s.key]

            task_id = self.task.parent or self.task.id
            train = ReportSeries(part='train',
                                 name=s.key,
                                 epoch=state.epoch,
                                 task=task_id,
                                 value=train,
                                 time=now(),
                                 stage=state.stage)

            val = ReportSeries(part='valid',
                               name=s.key,
                               epoch=state.epoch,
                               task=task_id,
                               value=val,
                               time=now(),
                               stage=state.stage)

            self.series_provider.add(train)
            self.series_provider.add(val)

            if s.key == self.report.metric.name:
                best = False
                task = self.task
                if task.parent:
                    task = self.task_provider.by_id(task.parent)

                if self.report.metric.minimize:
                    if task.score is None or val.value < task.score:
                        best = True
                else:
                    if task.score is None or val.value > task.score:
                        best = True
                if best:
                    task.score = val.value
                    self.task_provider.update()

    def on_stage_start(self, state: RunnerState):
        state.loggers = {
            'console': VerboseLogger(),
            'raise': RaiseExceptionLogger()
        }

    def on_stage_end(self, state: RunnerState):
        self.checkpoint_resume = False
        self.checkpoint_stage_epoch = 0
        self.step.end(1)

    @classmethod
    def _from_config(cls, executor: dict, config: Config,
                     additional_info: dict):
        args = Args()
        for k, v in executor['args'].items():
            v = str(v)
            if v in ['False', 'True']:
                v = v == 'True'
            elif v.isnumeric():
                v = int(v)

            setattr(args, k, v)

        assert 'report_config' in additional_info, 'layout was not filled'
        report_config = additional_info['report_config']
        grid_cell = additional_info.get('grid_cell')
        report = ReportLayoutInfo(report_config)
        if len(args.configs) == 0:
            args.configs = [args.config]

        grid_config = {}
        if grid_cell is not None:
            grid_config = grid_cells(executor['grid'])[grid_cell][0]

        distr_info = additional_info.get('distr_info', {})
        resume = additional_info.get('resume')
        params = executor.get('params', {})

        return cls(args=args,
                   report=report,
                   grid_config=grid_config,
                   distr_info=distr_info,
                   resume=resume,
                   trace=executor.get('trace'),
                   params=params)

    def set_dist_env(self, config):
        info = self.distr_info
        os.environ['MASTER_ADDR'] = info['master_addr']
        os.environ['MASTER_PORT'] = str(info['master_port'])
        os.environ['WORLD_SIZE'] = str(info['world_size'])

        os.environ['RANK'] = str(info['rank'])
        distributed_params = config.get('distributed_params', {})
        distributed_params['rank'] = 0
        config['distributed_params'] = distributed_params

        if info['rank'] > 0:
            self.master = False

    def parse_args_uargs(self):
        args, config = parse_args_uargs(self.args, [])
        config = merge_dicts_smart(config, self.grid_config)
        config = merge_dicts_smart(config, self.params)

        if self.distr_info:
            self.set_dist_env(config)
        return args, config

    def _checkpoint_fix_config(self, experiment):
        resume = self.resume
        if not resume:
            return

        checkpoint_dir = join(experiment.logdir, 'checkpoints')
        os.makedirs(checkpoint_dir, exist_ok=True)

        file = 'last_full.pth' if resume.get('load_last') else 'best_full.pth'

        path = join(checkpoint_dir, file)
        computer = socket.gethostname()
        if computer != resume['master_computer']:
            master_computer = self.computer_provider.by_name(
                resume['master_computer'])
            path_from = join(master_computer.root_folder,
                             str(resume['master_task_id']), 'log',
                             'checkpoints', file)
            self.info(f'copying checkpoint from: computer = '
                      f'{resume["master_computer"]} path_from={path_from} '
                      f'path_to={path}')

            success = copy_remote(session=self.session,
                                  computer_from=resume['master_computer'],
                                  path_from=path_from,
                                  path_to=path)

            if not success:
                self.error(f'copying from '
                           f'{resume["master_computer"]}/'
                           f'{path_from} failed')
            else:
                self.info('checkpoint copied successfully')

        elif self.task.id != resume['master_task_id']:
            path = join(TASK_FOLDER, str(resume['master_task_id']), 'log',
                        'checkpoints', file)
            self.info(f'master_task_id!=task.id, using checkpoint'
                      f' from task_id = {resume["master_task_id"]}')

        if not os.path.exists(path):
            self.info(f'no checkpoint at {path}')
            return

        ckpt = load_checkpoint(path)
        stages_config = experiment.stages_config
        for k, v in list(stages_config.items()):
            if k == ckpt['stage']:
                stage_epoch = ckpt['checkpoint_data']['stage_epoch'] + 1

                # if it is the last epoch in the stage
                if stage_epoch == v['state_params']['num_epochs'] \
                        or resume.get('load_best'):
                    del stages_config[k]
                    break

                self.checkpoint_stage_epoch = stage_epoch
                v['state_params']['num_epochs'] -= stage_epoch
                break
            del stages_config[k]

        stage = experiment.stages_config[experiment.stages[0]]
        for k, v in stage['callbacks_params'].items():
            if v.get('callback') == 'CheckpointCallback':
                v['resume'] = path

        self.info(f'found checkpoint at {path}')

    def _checkpoint_fix_callback(self, callbacks: dict):
        def mock(state):
            pass

        for k, c in callbacks.items():
            if not isinstance(c, CheckpointCallback):
                continue

            if c.resume:
                self.checkpoint_resume = True

            if not self.master:
                c.on_epoch_end = mock
                c.on_stage_end = mock

    def work(self):
        args, config = self.parse_args_uargs()
        set_global_seed(args.seed)

        Experiment, R = import_experiment_and_runner(Path(args.expdir))

        runner_params = config.pop('runner_params', {})

        experiment = Experiment(config)
        runner: Runner = R(**runner_params)

        register()

        self.experiment = experiment
        self.runner = runner

        stages = experiment.stages[:]

        if self.master:
            task = self.task if not self.task.parent \
                else self.task_provider.by_id(self.task.parent)
            task.steps = len(stages)
            self.task_provider.commit()

        self._checkpoint_fix_config(experiment)

        _get_callbacks = experiment.get_callbacks

        def get_callbacks(stage):
            res = self.callbacks()
            for k, v in _get_callbacks(stage).items():
                res[k] = v

            self._checkpoint_fix_callback(res)
            return res

        experiment.get_callbacks = get_callbacks

        if experiment.logdir is not None:
            dump_environment(config, experiment.logdir, args.configs)

        if self.distr_info:
            info = yaml_load(self.task.additional_info)
            info['resume'] = {
                'master_computer': self.distr_info['master_computer'],
                'master_task_id': self.task.id - self.distr_info['rank'],
                'load_best': True
            }
            self.task.additional_info = yaml_dump(info)
            self.task_provider.commit()

            experiment.stages_config = {
                k: v
                for k, v in experiment.stages_config.items()
                if k == experiment.stages[0]
            }

        runner.run_experiment(experiment, check=args.check)

        if self.master and self.trace:
            traced = trace_model_from_checkpoint(self.experiment.logdir, self)
            torch.jit.save(traced, self.trace)

        return {'stage': experiment.stages[-1], 'stages': stages}
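
A standalone sketch of the argument coercion used in _from_config above: 'True'/'False' strings become booleans and non-negative integers are parsed, while floats and negative numbers stay strings, since str.isnumeric() rejects '.' and '-':

def coerce(v):
    v = str(v)
    if v in ['False', 'True']:
        return v == 'True'
    if v.isnumeric():
        return int(v)
    return v

assert coerce('True') is True
assert coerce('3') == 3
assert coerce('0.5') == '0.5'  # '.' makes isnumeric() False
assert coerce('-1') == '-1'    # '-' makes isnumeric() False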
Example #8
class Catalyst(Executor, Callback):
    def __init__(self, args: Args, report: ReportLayoutInfo, distr_info: dict,
                 resume: dict, grid_config: dict, trace: str, params: dict,
                 **kwargs):
        super().__init__(**kwargs)

        self.series_provider = ReportSeriesProvider(self.session)
        self.computer_provider = ComputerProvider(self.session)
        self.memory_provider = MemoryProvider(self.session)

        self.order = 0
        self.resume = resume
        self.distr_info = distr_info
        self.args = args
        self.report = report
        self.experiment = None
        self.runner = None
        self.grid_config = grid_config
        self.master = True
        self.trace = trace
        self.params = params
        self.last_batch_logged = None
        self.loader_started_time = None
        self.parent = None
        self.node = CallbackNode.All

    def get_parent_task(self):
        if self.parent:
            return self.parent
        return self.task

    def callbacks(self):
        result = OrderedDict()
        if self.master:
            result['catalyst'] = self

        return result

    def on_loader_start(self, state: State):
        self.loader_started_time = now()

    def on_epoch_start(self, state: State):
        stage_index = self.experiment.stages.index(state.stage_name)
        self.step.start(1, name=state.stage_name, index=stage_index)

        self.step.start(2, name=f'epoch {state.epoch}', index=state.epoch - 1)

    def on_batch_start(self, state: State):
        if self.last_batch_logged and state.loader_step != state.loader_len:
            if (now() - self.last_batch_logged).total_seconds() < 10:
                return

        task = self.get_parent_task()
        task.batch_index = state.loader_step
        task.batch_total = state.loader_len
        task.loader_name = state.loader_name

        duration = int((now() - self.loader_started_time).total_seconds())
        task.epoch_duration = duration
        task.epoch_time_remaining = int(
            duration *
            (task.batch_total / task.batch_index)) - task.epoch_duration
        if state.epoch_metrics.get('train_loss') is not None:
            task.loss = float(state.epoch_metrics['train_loss'])
        if state.epoch_metrics.get('valid_loss') is not None:
            task.loss = float(state.epoch_metrics['valid_loss'])

        self.task_provider.update()
        self.last_batch_logged = now()

    def on_epoch_end(self, state: State):
        self.step.end(2)

        values = state.epoch_metrics

        for k, v in values.items():
            part = ''
            name = k

            for loader in state.loaders:
                if k.startswith(loader):
                    part = loader
                    name = k.replace(loader, '')
                    if name.startswith('_'):
                        name = name[1:]

            task_id = self.task.parent or self.task.id
            series = ReportSeries(part=part,
                                  name=name,
                                  epoch=state.epoch - 1,
                                  task=task_id,
                                  value=v,
                                  time=now(),
                                  stage=state.stage_name)
            self.series_provider.add(series)

            if name == self.report.metric.name:
                best = False
                task = self.task
                if task.parent:
                    task = self.task_provider.by_id(task.parent)

                if self.report.metric.minimize:
                    if task.score is None or v < task.score:
                        best = True
                else:
                    if task.score is None or v > task.score:
                        best = True
                if best:
                    task.score = v
                    self.task_provider.update()

    def on_stage_end(self, state: State):
        self.step.end(1)

    @classmethod
    def _from_config(cls, executor: dict, config: Config,
                     additional_info: dict):
        args = Args()
        for k, v in executor['args'].items():
            v = str(v)
            if v in ['False', 'True']:
                v = v == 'True'
            elif v.isnumeric():
                v = int(v)

            setattr(args, k, v)

        assert 'report_config' in additional_info, 'layout was not filled'
        report_config = additional_info['report_config']
        report = ReportLayoutInfo(report_config)
        if len(args.configs) == 0:
            args.configs = [args.config]

        distr_info = additional_info.get('distr_info', {})
        resume = additional_info.get('resume')
        params = executor.get('params', {})
        params.update(additional_info.get('params', {}))

        grid_config = executor.copy()
        grid_config.pop('args', '')

        return cls(args=args,
                   report=report,
                   grid_config=grid_config,
                   distr_info=distr_info,
                   resume=resume,
                   trace=executor.get('trace'),
                   params=params)

    def set_dist_env(self, config):
        info = self.distr_info
        os.environ['MASTER_ADDR'] = info['master_addr']
        os.environ['MASTER_PORT'] = str(info['master_port'])
        os.environ['WORLD_SIZE'] = str(info['world_size'])

        os.environ['RANK'] = str(info['rank'])
        os.environ['LOCAL_RANK'] = "0"
        distributed_params = config.get('distributed_params', {})
        distributed_params['rank'] = info['rank']
        config['distributed_params'] = distributed_params

        torch.cuda.set_device(0)

        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")

        if info['rank'] > 0:
            self.master = False
            self.node = CallbackNode.Worker
        else:
            self.node = CallbackNode.Master

    def parse_args_uargs(self):
        args, config = parse_args_uargs(self.args, [])
        config = merge_dicts_smart(config, self.grid_config)
        config = merge_dicts_smart(config, self.params)

        if self.distr_info:
            self.set_dist_env(config)
        return args, config

    def _fix_memory(self, experiment):
        if not torch.cuda.is_available():
            return
        max_memory = torch.cuda.get_device_properties(0).total_memory / (2**30)
        stages_config = experiment.stages_config
        for k, v in list(stages_config.items()):
            query = {}
            # noinspection PyProtectedMember
            for kk, vv in experiment._config['model_params'].items():
                query[kk] = vv
            for kk, vv in v['data_params'].items():
                query[kk] = vv
            variants = self.memory_provider.find(query)
            variants = [v for v in variants if v.memory < max_memory]
            if len(variants) == 0:
                continue
            variant = max(variants, key=lambda x: x.memory)
            v['data_params']['batch_size'] = variant.batch_size

    def _checkpoint_fix_config(self, experiment):
        resume = self.resume
        if not resume:
            return
        if experiment.logdir is None:
            return

        checkpoint_dir = join(experiment.logdir, 'checkpoints')
        os.makedirs(checkpoint_dir, exist_ok=True)

        file = 'last_full.pth' if resume.get('load_last') else 'best_full.pth'

        path = join(checkpoint_dir, file)
        computer = socket.gethostname()
        if computer != resume['master_computer']:
            master_computer = self.computer_provider.by_name(
                resume['master_computer'])
            path_from = join(master_computer.root_folder,
                             str(resume['master_task_id']), experiment.logdir,
                             'checkpoints', file)
            self.info(f'copying checkpoint from: computer = '
                      f'{resume["master_computer"]} path_from={path_from} '
                      f'path_to={path}')

            success = copy_remote(session=self.session,
                                  computer_from=resume['master_computer'],
                                  path_from=path_from,
                                  path_to=path)

            if not success:
                self.error(f'copying from '
                           f'{resume["master_computer"]}/'
                           f'{path_from} failed')
            else:
                self.info('checkpoint copied successfully')

        elif self.task.id != resume['master_task_id']:
            path = join(TASK_FOLDER, str(resume['master_task_id']),
                        experiment.logdir, 'checkpoints', file)
            self.info(f'master_task_id!=task.id, using checkpoint'
                      f' from task_id = {resume["master_task_id"]}')

        if not os.path.exists(path):
            self.info(f'no checkpoint at {path}')
            return

        ckpt = load_checkpoint(path)
        stages_config = experiment.stages_config
        for k, v in list(stages_config.items()):
            if k == ckpt['stage']:
                stage_epoch = ckpt['checkpoint_data']['epoch'] + 1

                # if it is the last epoch in the stage
                if stage_epoch >= v['state_params']['num_epochs'] \
                        or resume.get('load_best'):
                    del stages_config[k]
                    break

                self.checkpoint_stage_epoch = stage_epoch
                v['state_params']['num_epochs'] -= stage_epoch
                break
            del stages_config[k]

        stage = experiment.stages_config[experiment.stages[0]]
        for k, v in stage['callbacks_params'].items():
            if v.get('callback') == 'CheckpointCallback':
                v['resume'] = path

        self.info(f'found checkpoint at {path}')

    def _checkpoint_fix_callback(self, callbacks: dict):
        def mock(state):
            pass

        for k, c in callbacks.items():
            if not isinstance(c, CheckpointCallback):
                continue

            if c.resume:
                self.checkpoint_resume = True

            if not self.master:
                c.on_epoch_end = mock
                c.on_stage_end = mock
                c.on_batch_start = mock

    def work(self):
        args, config = self.parse_args_uargs()
        set_global_seed(args.seed)

        Experiment, R = import_experiment_and_runner(Path(args.expdir))

        runner_params = config.pop('runner_params', {})

        experiment = Experiment(config)
        runner: Runner = R(**runner_params)

        self.experiment = experiment
        self.runner = runner

        stages = experiment.stages[:]

        if self.task.parent:
            self.parent = self.task_provider.by_id(self.task.parent)

        if self.master:
            task = self.get_parent_task()
            task.steps = len(stages)
            self.task_provider.commit()

        self._checkpoint_fix_config(experiment)
        self._fix_memory(experiment)

        _get_callbacks = experiment.get_callbacks

        def get_callbacks(stage):
            res = self.callbacks()
            for k, v in _get_callbacks(stage).items():
                res[k] = v

            self._checkpoint_fix_callback(res)
            return res

        experiment.get_callbacks = get_callbacks

        if experiment.logdir is not None:
            dump_environment(config, experiment.logdir, args.configs)

        if self.distr_info:
            info = yaml_load(self.task.additional_info)
            info['resume'] = {
                'master_computer': self.distr_info['master_computer'],
                'master_task_id': self.task.id - self.distr_info['rank'],
                'load_best': True
            }
            self.task.additional_info = yaml_dump(info)
            self.task_provider.commit()

            experiment.stages_config = {
                k: v
                for k, v in experiment.stages_config.items()
                if k == experiment.stages[0]
            }

        runner.run_experiment(experiment)
        if runner.state.exception:
            raise runner.state.exception

        if self.master and self.trace:
            traced = trace_model_from_checkpoint(self.experiment.logdir, self)
            torch.jit.save(traced, self.trace)
        return {'stage': experiment.stages[-1], 'stages': stages}
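
The epoch_time_remaining estimate in on_batch_start above is a linear extrapolation from the batches completed so far; a worked example, assuming 100 of 400 batches finished in 30 seconds:

duration = 30                       # seconds elapsed in this loader
batch_index, batch_total = 100, 400
remaining = int(duration * (batch_total / batch_index)) - duration
assert remaining == 90              # 30 * 4 - 30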
Example #9
    def sync(self):
        hostname = socket.gethostname()
        try:
            provider = ComputerProvider(self.session)
            task_synced_provider = TaskSyncedProvider(self.session)

            computer = provider.by_name(hostname)
            sync_start = now()

            if FILE_SYNC_INTERVAL == 0:
                time.sleep(1)
            else:
                self.sync_manual(computer, provider)

                computers = provider.all_with_last_activtiy()
                computers = [
                    c for c in computers
                    if (now() - c.last_activity).total_seconds() < 10
                ]
                computers_names = {c.name for c in computers}

                for c, project, tasks in task_synced_provider.for_computer(
                        computer.name):
                    if c.sync_with_this_computer:
                        if c.name not in computers_names:
                            self.logger.info(f'Computer = {c.name} '
                                             f'is offline. Cannot sync',
                                             ComponentType.WorkerSupervisor,
                                             hostname)
                            continue

                        if c.syncing_computer:
                            continue

                        sync_folders = yaml_load(project.sync_folders)
                        ignore_folders = yaml_load(project.ignore_folders)

                        sync_folders = correct_folders(sync_folders,
                                                       project.name)
                        ignore_folders = correct_folders(ignore_folders,
                                                         project.name)

                        if not isinstance(sync_folders, list):
                            sync_folders = []
                        if not isinstance(ignore_folders, list):
                            ignore_folders = []

                        folders = [[s, ignore_folders] for s in sync_folders]

                        computer.syncing_computer = c.name
                        provider.update()

                        sync_directed(self.session, c, computer, folders)

                    for t in tasks:
                        task_synced_provider.add(
                            TaskSynced(computer=computer.name, task=t.id)
                        )

                    time.sleep(FILE_SYNC_INTERVAL)

            computer.last_synced = sync_start
            computer.syncing_computer = None
            provider.update()
        except Exception as e:
            self.process_error(e)