Example #1
    def execute(self):
        print("Executing History Command")
        saver = Saver()
        history = saver.getHistory()
        specified_services = [serv.ID for serv in self._services]

        if self._args.only is not None:
            arg_services = self._args.only[0].split(',')
            # check if every specified service is in the available services list
            for s in arg_services:
                if s not in [serv.ID for serv in self._services]:
                    print("Invalid service: %s\nExiting" % s)
                    exit(-1)

            # remaining services are the ones in the 'only' argument list
            specified_services = [serv.ID for serv in self._services if serv.ID in arg_services]

        # print out the history
        for sid in history.keys():
            if sid in specified_services:
                print("=========[ %s ]=========" % (sid))
                # order the entries by date so the history prints chronologically
                # (assuming order_entries_dic returns the entries as a mapping)
                ordered_dates = saver.order_entries_dic(history[sid])
                for date, status in ordered_dates.items():
                    print("  -> %s %s" % (date, status))
                print("==============================")
Example #2
    def __init__(self, config, session=None):
        self.config = config
        self.save = config.bool("save", True)
        self.task = config.string("task", "train")
        self.dataset = config.string("dataset").lower()
        self.num_epochs = config.int("num_epochs", 1000)
        self.session = self._create_session(session)
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.iou_threshold = config.float("iou_threshold", 0.85)
        self.avg_clicks = {}

        need_train = True  # TODO: should be self.task != "eval", but right now testnet needs to reuse variables from train
        if need_train:
            self.train_data = load_eval_dataset(config, "train")
            freeze_batchnorm = config.bool("freeze_batchnorm", False)
            print("creating trainnet...", file=log.v1)
            self.train_network = Network(self.config,
                                         self.train_data,
                                         is_trainnet=True,
                                         freeze_batchnorm=freeze_batchnorm,
                                         name="trainnet")
        else:
            self.train_data = None
            self.train_network = None

        need_val = self.task != "train_no_val"
        if need_val:
            self.valid_data = load_eval_dataset(config, "valid")
            print("creating testnet...", file=log.v1)
            self.test_network = Network(config,
                                        self.valid_data,
                                        is_trainnet=False,
                                        freeze_batchnorm=True,
                                        name="testnet")
        else:
            self.valid_data = None
            self.test_network = None
        self.trainer = Trainer(config, self.train_network, self.test_network,
                               self.global_step, self.session)
        self.saver = Saver(config, self.session)

        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()

        self.start_epoch = self.saver.try_load_weights()
        self.session.graph.finalize()
        self.recursive_rounds = config.int("recursive_rounds", 20)
        self.avg_clicks = {}
        self.locality = config.bool("locality", False)
        self.locality_threshold = config.int("locality_threshold", 100)
        self.monotonicity = config.bool("monotonicity", False)

        if self.locality:
            print("Using locality postprocessing...", file=log.v1)
        if self.monotonicity:
            print("Using monotonicity postprocessing...", file=log.v1)
Example #3
class DoPoll(Command):
    ID = 'poll'
    HELP = 'Retrieves the status of all configured services'

    def __init__(self, service_list, args):
        self._services = service_list
        self._args = args
        self._saver = Saver()

    def __str__(self):
        return self.__class__.__name__

    def execute(self):
        st = StatusChecker()
        status_list = []

        if self._args.only is not None:
            arg_services = self._args.only[0].split(',')
            # check if every specified service is in the available services list
            for s in arg_services:
                if s not in [serv.ID for serv in self._services]:
                    print("Invalid service: %s\nExiting" % s)
                    exit(-1)

            remaining_services = [
                serv for serv in self._services if serv.ID in arg_services
            ]

        elif self._args.exclude is not None:
            arg_services = self._args.exclude[0].split(',')
            # check if every specified service is in the available services list
            for s in arg_services:
                if s not in [serv.ID for serv in self._services]:
                    print("Invalid service: %s\nExiting" % s)
                    exit(-1)

            remaining_services = [
                serv for serv in self._services if serv.ID not in arg_services
            ]
        else:
            remaining_services = self._services

        for service in remaining_services:
            status_list.append(service.accept(st))

        self._saver.json(status_list)

        for s in status_list:
            print(s)
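A quick sketch of the --only / --exclude filtering in isolation, using SimpleNamespace objects as hypothetical stand-ins for the configured services:

from types import SimpleNamespace

# hypothetical services; only the ID attribute matters for the filtering
services = [SimpleNamespace(ID=sid) for sid in ("web", "db", "cache")]

arg_services = "web,db".split(',')
only = [serv for serv in services if serv.ID in arg_services]
excluded = [serv for serv in services if serv.ID not in arg_services]
print([s.ID for s in only])      # ['web', 'db']
print([s.ID for s in excluded])  # ['cache']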
Example #4
    def __init__(self, config, session=None):
        self.config = config
        self.save = config.bool("save", True)
        self.task = config.string("task", "train")
        self.dataset = config.string("dataset").lower()
        self.num_epochs = config.int("num_epochs", 1000)
        self.session = self._create_session(session)
        self.global_step = tf.Variable(0, name='global_step', trainable=False)

        need_train = True  # TODO should be self.task != "eval", but right now testnet needs to reuse variables from train
        if need_train:
            self.train_data = load_dataset(config, "train", self.session,
                                           self.dataset)
            freeze_batchnorm = config.bool("freeze_batchnorm", False)
            print("creating trainnet...", file=log.v1)
            self.train_network = Network(self.config,
                                         self.train_data,
                                         is_trainnet=True,
                                         freeze_batchnorm=freeze_batchnorm,
                                         name="trainnet")
        else:
            self.train_data = None
            self.train_network = None

        need_val = self.task != "train_no_val"
        if need_val:
            self.valid_data = load_dataset(config, "val", self.session,
                                           self.dataset)
            print("creating testnet...", file=log.v1)
            reuse_variables = None if need_train else False
            self.test_network = Network(config,
                                        self.valid_data,
                                        is_trainnet=False,
                                        freeze_batchnorm=True,
                                        name="testnet",
                                        reuse_variables=reuse_variables)
        else:
            self.valid_data = None
            self.test_network = None
        self.trainer = Trainer(config, self.train_network, self.test_network,
                               self.global_step, self.session)
        self.saver = Saver(config, self.session)

        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()

        self.start_epoch = self.saver.try_load_weights()
        self.session.graph.finalize()
Example #5
    def execute(self):
        saver = Saver()
        if self._args.format is not None:
            saver.backup(self._args.path, self._args.format)
        else:
            saver.backup(self._args.path)

        print("Backup to %s was succesfull." % self._args.path)
Example #6
    def __init__(self, services):
        self.local_storage = '../history.json'
        self.saver = Saver()
        self.allowed_extensions = ['txt', 'json']
        self.services = services
Example #7
class Importer:
    def __init__(self, services):
        self.local_storage = '../history.json'
        self.saver = Saver()
        self.allowed_extensions = ['txt', 'json']
        self.services = services

    def json(self, data, path=None):
        pass

    def csv(self, path=None):
        pass

    def is_valid_extension(self, path):
        ext = path.split('.')[-1]

        return ext in self.allowed_extensions

    def import_file(self, path, merge=False):
        # check extension
        if not self.is_valid_extension(path):
            print("Invalid file format. Exiting...")
            exit(-1)
        print(path)
        external_data = self.load(path)
        print("EXTERNAL", external_data)
        self.check_integrity(external_data)

        # if it passed the integrity check, we can save it as our local history file
        if not merge:
            self.saver.overwrite_history(external_data)

        else:
            sids = [s.ID for s in self.services]
            history = self.saver.getHistory()
            print("HISTORY", history)
            final = {}
            print(sids)
            for sid in sids:
                # join the dictionaries; a service missing from either side
                # contributes an empty dict, and external entries override
                # local ones for the same date
                final[sid] = {**history.get(sid, {}),
                              **external_data.get(sid, {})}

            print("FINAL", final)
            self.saver.overwrite_history(final)

    def check_integrity(self, data):
        sids = [s.ID for s in self.services]

        # validate services
        for sid in data.keys():
            if sid not in sids:
                print("Invalid service. Exiting...")
                exit(-1)

        # validate dates
        try:
            for sid, entries in data.items():
                for date, value in entries.items():
                    datetime.datetime.strptime(date, "%x %X")
        except ValueError:
            print("Invalid date format. Exiting...")
            exit(-1)

    def load(self, path):
        # try to open the file
        try:
            file = open(path, "r")
        except OSError as err:
            # if it doesn't exist or permission is denied
            print("OS error: {0}".format(err))
            exit(-1)

        # fall back to an empty history if the file doesn't contain valid JSON
        try:
            storage = json.load(file)
        except json.decoder.JSONDecodeError:
            storage = {}
        finally:
            file.close()

        return storage
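The merge in import_file leans on dict unpacking, where keys from the right-hand dict win; a standalone illustration with made-up entries:

# external entries override local ones for the same date, while dates
# unique to either side are kept
history_entry = {"01/01/20 09:00:00": "up", "01/02/20 09:00:00": "down"}
external_entry = {"01/02/20 09:00:00": "up", "01/03/20 09:00:00": "up"}
print({**history_entry, **external_entry})
# {'01/01/20 09:00:00': 'up', '01/02/20 09:00:00': 'up', '01/03/20 09:00:00': 'up'}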
Example #8
class Engine:
    def __init__(self, config, session=None):
        self.config = config
        self.save = config.bool("save", True)
        self.task = config.string("task", "train")
        self.dataset = config.string("dataset").lower()
        self.num_epochs = config.int("num_epochs", 1000)
        self.session = self._create_session(session)
        self.global_step = tf.Variable(0, name='global_step', trainable=False)

        need_train = True  # TODO should be self.task != "eval", but right now testnet needs to reuse variables from train
        if need_train:
            self.train_data = load_dataset(config, "train", self.session,
                                           self.dataset)
            freeze_batchnorm = config.bool("freeze_batchnorm", False)
            print("creating trainnet...", file=log.v1)
            self.train_network = Network(self.config,
                                         self.train_data,
                                         is_trainnet=True,
                                         freeze_batchnorm=freeze_batchnorm,
                                         name="trainnet")
        else:
            self.train_data = None
            self.train_network = None

        need_val = self.task != "train_no_val"
        if need_val:
            self.valid_data = load_dataset(config, "val", self.session,
                                           self.dataset)
            print("creating testnet...", file=log.v1)
            reuse_variables = None if need_train else False
            self.test_network = Network(config,
                                        self.valid_data,
                                        is_trainnet=False,
                                        freeze_batchnorm=True,
                                        name="testnet",
                                        reuse_variables=reuse_variables)
        else:
            self.valid_data = None
            self.test_network = None
        self.trainer = Trainer(config, self.train_network, self.test_network,
                               self.global_step, self.session)
        self.saver = Saver(config, self.session)

        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()

        self.start_epoch = self.saver.try_load_weights()
        self.session.graph.finalize()

    @staticmethod
    def _create_session(sess):
        if sess is None:
            sess_config = tf.ConfigProto(allow_soft_placement=True)
            sess_config.gpu_options.allow_growth = True
            sess = tf.InteractiveSession(config=sess_config)
        return sess

    def run(self):
        if self.task in ("train", "train_no_val"):
            self.train()
        elif self.task == "eval":
            self.eval()
        else:
            assert False, ("unknown task", self.task)

    def test_dataset_speed(self):
        n_total = self.train_data.n_examples_per_epoch()
        batch_size = self.config.int("batch_size")
        input_tensors_dict = self.train_network.input_tensors_dict
        n_curr = 0
        with Timer(message="elapsed"):
            while n_curr < n_total:
                self.session.run(input_tensors_dict)
                n_curr += batch_size
                print("{:>5}".format(n_curr), "/", n_total)

    def train(self):
        print("starting training", file=log.v1)
        for epoch in range(self.start_epoch, self.num_epochs):
            timer = Timer()
            train_measures = self.run_epoch(self.trainer.train_step,
                                            self.train_data,
                                            epoch,
                                            is_train_run=True)
            if self.valid_data is not None:
                valid_measures = self.run_epoch(self.trainer.validation_step,
                                                self.valid_data,
                                                epoch,
                                                is_train_run=False)
            else:
                valid_measures = {}
            if self.save:
                self.saver.save_model(epoch + 1)
                if hasattr(self.train_data, "save_masks"):
                    self.train_data.save_masks(epoch + 1)
            elapsed = timer.elapsed()
            train_measures_str = measures_string_to_print(train_measures)
            val_measures_str = measures_string_to_print(valid_measures)
            print("epoch",
                  epoch + 1,
                  "finished. elapsed:",
                  "%.5f" % elapsed,
                  "train:",
                  train_measures_str,
                  "valid:",
                  val_measures_str,
                  file=log.v1)

    def eval(self):
        timer = Timer()
        measures = self.run_epoch(self.trainer.validation_step,
                                  self.valid_data,
                                  epoch=0,
                                  is_train_run=False)
        elapsed = timer.elapsed()
        print("eval finished. elapsed:", elapsed, measures, file=log.v1)

    @staticmethod
    def run_epoch(step_fn, data, epoch, is_train_run):
        n_examples_processed = 0
        n_examples_per_epoch = data.n_examples_per_epoch()
        extraction_keys = data.get_extraction_keys()
        measures_accumulated = {}
        if not is_train_run and hasattr(data, "prepare_saving_epoch_measures"):
            data.prepare_saving_epoch_measures(epoch + 1)
        while n_examples_processed < n_examples_per_epoch:
            timer = Timer()
            n_examples_processed_total = (n_examples_per_epoch *
                                          epoch) + n_examples_processed
            res = step_fn(
                epoch,
                n_examples_processed_total=n_examples_processed_total,
                extraction_keys=extraction_keys)
            measures = res[Measures.MEASURES]
            n_examples_processed += measures[Measures.N_EXAMPLES]
            measures_str = measures_string_to_print(
                compute_measures_average(measures, for_final_result=False))
            accumulate_measures(measures_accumulated, measures)
            if not is_train_run and hasattr(data, "save_epoch_measures"):
                data.save_epoch_measures(measures)
            if hasattr(data, "use_segmentation_mask"):
                data.use_segmentation_mask(res)
            elapsed = timer.elapsed()
            print("{:>5}".format(n_examples_processed),
                  '/',
                  n_examples_per_epoch,
                  measures_str,
                  "elapsed",
                  elapsed,
                  file=log.v5)
        measures_averaged = compute_measures_average(measures_accumulated,
                                                     for_final_result=True)
        if not is_train_run and hasattr(data,
                                        "finalize_saving_epoch_measures"):
            new_measures = data.finalize_saving_epoch_measures()
            measures_averaged.update(new_measures)
        return measures_averaged
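run_epoch counts progress in examples rather than steps; a toy sketch of that accumulation loop, with a hypothetical step_fn standing in for the trainer's train/validation step:

# each step reports how many examples it consumed; the loop ends once an
# epoch's worth has been processed, so the last batch may overshoot
def step_fn(batch_size=4):
    return {"n_examples": batch_size}

n_examples_per_epoch = 10
n_examples_processed = 0
while n_examples_processed < n_examples_per_epoch:
    res = step_fn()
    n_examples_processed += res["n_examples"]
    print("{:>5}".format(n_examples_processed), "/", n_examples_per_epoch)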
Example #9
    def __init__(self, config, session=None):
        self.config = config
        self.save = config.bool("save", True)
        self.task = config.string("task", "train")
        self.dataset = config.string("dataset").lower()
        self.dataset_val = config.string("dataset_val", "").lower()
        if self.dataset_val == "":
            self.dataset_val = self.dataset
        self.num_epochs = config.int("num_epochs", 1000)
        self.session = self._create_session(session)
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.build_networks = config.bool("build_networks", True)

        need_train = self.task not in ("eval", "forward_detections_recurrent",
                                       "forward_tracking",
                                       "forward_clustering",
                                       "forward_refine_bboxes",
                                       "forward_crop_detection",
                                       "forward_mot_detection_rescore",
                                       "forward_mots_detection_rescore",
                                       "forward_mot_detection_linking",
                                       "forward_refine_detection_bboxes")
        if need_train:
            self.train_data = load_dataset(config, "train", self.session,
                                           self.dataset)
            freeze_batchnorm = config.bool("freeze_batchnorm", False)
            print("creating trainnet...", file=log.v1)
            if self.build_networks:
                self.train_network = Network(self.config,
                                             self.train_data,
                                             is_trainnet=True,
                                             freeze_batchnorm=freeze_batchnorm,
                                             name="trainnet")
            else:
                self.train_network = None
        else:
            self.train_data = None
            self.train_network = None

        need_val = self.task != "train_no_val"
        if need_val:
            self.valid_data = load_dataset(config, "val", self.session,
                                           self.dataset_val)
            print("creating testnet...", file=log.v1)
            reuse_variables = None if need_train else False
            if self.build_networks:
                self.test_network = Network(config,
                                            self.valid_data,
                                            is_trainnet=False,
                                            freeze_batchnorm=True,
                                            name="testnet",
                                            reuse_variables=reuse_variables)
            else:
                self.test_network = None
        else:
            self.valid_data = None
            self.test_network = None
        self.trainer = Trainer(config, self.train_network, self.test_network,
                               self.global_step, self.session)
        self.saver = Saver(config, self.session)

        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()

        self.start_epoch = self.saver.try_load_weights()
        self.session.graph.finalize()
Example #10
    def execute(self):
        def run_durations(entries):
            # scan the entries (date at index 1, status at index 2) and
            # collect one duration per run of consecutive non-down statuses
            durations = []
            in_run = False
            for e in entries:
                date = e[1]
                status = e[2]
                # the first entry of a run opens it with a zero-length duration
                if not in_run and status != 'down':
                    in_run = datetime.datetime.strptime(date, "%x %X")
                    durations.append(datetime.timedelta(0))
                # an 'up' entry in the middle of a run extends its duration
                elif status == 'up':
                    durations[-1] = datetime.datetime.strptime(
                        date, "%x %X") - in_run
                # any other status while in a run ends it
                elif in_run and status != 'up':
                    in_run = False
            return durations

        def mttf(entries):
            durations = run_durations(entries)
            # with at most one run the service has not failed yet
            if len(durations) <= 1:
                return None
            return sum(durations, datetime.timedelta(0)) / len(durations)

        def uptime(entries):
            return sum(run_durations(entries), datetime.timedelta(0))

        saver = Saver()
        history = saver.getHistory()

        # print table header
        print("| service       | uptime        | mttf          |")
        print("+---------------+---------------+---------------+")
        space = 14

        template = "| {}| {}| {}|"
        for sid in history.keys():
            entries = saver.get_entries(sid)
            upt_t = str(int(uptime(entries) /
                            datetime.timedelta(minutes=1))) + "min"
            mttf_val = mttf(entries)
            if mttf_val is not None:
                mttf_t = str(int(mttf_val /
                                 datetime.timedelta(minutes=1))) + "min"
            else:
                mttf_t = 'infinite'

            # assuming the service id is always less than 16 characters long
            print(
                template.format(sid.ljust(space, ' '), upt_t.ljust(space, ' '),
                                mttf_t.ljust(space, ' ')))
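The "min" columns work because dividing one timedelta by another yields a plain float, which the table then truncates:

import datetime

total_up = datetime.timedelta(hours=2, minutes=30)
print(int(total_up / datetime.timedelta(minutes=1)))  # 150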
Example #11
class EvalPascalRecursive(Engine):
    def __init__(self, config, session=None):
        self.config = config
        self.save = config.bool("save", True)
        self.task = config.string("task", "train")
        self.dataset = config.string("dataset").lower()
        self.num_epochs = config.int("num_epochs", 1000)
        self.session = self._create_session(session)
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.iou_threshold = config.float("iou_threshold", 0.85)
        self.avg_clicks = {}

        need_train = True  # TODO: should be self.task != "eval", but right now testnet needs to reuse variables from train
        if need_train:
            self.train_data = load_eval_dataset(config, "train")
            freeze_batchnorm = config.bool("freeze_batchnorm", False)
            print("creating trainnet...", file=log.v1)
            self.train_network = Network(self.config,
                                         self.train_data,
                                         is_trainnet=True,
                                         freeze_batchnorm=freeze_batchnorm,
                                         name="trainnet")
        else:
            self.train_data = None
            self.train_network = None

        need_val = self.task != "train_no_val"
        if need_val:
            self.valid_data = load_eval_dataset(config, "valid")
            print("creating testnet...", file=log.v1)
            self.test_network = Network(config,
                                        self.valid_data,
                                        is_trainnet=False,
                                        freeze_batchnorm=True,
                                        name="testnet")
        else:
            self.valid_data = None
            self.test_network = None
        self.trainer = Trainer(config, self.train_network, self.test_network,
                               self.global_step, self.session)
        self.saver = Saver(config, self.session)

        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()

        self.start_epoch = self.saver.try_load_weights()
        self.session.graph.finalize()
        self.recursive_rounds = config.int("recursive_rounds", 20)
        self.avg_clicks = {}
        self.locality = config.bool("locality", False)
        self.locality_threshold = config.int("locality_threshold", 100)
        self.monotonicity = config.bool("monotonicity", False)

        if self.locality:
            print("Using locality postprocessing...", file=log.v1)
        if self.monotonicity:
            print("Using monotonicity postprocessing...", file=log.v1)

    def eval(self):
        for round in range(self.recursive_rounds):
            timer_round = Timer()
            n_examples_processed = 0
            n_examples_per_epoch = self.valid_data.n_examples_per_epoch()
            measures_accumulated = {}

            while n_examples_processed < n_examples_per_epoch:
                timer = Timer()
                extraction_keys = [
                    Extractions.SEGMENTATION_MASK_INPUT_SIZE,
                    Extractions.SEGMENTATION_MASK_ORIGINAL_SIZE,
                    DataKeys.IMAGE_FILENAMES, DataKeys.RAW_IMAGES,
                    DataKeys.INPUTS, DataKeys.SEGMENTATION_LABELS,
                    Extractions.SEGMENTATION_POSTERIORS
                ]
                res = self.trainer.validation_step(
                    round, extraction_keys=extraction_keys)
                res = self.postprocess(res)
                measures = res[Measures.MEASURES]
                n_examples_processed += measures[Measures.N_EXAMPLES]
                measures_str = measures_string_to_print(
                    compute_measures_average(measures, for_final_result=False))
                # add tag to the measures so that it is printed (postprocessing for davis)
                measures_str = self.add_tag(measures_str, round,
                                            res[Extractions.EXTRACTIONS])
                accumulate_measures(measures_accumulated, measures)
                elapsed = timer.elapsed()
                self.valid_data.use_segmentation_mask(res)
                self.update_iou(measures, res[Extractions.EXTRACTIONS],
                                round + 1)
                print("{:>5}".format(n_examples_processed),
                      '/',
                      n_examples_per_epoch,
                      measures_str,
                      "elapsed",
                      elapsed,
                      file=log.v5)
            measures_averaged = compute_measures_average(measures_accumulated,
                                                         for_final_result=True)
            print("Click ",
                  round,
                  " eval finished. elapsed:",
                  timer_round.elapsed(),
                  measures_averaged,
                  file=log.v1)

        print("Samples: " + str(len(self.avg_clicks.keys())), file=log.v1)
        print("Avg clicks for",
              self.iou_threshold * 100,
              " % IOU is ",
              np.average(list(self.avg_clicks.values())),
              file=log.v1)

    def add_tag(self, measures, round, extractions):
        img_filename = extract(extractions, DataKeys.IMAGE_FILENAMES)
        basename = img_filename.decode("utf-8").split("/")[-2:]
        img_filename = basename[0] + basename[1]
        measures += "  {" + img_filename + "}  " + "{clicks: " + str(
            round) + "}"
        return measures

    def update_iou(self, measures, extractions, click):
        img_filename = extract(extractions, DataKeys.IMAGE_FILENAMES)
        measures_avg = compute_measures_average(measures,
                                                for_final_result=False)
        iou = float(measures_avg[Measures.IOU])

        if (iou >= self.iou_threshold or click == self.recursive_rounds
            ) and img_filename not in self.avg_clicks:
            self.avg_clicks[img_filename] = click

    def postprocess(self, res):
        result_measures = res[Measures.MEASURES]
        img_filename = extract(res[Extractions.EXTRACTIONS],
                               DataKeys.IMAGE_FILENAMES)
        prediction = extract(res[Extractions.EXTRACTIONS],
                             Extractions.SEGMENTATION_MASK_INPUT_SIZE)
        prev_data = self.valid_data.previous_epoch_data
        if img_filename in prev_data and PREV_MASK in prev_data[img_filename]:
            prev_mask = decodeMask(prev_data[img_filename][PREV_MASK])
        else:
            prev_mask = None
        label = extract(res[Extractions.EXTRACTIONS],
                        DataKeys.SEGMENTATION_LABELS)

        if prev_mask is not None:
            measures = result_measures.copy()
            current_click = self.valid_data.previous_epoch_data[img_filename][
                CURRENT_CLICK]
            dt = get_distance_transform(current_click, label)[:, :, 0]
            if self.locality:
                prediction = np.where(dt > self.locality_threshold, prev_mask,
                                      prediction)
                measures = Measures.compute_measures_for_binary_segmentation_single_image(
                    prediction, label[:, :, 0])
            if self.monotonicity:
                if len(current_click) > 0:
                    if label[current_click[0][0], current_click[0][1]] == 1:
                        # positive click
                        mask = prev_mask - prediction
                    else:
                        mask = prediction - prev_mask
                    prediction = np.where(mask == 1, prev_mask, prediction)
                    measures = Measures.compute_measures_for_binary_segmentation_single_image(
                        prediction, label[:, :, 0])

            for k, v in measures.items():
                result_measures[k] = v

        res[Measures.MEASURES] = result_measures
        res[Extractions.EXTRACTIONS][
            Extractions.SEGMENTATION_MASK_INPUT_SIZE][0] = [prediction]

        return res
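The locality rule in postprocess is a single np.where over the distance transform; a toy illustration with made-up 2x2 arrays:

import numpy as np

# pixels farther than the threshold from the click keep the previous mask;
# pixels near the click take the new prediction
dt = np.array([[5., 120.], [80., 200.]])   # distance from the click, per pixel
prev_mask = np.array([[1, 1], [0, 0]])
prediction = np.array([[0, 0], [1, 1]])
locality_threshold = 100
print(np.where(dt > locality_threshold, prev_mask, prediction))
# [[0 1]
#  [1 0]]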
Example #12
    def __init__(self, service_list, args):
        self._services = service_list
        self._args = args
        self._saver = Saver()