Exemplo n.º 1
0
    def initialize(self, exp_config, resume):
        """Set up server state and send an init message to every client.

        Waits for all client connections first, then either restores a
        previous run from ``self.save_path`` (``resume=True``) or starts
        fresh (``resume=False``), and finally sends one
        ``ServerToClientInitMessage`` per client over the socket.

        Args:
            exp_config: experiment configuration forwarded to each client
                inside its init message.
            resume: if True, reload metrics/model from disk and continue
                from the last recorded round.
        """
        list_extra_params = self.get_init_extra_params()

        # Block until all expected clients have connected.
        self.socket.wait_for_connections()

        if resume:
            print("Resuming server...")
            # Restore the evaluation history persisted by a previous run.
            self.list_loss = load(os.path.join(self.save_path, "loss.pt"))
            self.list_acc = load(os.path.join(self.save_path, "accuracy.pt"))
            self.list_time_stamp = load(os.path.join(self.save_path, "time.pt"))
            self.list_model_size = load(os.path.join(self.save_path, "model_size.pt"))

            self.model = load(os.path.join(self.save_path, "model.pt"))

            # Loss and accuracy are recorded together; they must agree.
            num_loss_acc = len(self.list_loss)
            assert len(self.list_acc) == num_loss_acc

            # Time stamps and model sizes are recorded together as well.
            num_evals = len(self.list_time_stamp)
            assert len(self.list_model_size) == num_evals

            if num_evals - num_loss_acc == 1:
                # The previous run stopped after recording time/size but
                # before recording loss/acc: redo the missing evaluation.
                loss, acc = self.model.evaluate(self.test_loader)
                self.list_loss.append(loss)
                self.list_acc.append(acc)
            elif num_evals != num_loss_acc:
                # Any other mismatch means the checkpoint is inconsistent.
                raise RuntimeError("Cannot resume")

            # Evaluations happen every EVAL_DISP_INTERVAL rounds, so the
            # last completed round follows from the number of evaluations.
            self.round = (num_evals - 1) * self.config.EVAL_DISP_INTERVAL
            assert self.round >= 0
            # Shift the start time so elapsed time continues from the last
            # recorded time stamp.
            self.start_time = timer() - self.list_time_stamp[-1]

            self.check_client_to_sparse()
            # resume flag, next round number, and current sparsity state.
            resume_param = (True, self.round + 1, self.client_is_sparse)
            list_params = [(idx, exp_config, self.model, list_extra_params[idx], resume_param) for idx in
                           range(self.config.NUM_CLIENTS)]
            resume_msgs_to_client = [ServerToClientInitMessage(init_params) for init_params in list_params]
            self.socket.init_connections(resume_msgs_to_client)

            self.round += 1

            print("Server resumed")
            print(self)

        else:
            # Fresh start: empty metric histories and round counter at 0.
            self.list_loss = []
            self.list_acc = []
            self.list_time_stamp = []
            self.list_model_size = []
            self.start_time = timer() + self.init_time_offset
            self.round = 0
            mkdir_save(self.model, os.path.join(self.save_path, "init_model.pt"))
            self.model.eval()

            # (False, 0, False) = not resuming, start at round 0, dense model.
            list_init_params = [(idx, exp_config, self.model, list_extra_params[idx], (False, 0, False)) for idx in
                                range(self.config.NUM_CLIENTS)]
            init_msgs_to_client = [ServerToClientInitMessage(init_params) for init_params in list_init_params]
            self.socket.init_connections(init_msgs_to_client)

            print("Server initialized")
            print(self)
Exemplo n.º 2
0
def grid_search(increment, data):
    """Generates candidates using ground_station_gs then from generated candidates
    the input data is filtered for visibility.

        Returns a list of [GroundStation, Visibility], where Visibility is in the
        ECEF_R basis
    """
    # NOTE: the previously imported ecef_to_lat_long_h was never used; removed.
    from basis_converters.from_radians import degrees
    from utils.save_load import save, load

    # Return cached results when this (increment, data) pair was already run.
    results = load(['grid_search', increment, data])
    if results is not None:
        return results

    ground_stations = ground_station_gs(increment)
    results = []
    for gs in ground_stations:
        # Keep only the input points visible from this candidate station.
        visible = [x for x in data.ecef_r if gs.visible(x)]
        lat, long = gs.lat_long
        print("{}, {}".format(degrees(lat), degrees(long)))
        results.append((gs, visible))

    # Cache for subsequent calls with the same arguments.
    save(['grid_search', increment, data], results)
    return results
Exemplo n.º 3
0
    def __init__(self, args, config, model, save_interval=50):
        """Set up a reinitialization experiment from a finished adaptive run.

        Loads the final pruned model of the adaptive experiment and applies
        its masks to ``model`` according to ``args.mode``:
            "r"  — reinit: reload the adaptive run's initial model and
                   reinitialize it from the final model's masks.
            "rr" — random reinit: keep ``model``'s (differently seeded)
                   weights but copy the final model's masks layer by layer.

        Raises:
            ValueError: if ``args.mode`` is neither "r" nor "rr".
        """
        self.config = config
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.experiment_name = args.experiment_name
        self.save_path = os.path.join("results", config.EXP_NAME,
                                      args.experiment_name)
        self.save_interval = save_interval
        self.mode = args.mode

        self.model = model.to(self.device)
        # Folder name of the adaptive run whose masks we inherit.
        self.adaptive_folder = "adaptive{}{}".format(
            "_target" if args.targeted else "",
            "_cs" if args.client_selection else "")
        init_model_path = os.path.join("results", config.EXP_NAME,
                                       self.adaptive_folder, "init_model.pt")
        final_model_path = os.path.join("results", config.EXP_NAME,
                                        self.adaptive_folder, "model.pt")
        final_model = load(final_model_path)

        # reinit
        if self.mode == "r":
            self.model = load(init_model_path).to(self.device)
            self.model.reinit_from_model(final_model)

        # random reinit, using different seed for initialization but same mask
        elif self.mode == "rr":
            for layer, final_layer in zip(self.model.prunable_layers,
                                          final_model.prunable_layers):
                layer.mask = final_layer.mask.clone().to(layer.mask.device)
        else:
            # This branch was previously dead code behind an
            # `assert self.mode in ["r", "rr"]` (stripped under -O);
            # the explicit exception is now the single validation point.
            raise ValueError("Mode {} not supported".format(self.mode))

        # Zero out pruned weights so the copied masks take effect.
        with torch.no_grad():
            for layer in self.model.prunable_layers:
                layer.weight.mul_(layer.mask)

        disp_num_params(self.model)

        self.model.train()
        mkdir_save(self.model, os.path.join(self.save_path, "init_model.pt"))

        self.test_loader = None

        self.init_test_loader()
        self.init_clients()
Exemplo n.º 4
0
    def __init__(self, args, config, model, save_interval=50):
        """Set up an iterative-pruning experiment server.

        Derives the per-iteration prune rate from the final density of the
        corresponding adaptive run, so that NUM_ITERATIVE_PRUNING pruning
        steps reach the same overall density.
        """
        self.config = config
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.experiment_name = args.experiment_name
        self.save_path = os.path.join("results", config.EXP_NAME,
                                      args.experiment_name)
        self.save_interval = save_interval

        # Reference the adaptive run (with or without client selection).
        if args.client_selection:
            reference_folder = "adaptive_cs"
        else:
            reference_folder = "adaptive"
        reference_model = load("results/{}/{}/model.pt".format(
            config.EXP_NAME,
            reference_folder))
        # Per-step rate r such that (1 - r)^N equals the final density.
        target_density = reference_model.density()
        self.prune_rate = 1 - target_density**(1 / config.NUM_ITERATIVE_PRUNING)

        self.model = model.to(self.device)
        self.model.train()
        mkdir_save(self.model, os.path.join(self.save_path, "init_model.pt"))

        self.test_loader = None

        self.init_test_loader()
        self.init_clients()
Exemplo n.º 5
0
    def propogate_in_period(self, n, dt):
        """Checks to see if results exist, otherwise propogates the requested
        number of times.

        Propagates the spacecraft state ``n`` steps of ``dt`` each, starting
        from the spacecraft's current position/velocity, and caches the
        resulting ``Results`` under a key derived from (n, dt).
        """
        from models.results import Results
        from utils.save_load import save, load
        key = self.save_key(n, dt)
        # Return the cached propagation if one exists for this key.
        data = load(['propogate_in_period', key])
        if data is not None:
            return data

        # NOTE: the original also bound unused `position`/`velocity` locals
        # here; they were dead code and have been removed.
        vectors = [(self._spacecraft.position, self._spacecraft.velocity)]
        for i in range(1, n + 1):
            # Generates the base propogation series and freezes the values
            vector = self._propogator(vectors[i - 1][0], vectors[i - 1][1], dt)
            for item in vector:
                item.flags.writeable = False
            vectors.append(vector)
        data = Results(vectors, dt, self._spacecraft._t, self._pname)
        save(['propogate_in_period', key], data)
        return data
Exemplo n.º 6
0
            self.optimizer,
            step_size=STEP_SIZE,
            gamma=0.5**(STEP_SIZE / LR_HALF_LIFE))
        self.optimizer_wrapper = OptimizerWrapper(self.model, self.optimizer,
                                                  self.optimizer_scheduler)

    def init_train_loader(self, tl):
        """Store the training data loader for later use by this object."""
        self.train_loader = tl


if __name__ == "__main__":
    args = parse_args()
    if args.mode == "rr":
        prev_config = load(
            os.path.join(
                "results", EXP_NAME,
                "adaptive_cs" if args.client_selection else "adaptive",
                "exp_config.pt"))
        args.seed = prev_config["seed"] + 1
    torch.manual_seed(args.seed)

    num_users = 100
    num_slices = num_users if args.client_selection else NUM_CLIENTS

    server = ITReinitServer(args, config, resnet18(num_classes=100))
    list_models, list_indices = server.init_clients()

    sampler = FLSampler(list_indices, MAX_ROUND,
                        NUM_LOCAL_UPDATES * CLIENT_BATCH_SIZE,
                        args.client_selection, num_slices)
    print("Sampler initialized")
Exemplo n.º 7
0
def grid_search_long(increment, data):
    """Conducts a two phase grid search, generating candidates using grid_search,
    and then filtering out candidates over water. The candidates are divided into
    Northern and Southern hemispheres.

    In the first phase the candidate ground station with the greatest visibility in
    each hemisphere is chosen. In the second phase the search space is reduced by
    removing points visible from the candidates chosen in the first phase, and then
    two further ground stations are selected based on the number of points visible.

    Returns a list of [(GroundStation, Visibility, Color)], where for chosen candidates
    Color='g' (Green), and all other candidates Color='r'
    """
    # NOTE: the previously imported lat_long_to_ecef was never used; removed.
    from basis_converters.from_radians import degrees
    from utils.save_load import save, load
    from operator import itemgetter
    hemispheres = load(['filtered_grid_search', increment, data])
    if hemispheres is None:
        candidates = grid_search(increment, data)
        hemispheres = filter_candidates(candidates)
        save(['filtered_grid_search', increment, data], hemispheres)

    # Phase 1: find the best candidate in both the Northern and Southern
    # hemisphere by raw visibility count.
    maximums = [[], []]
    for hemisphere_index, hemisphere in enumerate(hemispheres):
        items = [len(visible) for _, visible, _ in hemisphere]
        index, _ = max(enumerate(items), key=itemgetter(1))
        maximums[hemisphere_index].append(index)

    # Phase 2: find the second best ground station in each hemisphere after
    # filtering out points visible to the first station.
    for hemisphere_index, hemisphere in enumerate(hemispheres):
        reduced_hemisphere = []
        best_gs, _, _ = hemisphere[maximums[hemisphere_index][0]]
        for item_index, (_, visibility, _) in enumerate(hemisphere):
            # Don't add the previous best candidate
            if item_index == maximums[hemisphere_index][0]:
                continue
            filtered_visibility = [
                point for point in visibility
                if best_gs.visible(point) is False
            ]
            reduced_hemisphere.append(len(filtered_visibility))

        index, _ = max(enumerate(reduced_hemisphere), key=itemgetter(1))
        if index >= maximums[hemisphere_index][0]:
            # Adjust for the removed list element (the best candidate)
            index += 1
        maximums[hemisphere_index].append(index)

    # Recolor all chosen candidates green.
    for hemisphere_index, hemisphere in enumerate(maximums):
        for maximum in hemisphere:
            gs, visibility, _ = hemispheres[hemisphere_index][maximum]
            hemispheres[hemisphere_index][maximum] = (gs, visibility, 'g')
            print(len(visibility))

    # Flatten hemispheres since there's no further need to divide the search
    hemispheres = [item for sublist in hemispheres for item in sublist]
    for gs, visibility, c in hemispheres:
        if c == 'g':
            lat, long = map(degrees, gs.lat_long)
            print(lat, long, len(visibility))

    # NOTE: the original had an unreachable save() call after this return;
    # the dead statement has been removed (results were already cached above).
    return hemispheres
Exemplo n.º 8
0
def load_acc(exp, cs=False):
    """Load the saved accuracy history for experiment *exp*.

    When *cs* is True the client-selection variant ("<exp>_cs") is read.
    """
    folder = "{}{}".format(exp, "_cs" if cs else "")
    path = join(result_path, folder, "accuracy.pt")
    return load(path)
Exemplo n.º 9
0
def load_ms(exp, cs=False):
    """Load the saved model-size history for experiment *exp*.

    When *cs* is True the client-selection variant ("<exp>_cs") is read.
    """
    folder = "{}{}".format(exp, "_cs" if cs else "")
    path = join(result_path, folder, "model_size.pt")
    return load(path)
Exemplo n.º 10
0
def load_time(exp, cs=False):
    """Load cumulative estimated time for experiment *exp*.

    Reads the per-round time estimates, accumulates them, and keeps one
    sample every EVAL_DISP_INTERVAL rounds to line up with evaluations.
    """
    folder = "{}{}".format(exp, "_cs" if cs else "")
    est_time = load(join(result_path, folder, "est_time.pt"))
    cumulative = np.cumsum(est_time)
    return cumulative[::config.EVAL_DISP_INTERVAL]