Code example #1
File: test_cases.py Project: amirhpd/mvnas
def test_search_space():
    search_space = SearchSpace(target_classes=2)
    vocab = search_space.mapping
    vocab_decoded = search_space.decode_sequence(vocab)
    vocab_encoded = search_space.encode_sequence(vocab_decoded)

    sample_sequence = [8, 8, 30]
    input_shape = np.shape(np.zeros(13))
    model = search_space.create_architecture(sample_sequence, input_shape)
    keras.utils.plot_model(model, to_file="model.png", show_shapes=True)
    assert len(vocab) == 30
    assert len(vocab_decoded) == 30
    assert len(vocab_encoded) == 30
Code example #2
File: tests.py Project: amirhpd/emnas
def test_trainer():
    search_space = SearchSpace(model_output_shape=2)
    tokens = search_space.generate_token()
    # controller = Controller(tokens=tokens)
    trainer = Trainer()

    # samples = controller.generate_sequence()
    samples = [[65, 146, 143, 201, 281, 382]]
    architectures = search_space.create_models(samples=samples,
                                               model_input_shape=(128, 128, 3))
    epoch_performance = trainer.train_models(samples=samples,
                                             architectures=architectures)
    assert len(epoch_performance) != 0
Code example #3
File: tests.py Project: amirhpd/emnas
def test_search_space():
    search_space = SearchSpace(model_output_shape=2)
    token = search_space.generate_token()

    dense_tokens = [x for x, y in token.items()
                    if "Dense" in y]  # dense layers start from 865
    sample_sequence = [52, 146, 31, 119, 138, 244]
    translated_sequence = search_space.translate_sequence(sample_sequence)
    assert len(translated_sequence) == 4

    model = search_space.create_model(sequence=sample_sequence,
                                      model_input_shape=(128, 128, 3))
    keras.utils.plot_model(model, to_file="model.png", show_shapes=True)
    print(model.summary())
    assert len(token) == 890
Code example #4
File: tests.py Project: amirhpd/emnas
def test_controller_rnn_trainer():
    search_space = SearchSpace(model_output_shape=2)
    tokens = search_space.generate_token()
    controller = Controller(tokens=tokens)
    # samples = controller.generate_sequence()
    manual_epoch_performance = {
        (320, 96, 338, 84, 176, 382): (0.968, 0),  # (acc, lat)
        (22, 47, 225, 315, 223, 382): (0.87, 0),
        (74, 204, 73, 236, 309, 382): (0.74, 0),
        (110, 60, 191, 270, 199, 382): (0.51, 0)
    }

    loss_avg = controller.train_controller_rnn(
        epoch_performance=manual_epoch_performance)
    print(loss_avg)
Code example #5
File: tests.py Project: amirhpd/emnas
def test_controller_generate_sequence_naive():
    search_space = SearchSpace(model_output_shape=2)
    tokens = search_space.generate_token()
    controller = Controller(tokens=tokens)

    # samples = controller.generate_sequence_naive(mode="b")
    # for sequence in samples:
    #     sequence_ = sequence
    #     print(sequence_)

    # sequences_random = controller.generate_sequence_naive(mode="r")

    for i in range(20):
        sequences_random = controller.generate_sequence_naive(mode="r_var_len")
        print(sequences_random)
    print("Done.")
Code example #6
    def optimize(self, run_f, params):
        search_tree = SearchSpace(params)

        lb = search_tree.get_lb()
        ub = search_tree.get_ub()
        f = param_decorator(run_f, search_tree)

        gs = RandomSearch(self.num_runs, lb, ub, self.sobol)

        start = timeit.default_timer()
        best_params, score = gs.optimize(f)
        end = timeit.default_timer() - start

        best_params = search_tree.transform(best_params)
        Result = namedtuple('Result', ['params', 'score', 'time'])

        return Result(best_params, score, end)
Code example #7
    def optimize(self, run_f, params, parallel=False):

        search_tree = SearchSpace(params)

        lb = search_tree.get_lb()
        ub = search_tree.get_ub()
        f = Evaluator(run_f, search_tree)

        algorithm = self.algorithm(lb, ub, parallel, *self.args, **self.kwargs)

        start = timeit.default_timer()
        best_params, score = algorithm.run(f)
        end = timeit.default_timer() - start

        best_params = search_tree.transform(best_params)
        Result = namedtuple('Result', ['params', 'score', 'time'])

        return Result(best_params, score, end)
Code example #8
    def optimize(self, run_f, params):
        search_tree = SearchSpace(params)

        lb = search_tree.get_lb()
        ub = search_tree.get_ub()
        f = param_decorator(run_f, search_tree)

        pso = PSO(self.num_generations, self.num_particles, lb, ub, self.phi1,
                  self.phi2)

        start = timeit.default_timer()
        best_params, score = pso.optimize(f)
        end = timeit.default_timer() - start

        best_params = search_tree.transform(best_params)
        Result = namedtuple('Result', ['params', 'score', 'time'])

        return Result(best_params, score, end)
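Note: Code examples #6-#8 share one pattern: build a SearchSpace from the parameter spec, read its numeric bounds, wrap the objective so the optimizer can propose raw vectors, then transform the best vector back into structured parameters. A minimal sketch of what the param_decorator helper used above might look like (hypothetical, inferred from how search_tree.transform is applied to best_params; not taken from the source project):

def param_decorator(run_f, search_tree):
    # Adapt run_f (which expects structured params) to an optimizer that
    # proposes raw numeric vectors inside [lb, ub].
    def wrapped(x):
        return run_f(search_tree.transform(x))
    return wrapped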
Code example #9
def main():
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)
  
  logging.info("args = %s", args)

  torch.backends.cudnn.benchmark = True
  torch.backends.cudnn.enabled = True

  model = SearchSpace()
  model.cuda()

  optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), eta_min=args.learning_rate_min)

  architect = Architect(model, args)

  train_samples = Rain800(args.data+'training/', args.steps*args.batch_size, args.patch_size)
  train_queue = torch.utils.data.DataLoader(train_samples, batch_size=args.batch_size, pin_memory=True)
  val_samples = Rain800(args.data+'test_syn/', 30*args.batch_size, args.patch_size)
  valid_queue = torch.utils.data.DataLoader(val_samples, batch_size=args.batch_size, pin_memory=True)

  best_psnr = 0
  best_psnr_epoch = 0
  best_ssim = 0
  best_ssim_epoch = 0
  best_loss = float("inf") 
  best_loss_epoch = 0
  for epoch in range(args.epochs):
    lr = scheduler.get_lr()[0]
    logging.info('epoch %d/%d lr %e', epoch+1, args.epochs, lr)

    # training
    train(epoch, train_queue, valid_queue, model, architect, optimizer, lr)
    # validation
    psnr, ssim, loss = infer(valid_queue, model)
    
    if psnr > best_psnr and not math.isinf(psnr):
      utils.save(model, os.path.join(args.save, 'best_psnr_weights.pt'))
      best_psnr_epoch = epoch+1
      best_psnr = psnr
    if ssim > best_ssim:
      utils.save(model, os.path.join(args.save, 'best_ssim_weights.pt'))
      best_ssim_epoch = epoch+1
      best_ssim = ssim
    if loss < best_loss:
      utils.save(model, os.path.join(args.save, 'best_loss_weights.pt'))
      best_loss_epoch = epoch+1
      best_loss = loss

    scheduler.step()
    logging.info('psnr:%6f ssim:%6f loss:%6f -- best_psnr:%6f best_ssim:%6f best_loss:%6f', psnr, ssim, loss, best_psnr, best_ssim, best_loss)
    logging.info('arch:%s', torch.argmax(model.arch_parameters()[0], dim=1))
    
  logging.info('BEST_LOSS(epoch):%6f(%d), BEST_PSNR(epoch):%6f(%d), BEST_SSIM(epoch):%6f(%d)', best_loss, best_loss_epoch, best_psnr, best_psnr_epoch, best_ssim, best_ssim_epoch)
  utils.save(model, os.path.join(args.save, 'last_weights.pt'))
Code example #10
    def __init__(self, tokens):
        self.max_no_of_layers = config.controller["max_no_of_layers"]
        self.agent_lr = config.controller["agent_lr"]
        self.min_reward = config.controller["min_reward"]
        self.min_plays = config.controller["min_plays"]
        self.max_plays = config.controller["max_plays"]
        self.alpha = config.controller["alpha"]
        self.gamma = config.controller["gamma"]
        self.model_input_shape = config.emnas["model_input_shape"]
        self.valid_sequence_timeout = config.controller[
            "valid_sequence_timeout"]
        self.tokens = tokens
        self.len_search_space = len(tokens) + 1
        self.end_token = list(tokens.keys())[-1]
        self.model = self.rl_agent()
        self.states = []
        self.gradients = []
        self.rewards = []
        self.probs = []
        if config.search_space["mode"] == "MobileNets":
            self.search_space = SearchSpaceMn(
                config.emnas["model_output_shape"])
        else:
            self.search_space = SearchSpace(config.emnas["model_output_shape"])
Code example #11
    def initialize(self, width, height, robot_pos, goal_pos, map_data):
        """ Initialize the Moving Target D* Lite algorithm """
        # Search variables
        self.km = 0
        self.open_list = OpenList()
        self.deleted_list = []
        self.path = []

        # Create a search space
        self.search_space = SearchSpace(width, height)
        self.search_space.load_search_space_from_map(map_data)

        # Get the node the robot is on (row, col is the pos argument)
        self.start_node = self.search_space.get_node(robot_pos)

        # Get the node the goal is on (row, col is the pos argument)
        self.goal_node = self.search_space.get_node(goal_pos)

        # Set the robot's node's rhs = 0
        self.start_node.set_rhs(0)

        # Add the robot's node to the open list
        self.start_node.set_key(self.calculate_key(self.start_node))
        self.open_list.insert(self.start_node)
Code example #12
File: main.py Project: davison0487/Motion-Planning
def rrt_star_test(index=0):
    t0 = tic()
    mapfile = [
        './maps/single_cube.txt', './maps/maze.txt', './maps/window.txt',
        './maps/tower.txt', './maps/flappy_bird.txt', './maps/room.txt',
        './maps/monza.txt'
    ]
    name = [
        'single_cube', 'maze', 'window', 'tower', 'flappy_bird', 'room',
        'monza'
    ]
    start_list = [(2.3, 2.3, 1.3), (0.0, 0.0, 1.0), (0.2, -4.9, 0.2),
                  (2.5, 4.0, 0.5), (0.5, 2.5, 5.5), (1.0, 5.0, 1.5),
                  (0.5, 1.0, 4.9)]
    goal_list = [(7.0, 7.0, 5.5), (12.0, 12.0, 5.0), (6.0, 18.0, 3.0),
                 (4.0, 2.5, 19.5), (19.0, 2.5, 5.5), (9.0, 7.0, 1.5),
                 (3.8, 1.0, 0.1)]

    print('Running RRT* ' + name[index] + ' test...\n')

    start = start_list[index]
    goal = goal_list[index]
    boundary, blocks = load_map(mapfile[index])
    dimension = np.array([(boundary[0, 0], boundary[0, 3]),
                          (boundary[0, 1], boundary[0, 4]),
                          (boundary[0, 2], boundary[0, 5])])
    new_blocks = np.zeros((1, 6))
    for block in blocks:
        new_blocks = np.concatenate((new_blocks, [block[:6]]))
    new_blocks = np.delete(new_blocks, 0, 0)
    Q = np.array([(0.5, 4)])
    r = 0.05
    max_samples = 102400
    prc = 0.1
    rewire = 10

    X = SearchSpace(dimension, new_blocks)

    rrt = RRTStar(X, Q, start, goal, max_samples, r, prc, rewire)
    path = rrt.rrt_star()
    plot_rrt(name[index], X, rrt, path, new_blocks, start, goal)
    toc(t0, name[index] + ' RRT*')

    pathlength = np.sum(
        np.sqrt(np.sum(np.diff(np.array(path), axis=0)**2, axis=1)))
    print('Path length is:', pathlength)
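The closing path-length expression sums the Euclidean norms of consecutive waypoint differences. A tiny self-contained check of the same formula (illustrative only):

import numpy as np

# Three collinear points spaced 1 apart; the total length must be 2.0.
path = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0)]
pathlength = np.sum(np.sqrt(np.sum(np.diff(np.array(path), axis=0) ** 2, axis=1)))
assert np.isclose(pathlength, 2.0)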
Code example #13
def generate_models():
    if config.search_space["mode"] == "MobileNets":
        search_space = SearchSpaceMn(model_output_shape=2)
    else:
        search_space = SearchSpace(model_output_shape=2)

    if os.listdir(latency_dataset):
        raise ValueError("Dataset folder is not empty.")
    tokens = search_space.generate_token()
    controller = Controller(tokens=tokens)
    sequences = []
    df = pd.DataFrame(columns=["model", "params [K]", "sipeed_latency [ms]", "kmodel_memory [KB]", "cpu_latency [ms]",
                               "accuracy", "token_sequence", "length", "model_info"])

    i = 0
    while i < no_of_examples:
        sequence = controller.generate_sequence_naive(mode="r_var_len")
        if (sequence in sequences) or (not search_space.check_sequence(sequence)):
            continue
        try:
            architecture = search_space.create_model(sequence=sequence, model_input_shape=model_input_shape)
        except Exception as e:
            # print(sequence)
            # print(e)
            continue
        sequences.append(sequence)
        i += 1
        i_str = format(i, f"0{len(str(no_of_examples))}d")  # zero-pad index to the width of no_of_examples
        file_name = f"model_{i_str}"
        architecture.save(f"{latency_dataset}/{file_name}.h5")
        model_params = round(architecture.count_params()/1000, 4)
        model_info = search_space.translate_sequence(sequence)
        model_info_json = json.dumps(dict(zip(range(len(model_info)), model_info)))
        # Note: DataFrame.append was removed in pandas 2.0; on current pandas,
        # use pd.concat([df, pd.DataFrame([row])], ignore_index=True) instead.
        df = df.append({"model": file_name, "params [K]": model_params,
                        "token_sequence": sequence, "length": len(sequence),
                        "model_info": model_info_json}, ignore_index=True)
        print(file_name, ", length:", len(sequence))

    df.to_csv(f"{latency_dataset}/table.csv", index=False)
Code example #14
class mt_dstar_lite:
    def __init__(self):
        """ Initialize the Moving Target D* Lite algorithm """
        # Moving Target
        self.km = 0
        self.search_space = None

        # Lists
        self.open_list = None
        self.deleted_list = None
        self.path = None

        # Nodes
        self.start_node = None
        self.goal_node = None
        self.old_start = None
        self.old_goal = None

    def initialize(self, width, height, robot_pos, goal_pos, map_data):
        """ Initialize the Moving Target D* Lite algorithm """
        # Search variables
        self.km = 0
        self.open_list = OpenList()
        self.deleted_list = []
        self.path = []

        # Create a search space
        self.search_space = SearchSpace(width, height)
        self.search_space.load_search_space_from_map(map_data)

        # Get the node the robot is on (row, col is the pos argument)
        self.start_node = self.search_space.get_node(robot_pos)

        # Get the node the goal is on (row, col is the pos argument)
        self.goal_node = self.search_space.get_node(goal_pos)

        # Set the robot's node's rhs = 0
        self.start_node.set_rhs(0)

        # Add the robot's node to the open list
        self.start_node.set_key(self.calculate_key(self.start_node))
        self.open_list.insert(self.start_node)

    def calculate_key(self, node):
        """ Calculates a node key based on its G and RHS values, as well as the distance to the goal """
        key_1 = min(
            min(node.G, node.rhs) + self.heuristic(node, self.goal_node) +
            self.km, Node.INFINITY)
        key_2 = min(min(node.G, node.rhs), Node.INFINITY)
        return (key_1, key_2)

    def heuristic(self, node1, node2):
        """ Compute the distance from one node to another """
        return sqrt((node2.row - node1.row)**2 + (node2.col - node1.col)**2)

    def cost(self, node1, node2):
        """ Computer actual cost incurred by moving from one node to another """
        if node1.cost < Node.INFINITY and node2.cost < Node.INFINITY:
            return self.heuristic(node1, node2) + node1.cost + node2.cost
        else:
            return Node.INFINITY

    def update_state(self, node):
        """ Updates a node's status based on the progression of the search """
        # If the node is on the open list and is inconsistent, update it
        if node.G != node.rhs and self.open_list.contains(node):
            node.set_key(self.calculate_key(node))
            self.open_list.update(node)

        # If the node is not on the open list, but is inconsistent, then add it to the open list
        elif node.G != node.rhs and not self.open_list.contains(node):
            node.set_key(self.calculate_key(node))
            self.open_list.insert(node)

        # If the node is on the open list and is consistent, close it by removing it from the open list
        elif node.G == node.rhs and self.open_list.contains(node):
            self.open_list.delete(node)

    def compute_cost_minimal_path(self):
        """ Finds the best path from the start state to the goal state """
        while (self.open_list.top_key() < self.calculate_key(
                self.goal_node)) or (self.goal_node.rhs > self.goal_node.G):
            # Get the highest priority node from the open list
            u_node = self.open_list.top()

            # Compute keys for this node
            old_key = u_node.key
            new_key = self.calculate_key(u_node)

            # If the new key is greater than the old key, update the open list
            if old_key < new_key:
                u_node.set_key(new_key)
                self.open_list.update(u_node)

            # Otherwise, if the node's G value is higher than its RHS value, close the node and update its successors
            elif u_node.G > u_node.rhs:
                # Make the node consistent and remove it from the open list
                u_node.set_g(u_node.rhs)
                self.open_list.delete(u_node)

                # Update successors
                for s_node in self.search_space.get_successors(u_node):
                    if s_node != self.start_node and s_node.rhs > u_node.G + self.cost(
                            u_node, s_node):
                        s_node.set_rhs(u_node.G + self.cost(u_node, s_node))
                        s_node.set_par(u_node)
                        self.update_state(s_node)

            # Otherwise make the node inconsistent and calculate the parent nodes of its successors
            else:
                # Set G value to infinity
                u_node.set_g(Node.INFINITY)

                # Update successors
                succ = self.search_space.get_successors(u_node)
                succ.append(u_node)
                for s_node in succ:
                    if s_node != self.start_node and s_node.par == u_node:
                        # Set the RHS to be the minimum one-step lookahead
                        s_rhs = min([
                            node.G + self.cost(node, s_node) for node in
                            self.search_space.get_successors(s_node)
                        ])
                        s_node.set_rhs(s_rhs)

                        if s_node.rhs >= Node.INFINITY:
                            s_node.set_rhs(Node.INFINITY)
                            s_node.set_par(None)
                        else:
                            # Set the parent node pointer based on the parent node that has the minimum path cost to goal from this node
                            par_nodes = [
                                node for node in
                                self.search_space.get_successors(s_node)
                            ]
                            s_par = [
                                node.G + self.cost(node, s_node)
                                for node in par_nodes
                            ]
                            s_par_node_idx = s_par.index(min(s_par))
                            s_par_node = par_nodes[s_par_node_idx]
                            s_node.set_par(s_par_node)
                    self.update_state(s_node)

    def optimized_deletion(self):
        """ """
        # Initialize deletion
        self.deleted_list = []
        self.start_node.set_par(None)

        # Go through the nodes in the search tree that aren't on the path from the current start node to the goal node
        for s_node in self.search_space.get_deleteable_nodes(self.start_node):
            # Reset the node's data
            s_node.set_par(None)
            s_node.set_rhs(Node.INFINITY)
            s_node.set_g(Node.INFINITY)

            # Get this node off the open list and add it to the deleted list
            if self.open_list.contains(s_node):
                self.open_list.delete(s_node)

            # Add this node to the deleted list
            self.deleted_list.append(s_node)

        # Go through the deleted list and update its costs
        for node in self.deleted_list:
            # Get the best cost from the successors
            for succ_node in self.search_space.get_successors(node):
                if node.rhs > succ_node.G + self.cost(succ_node, node):
                    node.set_rhs(succ_node.G + self.cost(succ_node, node))
                    node.set_par(succ_node)

            # Add the node to the open list if it has a finite cost
            if node.rhs < Node.INFINITY:
                node.set_key(self.calculate_key(node))
                self.open_list.insert(node)

    def get_best_path(self):
        """ Get the best path using the parent pointers """
        # Start at the start and keep track of the footsteps
        node = self.goal_node
        path = [(node.row, node.col)]

        # Go until the goal is found
        while node != self.start_node:
            # Add the next node to the path
            next_node = node.par
            path.append((next_node.row, next_node.col))
            node = next_node

        # The best path should be found by traversing the pointers
        return path

    # NOTE: we assume that the hunter never catches the target. We also assume that the edge costs change
    # during every configuration space update for now - the SLAM node will (in the future) provide new maps only when the
    # robot has moved. Since the target cannot move off the path unless the map updates, and the map only updates to show new
    # edge costs, we don't need to check for the conditions in the while loop that waits for the hunter to follow the target,
    # as described in the original algorithm

    def plan(self):
        """ Plan the robot's initial path """
        # Set the last robot positions
        self.old_start = self.start_node
        self.old_goal = self.goal_node

        # Find a low cost path from the start to the goal
        self.compute_cost_minimal_path()

        # If the goal has a RHS of infinity, then there is no path
        if self.goal_node.rhs >= Node.INFINITY:
            #self.search_space.print_search_space_rhs()
            print("Get rekt " + str(self.goal_node.rhs))
            return None

        # Figure out path by following the parent pointers (par) to the goal node
        self.path = self.get_best_path()
        return self.path

    # NOTE: Per the note above, we must unwrap the main while loop since there is a discontinuity caused by the "hunter follows target"
    # while loop. Therefore, plan() should only be called by an external class the first time we want to plan,
    # and all subsequent external planning requests should be to the following replan() function

    def replan(self, robot_pos, goal_pos, new_map):
        # Get the node the robot is on (row, col is the pos argument)
        self.start_node = self.search_space.get_node(robot_pos)

        # Get the node the goal is on (row, col is the pos argument)
        self.goal_node = self.search_space.get_node(goal_pos)

        # Update the km search parameter
        self.km = self.km + self.heuristic(self.goal_node, self.old_goal)

        # If the robot has moved, delete the nodes that need updating and revalue them
        if self.old_start != self.start_node:
            # Perform optimized deletion to update the out-of-date parts of the search tree
            self.optimized_deletion()

            # Shift the map anyways because our environment grows
            # TODO

        # Update all changed edge costs from the map update
        for c_node in self.search_space.update_map(new_map):
            for succ in self.search_space.get_successors(c_node):
                # If the old cost was bigger than the new cost (i.e., the current cost is low now)
                if c_node.cost < Node.INFINITY:
                    if succ != self.start_node and succ.rhs > c_node.G + self.cost(
                            c_node, succ):
                        succ.set_par(c_node)
                        succ.set_rhs(c_node.G + self.cost(c_node, succ))
                # Otherwise the cost has increased and we need to reroute the successors that map to this node
                else:
                    # Only update successors that map to the current node (and that aren't the start node)
                    if succ != self.start_node and succ.par is c_node:
                        # Set the RHS to be the minimum one-step lookahead
                        succ_rhs = min([
                            node.G + self.cost(node, succ)
                            for node in self.search_space.get_successors(succ)
                        ])
                        succ.set_rhs(succ_rhs)

                        if succ.rhs >= Node.INFINITY:
                            succ.set_rhs(Node.INFINITY)
                            succ.set_par(None)
                        else:
                            # Set the parent node pointer based on the parent node that has the minimum path cost to goal from this node
                            par_nodes = [
                                node for node in
                                self.search_space.get_successors(succ)
                            ]
                            s_par = [
                                node.G + self.cost(node, succ)
                                for node in par_nodes
                            ]
                            s_par_node_idx = s_par.index(min(s_par))
                            s_par_node = par_nodes[s_par_node_idx]
                            succ.set_par(s_par_node)

                        # update the state of the successor
                        self.update_state(succ)

        # Plan the path after adjustments
        return self.plan()

    def get_search_space_map(self):
        return self.search_space.get_search_space_rhs_map()
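One detail worth noting in calculate_key: keys are (k1, k2) tuples, and D* Lite requires the open list to order them lexicographically, which is exactly what Python's built-in tuple comparison provides (presumably what OpenList.top_key() relies on):

# Tuple keys compare lexicographically: k1 decides, k2 breaks ties.
assert (5.0, 2.0) < (5.0, 3.0)  # equal k1, smaller k2 wins
assert (4.0, 9.0) < (5.0, 2.0)  # smaller k1 wins regardless of k2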
Code example #15
from optimization_problem import OptimizationProblem
from bayes_optimizer import BayesOptimizer
from search_space import SearchSpace, Parameter
import random
from sklearn.gaussian_process import GaussianProcessRegressor
from benchmark import branin
from numpy import average
branin_problem = OptimizationProblem(
    SearchSpace(
        [
            Parameter("", random.uniform, a=-5, b=10),
            Parameter("", random.uniform, a=0, b=15),
        ]
    ),
    function=branin,
    optimal_value=-0.397887,
)

branin_optimizer = BayesOptimizer(
    branin_problem, GaussianProcessRegressor(), max_iterations=1000, epsilon=0.01
)


def objective(array, *args, **kwargs):
    return average([branin_optimizer.optimize(start_sample_size=array[0], sample_size=array[1]) for i in range(20)]) * -1


bayes_optimization_problem = OptimizationProblem(
    SearchSpace(
        [
            # Parameter("target", random.choice, [branin_optimizer]),
Code example #16
    def to_resource_graph(self, space: SearchSpace, *args, **kwargs):
        return space.to_resource_graph(self.arch, self.input_shape,
                                       self.num_classes, *args, **kwargs)
Code example #17
    def to_keras_model(self, space: SearchSpace, *args, **kwargs):
        return space.to_keras_model(self.arch, self.input_shape,
                                    self.num_classes, *args, **kwargs)
Code example #18
    def __init__(self, wrapped_arch: Architecture, space: SearchSpace,
                 input_shape, num_classes):
        self.arch = wrapped_arch
        self.input_shape = input_shape
        self.num_classes = num_classes
        self.val_error = None  # Filled out by the experiment caller
        self.test_error = None

        rg = space.to_resource_graph(self.arch, input_shape, num_classes)
        self.resource_features = self.compute_resource_features(rg)

        # Here we convert the architecture into a representation compatible with Dragonfly's kernel,
        # which is:
        # - `layer_labels`: a string describing the kind of a layer ("ip" and "op" for input and
        #    output layers, or e.g. "DWConv2D-3" for a 3x3 DW convolution);
        # - `edge_list`: a list of tuples representing connections between layers (indices are
        #    determined by the position of the latter in the `layer_labels` list);
        # - `units`: number of units in a layer, which Dragonfly uses to compute "layer mass".
        # The conversion can be done for any `Architecture` from its resource graph representation

        layer_labels = []
        edge_list = []
        units = []
        mass = []  # -1 marks non-processing layers; mass is filled out later

        layers = [t for t in rg.tensors.values()
                  if not t.is_constant]  # input and layer output tensors
        tensor_name_to_id = {t.name: i for i, t in enumerate(layers)}

        # Add input tensors as "input layers"
        for tensor in rg.inputs:
            layer_labels.append("ip")
            units.append(tensor.shape[-1])
            mass.append(-1)

        # Add each operator as its own layer and connect its input layers to itself
        for op in rg.operators.values():
            layer_labels.append(op_to_label(op))
            units.append(op.output.shape[-1])
            inputs = [i for i in op.inputs if i.name in tensor_name_to_id]
            edge_list.extend(
                (tensor_name_to_id[i.name], tensor_name_to_id[op.output.name])
                for i in inputs)
            mass.append(macs(op))

        # Invent a dummy layer for each output tensor
        for tensor in rg.outputs:
            i = len(layer_labels)
            layer_labels.append("op")
            units.append(tensor.shape[-1])
            edge_list.append((tensor_name_to_id[tensor.name], i))
            mass.append(-1)

        num_layers = len(layer_labels)
        conn_mat = dok_matrix((num_layers, num_layers))
        for (a, b) in edge_list:
            conn_mat[a, b] = 1

        non_proc_layer_mass = max(0.1 * sum(f for f in mass if f != -1),
                                  100)  # according to Dragonfly
        self.layer_masses = np.array([(non_proc_layer_mass if f == -1 else f)
                                      for f in mass],
                                     dtype=float) / 1000  # np.float was removed in NumPy 1.24

        super().__init__("unas-net",
                         layer_labels,
                         conn_mat,
                         num_units_in_each_layer=units,
                         all_layer_label_classes=all_possible_labels(),
                         layer_label_similarities=None)
Code example #19
class Controller(object):
    def __init__(self, tokens):
        self.max_no_of_layers = config.controller["max_no_of_layers"]
        self.agent_lr = config.controller["agent_lr"]
        self.min_reward = config.controller["min_reward"]
        self.min_plays = config.controller["min_plays"]
        self.max_plays = config.controller["max_plays"]
        self.alpha = config.controller["alpha"]
        self.gamma = config.controller["gamma"]
        self.model_input_shape = config.emnas["model_input_shape"]
        self.valid_sequence_timeout = config.controller[
            "valid_sequence_timeout"]
        self.tokens = tokens
        self.len_search_space = len(tokens) + 1
        self.end_token = list(tokens.keys())[-1]
        self.model = self.rl_agent()
        self.states = []
        self.gradients = []
        self.rewards = []
        self.probs = []
        if config.search_space["mode"] == "MobileNets":
            self.search_space = SearchSpaceMn(
                config.emnas["model_output_shape"])
        else:
            self.search_space = SearchSpace(config.emnas["model_output_shape"])

    def rl_agent(self):
        model_output_shape = (self.max_no_of_layers - 1, self.len_search_space)
        model = keras.models.Sequential()
        model.add(
            keras.layers.Dense(512,
                               input_shape=(self.max_no_of_layers - 1, ),
                               activation="relu"))
        model.add(keras.layers.Dense(256, activation="relu"))
        model.add(keras.layers.Dense(128, activation="relu"))
        model.add(keras.layers.Dense(64, activation="relu"))
        model.add(keras.layers.Dense(32, activation="relu"))
        model.add(keras.layers.Dense(16, activation="relu"))
        model.add(keras.layers.Dense(16, activation="relu"))
        model.add(keras.layers.Dense(32, activation="relu"))
        model.add(keras.layers.Dense(64, activation="relu"))
        model.add(keras.layers.Dense(128, activation="relu"))
        model.add(keras.layers.Dense(256, activation="relu"))
        model.add(keras.layers.Dense(512, activation="relu"))
        model.add(
            keras.layers.Dense(model_output_shape[0] * model_output_shape[1],
                               activation="softmax"))
        model.add(keras.layers.Reshape(model_output_shape))

        model.compile(loss="categorical_crossentropy",
                      optimizer=keras.optimizers.Adam(lr=self.agent_lr))
        return model

    def get_all_action(self, state: np.ndarray) -> (List, np.ndarray, bool):
        true_sequence = False
        actions = []
        distributions = self.model.predict(state)
        for distribution in distributions[0]:
            distribution /= np.sum(distribution)
            action = np.random.choice(self.len_search_space, 1,
                                      p=distribution)[0]
            action = 1 if action == 0 else action
            actions.append(int(action))
            if action == self.end_token:
                break

        sequence = actions + [self.end_token
                              ] if self.end_token not in actions else actions
        valid_sequence = self.search_space.check_sequence(sequence)
        if valid_sequence:
            valid_model = self.search_space.create_models(
                samples=[sequence], model_input_shape=self.model_input_shape)
            true_sequence = True if (valid_model[0] is not None
                                     and valid_sequence is True) else False

        if len(actions) < self.max_no_of_layers - 1:
            for _ in range((self.max_no_of_layers - 1) - len(actions)):
                actions.append(0)

        return actions, distributions, true_sequence

    def get_valid_action(self, state: np.ndarray) -> (List, np.ndarray, int):
        true_sequence = False
        counter = 0
        while not true_sequence:
            counter += 1
            actions = []
            distributions = self.model.predict(state)
            for distribution in distributions[0]:
                distribution /= np.sum(distribution)
                action = np.random.choice(self.len_search_space,
                                          1,
                                          p=distribution)[0]
                action = 1 if action == 0 else action
                actions.append(int(action))
                if action == self.end_token:
                    break

            sequence = actions + [
                self.end_token
            ] if self.end_token not in actions else actions
            valid_sequence = self.search_space.check_sequence(sequence)
            if valid_sequence:
                valid_model = self.search_space.create_models(
                    samples=[sequence],
                    model_input_shape=self.model_input_shape)
                true_sequence = True if (valid_model[0] is not None
                                         and valid_sequence is True) else False
            if counter > self.valid_sequence_timeout:
                return None, None, None  # timeout

        if len(actions) < self.max_no_of_layers - 1:
            for _ in range((self.max_no_of_layers - 1) - len(actions)):
                actions.append(0)

        return actions, distributions, counter - 1

    def remember(self, state, actions, prob, reward):
        model_output_shape = (self.max_no_of_layers - 1, self.len_search_space)
        encoded_action = np.zeros(model_output_shape, np.float32)
        for i, action in enumerate(actions):
            encoded_action[i][action] = 1

        self.gradients.append(encoded_action - prob)
        self.states.append(state)
        self.rewards.append(reward)
        self.probs.append(prob)

    def clear_memory(self):
        self.states.clear()
        self.gradients.clear()
        self.rewards.clear()
        self.probs.clear()

    def get_discounted_rewards(self, rewards_in):
        discounted_rewards = []
        cumulative_total_return = 0

        for reward in rewards_in[::-1]:
            cumulative_total_return = (cumulative_total_return *
                                       self.gamma) + reward
            discounted_rewards.insert(0, cumulative_total_return)

        mean_rewards = np.mean(discounted_rewards)
        std_rewards = np.std(discounted_rewards)
        norm_discounted_rewards = (discounted_rewards -
                                   mean_rewards) / (std_rewards + 1e-7)

        return norm_discounted_rewards

    def update_policy(self):
        states_ = np.vstack(self.states)

        gradients_ = np.vstack(self.gradients)
        rewards_ = np.vstack(self.rewards)
        discounted_rewards = self.get_discounted_rewards(rewards_)
        discounted_rewards = discounted_rewards.reshape(
            discounted_rewards.shape[0], discounted_rewards.shape[1],
            discounted_rewards.shape[1])
        gradients_ *= discounted_rewards
        gradients_ = self.alpha * gradients_ + np.vstack(self.probs)

        history = self.model.train_on_batch(states_, gradients_)
        self.clear_memory()
        return history

    def generate_sequence_naive(self, mode: str):
        token_keys = list(self.tokens.keys())
        if mode == "b":  # Brute-force
            space = itertools.permutations(token_keys,
                                           self.max_no_of_layers - 1)
            return space
        if mode == "r":  # Random
            sequence = []
            sequence_length = np.random.randint(3, self.max_no_of_layers)
            for i in range(sequence_length):
                token = np.random.choice(token_keys)
                sequence.append(token)
            return sequence
        if mode == "r_var_len":
            sequence = []
            length = np.random.randint(12 - 1, self.max_no_of_layers, 1)[0]
            for i in range(length):
                token = np.random.choice(token_keys)
                sequence.append(token)
            sequence.append(token_keys[-1])
            return sequence
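get_discounted_rewards implements the standard REINFORCE return, G_t = r_t + gamma * G_{t+1}, as a reversed cumulative sum followed by mean/std normalization. A quick standalone check of the recurrence (gamma = 0.5 chosen only for illustration):

gamma = 0.5
rewards = [1.0, 0.0, 1.0]
discounted = []
cumulative = 0.0
for r in rewards[::-1]:
    cumulative = cumulative * gamma + r
    discounted.insert(0, cumulative)
assert discounted == [1.25, 0.5, 1.0]  # G_2 = 1, G_1 = 0.5*1, G_0 = 1 + 0.5*0.5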
Code example #20
File: rrt_7d.py Project: cjohns94/RoboticsProject
# Obstacles = np.array([
# 	(20, 20, 20, 20, 20, 20, 20, 40, 40, 40, 40, 40, 40, 40),
# 	(10, 10, 10, 10, 10, 10, 10, 18, 18, 18, 18, 18, 18, 18)
# 	])

x_init = (0, 0, 0, 0, 0, 0, 0)  # starting location
x_goal = (0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5)  # goal location

Q = np.array([(.05, .05, .05, .05, .05, .05, .05)])  # length of tree edges
r = .01  # length of smallest edge to check for intersection with obstacles
max_samples = 100  # max number of samples to take before timing out
prc = 0.1  # probability of checking for a connection to goal

# create search space
#X = SearchSpace(X_dimensions, Obstacles)
X = SearchSpace(X_dimensions)  # obstacles can be set to None

# create rrt_search
rrt = RRT(X, Q, x_init, x_goal, max_samples, r, prc)
print('rrt built. Searching for path...')
path = rrt.rrt_search()

#TODO: figure out plotting with plotly
# plot
#plot = Plot("rrt_2d")
#plot.plot_tree(X, rrt.trees)
#if path is not None:
#    plot.plot_path(X, path)
#plot.plot_obstacles(X, Obstacles)
#plot.plot_start(X, x_init)
#plot.plot_goal(X, x_goal)
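X_dimensions is defined elsewhere in rrt_7d.py and is not part of this snippet; for a 7-dimensional configuration space it holds per-dimension (lower, upper) bounds, along the lines of this hypothetical sketch:

import numpy as np

# Hypothetical joint limits; the real values live elsewhere in rrt_7d.py.
X_dimensions = np.array([(-1.0, 1.0)] * 7)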
Code example #21
File: tests.py Project: amirhpd/emnas
def test_controller_sample_generator():
    search_space = SearchSpace(model_output_shape=2)
    tokens = search_space.generate_token()
    controller = Controller(tokens=tokens)
    samples = controller.generate_sequence()
    print(samples)
Code example #22
File: emnas.py Project: amirhpd/emnas
model_input_shape = config.emnas["model_input_shape"]
search_mode = config.emnas["search_mode"]
naive_threshold = config.emnas["naive_threshold"]
naive_timeout = config.emnas["naive_timeout"]
no_of_episodes = config.emnas["no_of_episodes"]
log_path = config.emnas["log_path"]
max_no_of_layers = config.controller["max_no_of_layers"]
dynamic_min_reward = config.controller["dynamic_min_reward"]
variance_threshold = config.controller["variance_threshold"]
valid_actions = config.controller["valid_actions"]
valid_sequence_timeout = config.controller["valid_sequence_timeout"]

if config.search_space["mode"] == "MobileNets":
    search_space = SearchSpaceMn(model_output_shape=model_output_shape)
else:
    search_space = SearchSpace(model_output_shape=model_output_shape)
tokens = search_space.generate_token()
controller = Controller(tokens=tokens)
trainer = Trainer(tokens)


def _plot(history, path):
    img_size = (24, 5)

    fig = plt.figure(figsize=img_size)
    plt.plot(np.arange(0, len(history["loss"])), history["loss"])
    plt.title("Agent loss")
    plt.xlabel("Episode")
    plt.ylabel("Loss")
    plt.grid()
    plt.savefig(path + "/fig_1.png", bbox_inches="tight", pad_inches=0.2)
Code example #23
File: rrt_nd.py Project: cjohns94/RoboticsProject
# obstacles (q1 lower, q2 lower, ..., qn lower, q1 upper, q2 upper,..., qn upper), each obstacle should have 2n coordinates
Obstacles = np.array([(20, 20, 20, 20, 20, 20, 20, 40, 40, 40, 40, 40, 40, 40),
                      (10, 10, 10, 10, 10, 10, 10, 18, 18, 18, 18, 18, 18, 18)
                      ])

x_init = (0, 0, 0, 0, 0, 0, 0)  # starting location
x_goal = (50, 50, 50, 50, 50, 50, 50)  # goal location

Q = np.array([(8, 4)])  # length of tree edges
r = 1  # length of smallest edge to check for intersection with obstacles
max_samples = 1024  # max number of samples to take before timing out
prc = 0.1  # probability of checking for a connection to goal

# create search space
X = SearchSpace(X_dimensions, Obstacles)

# create rrt_search
rrt = RRT(X, Q, x_init, x_goal, max_samples, r, prc)
path = rrt.rrt_search()

#TODO: figure out plotting with plotly
# plot
#plot = Plot("rrt_2d")
#plot.plot_tree(X, rrt.trees)
#if path is not None:
#    plot.plot_path(X, path)
#plot.plot_obstacles(X, Obstacles)
#plot.plot_start(X, x_init)
#plot.plot_goal(X, x_goal)
#plot.draw(auto_open=True)
Code example #24
File: run_trial.py Project: mesosphere/katib
def main():

    parser = argparse.ArgumentParser(description='TrainingContainer')
    parser.add_argument('--algorithm-settings',
                        type=str,
                        default="",
                        help="algorithm settings")
    parser.add_argument('--search-space',
                        type=str,
                        default="",
                        help="search space for the neural architecture search")
    parser.add_argument('--num-layers',
                        type=str,
                        default="",
                        help="number of layers of the neural network")

    args = parser.parse_args()

    # Get Algorithm Settings
    algorithm_settings = args.algorithm_settings.replace("\'", "\"")
    algorithm_settings = json.loads(algorithm_settings)
    print(">>> Algorithm settings")
    for key, value in algorithm_settings.items():
        if len(key) > 13:
            print("{}\t{}".format(key, value))
        elif len(key) < 5:
            print("{}\t\t\t{}".format(key, value))
        else:
            print("{}\t\t{}".format(key, value))
    print()

    num_epochs = int(algorithm_settings["num_epochs"])

    w_lr = float(algorithm_settings["w_lr"])
    w_lr_min = float(algorithm_settings["w_lr_min"])
    w_momentum = float(algorithm_settings["w_momentum"])
    w_weight_decay = float(algorithm_settings["w_weight_decay"])
    w_grad_clip = float(algorithm_settings["w_grad_clip"])

    alpha_lr = float(algorithm_settings["alpha_lr"])
    alpha_weight_decay = float(algorithm_settings["alpha_weight_decay"])

    batch_size = int(algorithm_settings["batch_size"])
    num_workers = int(algorithm_settings["num_workers"])

    init_channels = int(algorithm_settings["init_channels"])

    print_step = int(algorithm_settings["print_step"])

    num_nodes = int(algorithm_settings["num_nodes"])
    stem_multiplier = int(algorithm_settings["stem_multiplier"])

    # Get Search Space
    search_space = args.search_space.replace("\'", "\"")
    search_space = json.loads(search_space)
    search_space = SearchSpace(search_space)

    # Get Num Layers
    num_layers = int(args.num_layers)
    print("Number of layers {}\n".format(num_layers))

    # Set GPU Device
    # Currently use only first available GPU
    # TODO: Add multi GPU support
    # TODO: Add functionality to select GPU
    all_gpus = list(range(torch.cuda.device_count()))
    if len(all_gpus) > 0:
        device = torch.device("cuda")
        torch.cuda.set_device(all_gpus[0])
        np.random.seed(2)
        torch.manual_seed(2)
        torch.cuda.manual_seed_all(2)
        torch.backends.cudnn.benchmark = True
        print(">>> Use GPU for Training <<<")
        print("Device ID: {}".format(torch.cuda.current_device()))
        print("Device name: {}".format(torch.cuda.get_device_name(0)))
        print("Device availability: {}\n".format(torch.cuda.is_available()))
    else:
        device = torch.device("cpu")
        print(">>> Use CPU for Training <<<")

    # Get dataset with meta information
    # TODO: Add support for more dataset
    input_channels, num_classes, train_data = utils.get_dataset()

    criterion = nn.CrossEntropyLoss().to(device)

    model = NetworkCNN(init_channels, input_channels, num_classes, num_layers,
                       criterion, search_space, num_nodes, stem_multiplier)

    model = model.to(device)

    # Weights optimizer
    w_optim = torch.optim.SGD(model.getWeights(),
                              w_lr,
                              momentum=w_momentum,
                              weight_decay=w_weight_decay)

    # Alphas optimizer
    alpha_optim = torch.optim.Adam(model.getAlphas(),
                                   alpha_lr,
                                   betas=(0.5, 0.999),
                                   weight_decay=alpha_weight_decay)

    # Split data to train/validation
    num_train = len(train_data)
    split = num_train // 2
    indices = list(range(num_train))

    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(
        indices[:split])
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(
        indices[split:])

    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=batch_size,
                                               sampler=train_sampler,
                                               num_workers=num_workers,
                                               pin_memory=True)

    valid_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=batch_size,
                                               sampler=valid_sampler,
                                               num_workers=num_workers,
                                               pin_memory=True)

    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optim,
                                                              num_epochs,
                                                              eta_min=w_lr_min)

    architect = Architect(model, w_momentum, w_weight_decay)

    # Start training
    best_top1 = 0.

    for epoch in range(num_epochs):
        lr_scheduler.step()
        lr = lr_scheduler.get_lr()[0]

        model.print_alphas()

        # Training
        print(">>> Training")
        train(train_loader, valid_loader, model, architect, w_optim,
              alpha_optim, lr, epoch, num_epochs, device, w_grad_clip,
              print_step)

        # Validation
        print("\n>>> Validation")
        cur_step = (epoch + 1) * len(train_loader)
        top1 = validate(valid_loader, model, epoch, cur_step, num_epochs,
                        device, print_step)

        # Print genotype
        genotype = model.genotype(search_space)
        print("\nModel genotype = {}".format(genotype))

        # Modify best top1
        if top1 > best_top1:
            best_top1 = top1
            best_genotype = genotype

    print("Final best Prec@1 = {:.4%}".format(best_top1))
    print("\nBest-Genotype={}".format(str(best_genotype).replace(" ", "")))
Code example #25
from optimization_problem import OptimizationProblem
from benchmark import branin
from bayes_optimizer import BayesOptimizer
from search_space import SearchSpace, Parameter
import random
from sklearn.gaussian_process import GaussianProcessRegressor

branin_problem = OptimizationProblem(
    SearchSpace([
        Parameter("", random.uniform, a=-5, b=10),
        Parameter("", random.uniform, a=0, b=15),
    ]),
    function=branin,
    optimal_value=-0.397887,
)

total = 0

optimizer = BayesOptimizer(branin_problem,
                           GaussianProcessRegressor(),
                           epsilon=0.0001,
                           max_iterations=3000)
for i in range(1):
    total += optimizer.optimize(
        debug=True,
        start_sample_size=200,
        sample_size=1000,
    )

print("\n\n\n\nAverage iterations is ", sum)