Code Example #1
    def maximize_acq(self, model_ids):
        overall_max_acq_value = -1
        father_id = None
        target_graph = None

        # exploration
        for model_id in model_ids:
            model = self.load_model_by_id(model_id)
            graph = Graph(to_stub_model(model))
            graph.clear_operation_history()
            graphs = transform(graph)
            for temp_graph in graphs:
                temp_acq_value = self._acq(temp_graph)
                if temp_acq_value > overall_max_acq_value:
                    overall_max_acq_value = temp_acq_value
                    father_id = model_id
                    target_graph = temp_graph

        # exploitation
        for i in range(constant.ACQ_EXPLOITATION_DEPTH):
            graphs = transform(target_graph)
            for temp_graph in graphs:
                temp_acq_value = self._acq(temp_graph)
                if temp_acq_value > overall_max_acq_value:
                    overall_max_acq_value = temp_acq_value
                    target_graph = temp_graph

        model = self.load_model_by_id(father_id)
        nm_graph = NetworkMorphismGraph(model)
        for args in target_graph.operation_history:
            getattr(nm_graph, args[0])(*list(args[1:]))
        return nm_graph.produce_model(), father_id
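Every example on this page rebuilds a morphed network by replaying operation_history onto a fresh graph with getattr(nm_graph, args[0])(*list(args[1:])). The following minimal sketch illustrates that replay pattern; TinyGraph is a hypothetical stand-in, not the autokeras Graph API.

class TinyGraph:
    """Hypothetical stand-in for a graph that records every mutation it performs."""

    def __init__(self):
        self.operation_history = []
        self.layers = []

    def add_layer(self, name):
        self.operation_history.append(('add_layer', name))
        self.layers.append(name)

    def widen_layer(self, index, factor):
        self.operation_history.append(('widen_layer', index, factor))
        self.layers[index] = '{}x{}'.format(self.layers[index], factor)


searched = TinyGraph()
searched.add_layer('conv')
searched.widen_layer(0, 2)

# Replay the recorded operations onto a fresh graph, exactly like
# `getattr(nm_graph, args[0])(*list(args[1:]))` in the examples on this page.
replayed = TinyGraph()
for args in searched.operation_history:
    getattr(replayed, args[0])(*list(args[1:]))

assert replayed.layers == searched.layers == ['convx2']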
Code Example #2
File: greedy.py Project: yeongseon/autokeras
    def generate(self, descriptors, timeout, sync_message):
        """Generate new neighbor architectures from the best model.

        Args:
            descriptors: All the searched neural architectures.
            timeout: An integer. The time limit in seconds.
            sync_message: The Queue for the multiprocessing return value.

        Returns:
            out: A list of 2-element tuples. Each tuple contains
                an instance of Graph (a morphed neural network with weights)
                and the father node ID in the search tree.
        """
        out = []
        start_time = time.time()
        descriptors = deepcopy(descriptors)

        if isinstance(sync_message, Queue) and sync_message.qsize() != 0:
            return out
        model_id = self.searcher.get_neighbour_best_model_id()
        graph = self.searcher.load_model_by_id(model_id)
        father_id = model_id
        for temp_graph in transform(graph):
            if contain(descriptors, temp_graph.extract_descriptor()):
                continue
            out.append((deepcopy(temp_graph), father_id))
        remaining_time = timeout - (time.time() - start_time)

        if remaining_time < 0:
            raise TimeoutError
        return out
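The generate() methods above share a small budget-and-sync pattern: return immediately if another worker has already posted to sync_message, and raise TimeoutError once the time limit is exceeded. A minimal, self-contained sketch of just that pattern follows; generate_with_budget is a hypothetical function, and the standard-library queue.Queue stands in for whatever queue type sync_message actually is.

import time
from queue import Queue


def generate_with_budget(candidates, timeout, sync_message=None):
    out = []
    start_time = time.time()
    # Another process already produced a result; skip the work entirely.
    if isinstance(sync_message, Queue) and sync_message.qsize() != 0:
        return out
    for item in candidates:
        out.append(item)
    # Enforce the time budget after the work is done, as in the examples above.
    remaining_time = timeout - (time.time() - start_time)
    if remaining_time < 0:
        raise TimeoutError
    return out


print(generate_with_budget(['graph_a', 'graph_b'], timeout=10))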
Code Example #3
    def search(self, x_train, y_train, x_test, y_test):
        """Override parent's search function. First model is randomly generated"""
        if not self.history:
            model = DefaultClassifierGenerator(self.n_classes,
                                               self.input_shape).generate()
            self.add_model(model, x_train, y_train, x_test, y_test)
            pickle_to_file(self, os.path.join(self.path, 'searcher'))

        else:
            model = self.load_best_model()
            new_graphs = transform(Graph(model, False))
            new_models = []
            for graph in new_graphs:
                nm_graph = Graph(model, True)
                for args in graph.operation_history:
                    getattr(nm_graph, args[0])(*list(args[1:]))
                new_models.append(nm_graph.produce_model())
            new_models = self._remove_duplicate(list(new_models))

            for model in new_models:
                if self.model_count < constant.MAX_MODEL_NUM:
                    self.add_model(model, x_train, y_train, x_test, y_test)
                    pickle_to_file(self, os.path.join(self.path, 'searcher'))

            backend.clear_session()

        return self.load_best_model()
Code Example #4
File: greedy.py Project: Saiuz/autokeras
    def generate(self, descriptors, timeout, sync_message):
        """Generate new neighbor architectures from the best model.

        Args:
            descriptors: All the searched neural architectures.
            timeout: An integer. The time limit in seconds.
            sync_message: The Queue for the multiprocessing return value.

        Returns:
            out: A list of 2-element tuples. Each tuple contains
                an instance of Graph (a morphed neural network with weights)
                and the father node ID in the search tree.
        """
        out = []
        start_time = time.time()
        descriptors = deepcopy(descriptors)

        if isinstance(sync_message, Queue) and sync_message.qsize() != 0:
            return out
        model_id = self.searcher.get_neighbour_best_model_id()
        graph = self.searcher.load_model_by_id(model_id)
        father_id = model_id
        for temp_graph in transform(graph):
            if contain(descriptors, temp_graph.extract_descriptor()):
                continue
            out.append((deepcopy(temp_graph), father_id))
        remaining_time = timeout - (time.time() - start_time)

        if remaining_time < 0:
            raise TimeoutError
        return out
Code Example #5
File: search.py Project: sunminhuai/autokeras
    def generate(self, multiprocessing_queue):
        """Generate the next neural architecture.

        Args:
            multiprocessing_queue: The Queue for the multiprocessing return value.

        Returns:
            A list of 2-element tuples of (generated_graph, other_info);
            for the random searcher the list has length 1.
            generated_graph: An instance of Graph.
            other_info: Anything to be saved in the training queue together with the architecture.

        """
        random_index = randrange(len(self.history))
        model_id = self.history[random_index]['model_id']
        graph = self.load_model_by_id(model_id)
        new_father_id = None
        generated_graph = None
        for temp_graph in transform(graph):
            if not contain(self.descriptors, temp_graph.extract_descriptor()):
                new_father_id = model_id
                generated_graph = temp_graph
                break
        if new_father_id is None:
            new_father_id = 0
            generated_graph = self.generators[0](self.n_classes, self.input_shape). \
                generate(self.default_model_len, self.default_model_width)

        return [(generated_graph, new_father_id)]
Code Example #6
File: random.py Project: Saiuz/autokeras
    def generate(self, multiprocessing_queue):
        """Generate the next neural architecture.

        Args:
            multiprocessing_queue: The Queue for the multiprocessing return value.

        Returns:
            A list of 2-element tuples of (generated_graph, other_info);
            for the random searcher the list has length 1.
            generated_graph: An instance of Graph.
            other_info: Anything to be saved in the training queue together with the architecture.

        """
        random_index = randrange(len(self.history))
        model_id = self.history[random_index]['model_id']
        graph = self.load_model_by_id(model_id)
        new_father_id = None
        generated_graph = None
        for temp_graph in transform(graph):
            if not contain(self.descriptors, temp_graph.extract_descriptor()):
                new_father_id = model_id
                generated_graph = temp_graph
                break
        if new_father_id is None:
            new_father_id = 0
            generated_graph = self.generators[0](self.n_classes, self.input_shape). \
                generate(self.default_model_len, self.default_model_width)

        return [(generated_graph, new_father_id)]
Code Example #7
    def optimize_acq(self, model_ids, descriptors, timeout):
        start_time = time.time()
        target_graph = None
        father_id = None
        descriptors = deepcopy(descriptors)
        elem_class = Elem
        if self.metric.higher_better():
            elem_class = ReverseElem

        # Initialize the priority queue.
        pq = PriorityQueue()
        temp_list = []
        for model_id in model_ids:
            metric_value = self.searcher.get_metric_value_by_id(model_id)
            temp_list.append((metric_value, model_id))
        temp_list = sorted(temp_list)
        for metric_value, model_id in temp_list:
            graph = self.searcher.load_model_by_id(model_id)
            graph.clear_operation_history()
            graph.clear_weights()
            pq.put(elem_class(metric_value, model_id, graph))

        t = 1.0
        t_min = self.t_min
        alpha = 0.9
        opt_acq = self._get_init_opt_acq_value()
        remaining_time = timeout
        while not pq.empty() and t > t_min and remaining_time > 0:
            elem = pq.get()
            if self.metric.higher_better():
                temp_exp = min((elem.metric_value - opt_acq) / t, 1.0)
            else:
                temp_exp = min((opt_acq - elem.metric_value) / t, 1.0)
            ap = math.exp(temp_exp)
            if ap >= random.uniform(0, 1):
                for temp_graph in transform(elem.graph):
                    if contain(descriptors, temp_graph.extract_descriptor()):
                        continue

                    temp_acq_value = self.acq(temp_graph)
                    pq.put(
                        elem_class(temp_acq_value, elem.father_id, temp_graph))
                    descriptors.append(temp_graph.extract_descriptor())
                    if self._accept_new_acq_value(opt_acq, temp_acq_value):
                        opt_acq = temp_acq_value
                        father_id = elem.father_id
                        target_graph = deepcopy(temp_graph)
            t *= alpha
            remaining_time = timeout - (time.time() - start_time)

        if remaining_time < 0:
            raise TimeoutError
        # Did not find a non-duplicated architecture.
        if father_id is None:
            return None, None
        nm_graph = self.searcher.load_model_by_id(father_id)
        for args in target_graph.operation_history:
            getattr(nm_graph, args[0])(*list(args[1:]))
        return nm_graph, father_id
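In optimize_acq above, elem_class is switched to ReverseElem whenever self.metric.higher_better() is true because PriorityQueue always pops its smallest element first. The sketch below shows that ordering trick in isolation; these Elem classes are simplified stand-ins, not necessarily the autokeras definitions.

from queue import PriorityQueue


class Elem:
    """Stand-in element: the lowest metric value is popped first."""

    def __init__(self, metric_value, father_id, graph):
        self.metric_value = metric_value
        self.father_id = father_id
        self.graph = graph

    def __lt__(self, other):
        return self.metric_value < other.metric_value


class ReverseElem(Elem):
    """Inverts the comparison so the highest metric value is popped first."""

    def __lt__(self, other):
        return self.metric_value > other.metric_value


pq = PriorityQueue()
for value in (0.2, 0.9, 0.5):
    pq.put(ReverseElem(value, father_id=None, graph=None))

assert pq.get().metric_value == 0.9  # best model comes out first when higher is better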
Code Example #8
File: bayesian.py Project: karolmajek/autokeras
    def optimize_acq(self, model_ids, descriptors, timeout):
        start_time = time.time()
        target_graph = None
        father_id = None
        descriptors = deepcopy(descriptors)
        elem_class = Elem
        if self.metric.higher_better():
            elem_class = ReverseElem

        # Initialize the priority queue.
        pq = PriorityQueue()
        temp_list = []
        for model_id in model_ids:
            metric_value = self.searcher.get_metric_value_by_id(model_id)
            temp_list.append((metric_value, model_id))
        temp_list = sorted(temp_list)
        for metric_value, model_id in temp_list:
            graph = self.searcher.load_model_by_id(model_id)
            graph.clear_operation_history()
            graph.clear_weights()
            pq.put(elem_class(metric_value, model_id, graph))

        t = 1.0
        t_min = self.t_min
        alpha = 0.9
        opt_acq = self._get_init_opt_acq_value()
        remaining_time = timeout
        while not pq.empty() and t > t_min and remaining_time > 0:
            elem = pq.get()
            if self.metric.higher_better():
                temp_exp = min((elem.metric_value - opt_acq) / t, 1.0)
            else:
                temp_exp = min((opt_acq - elem.metric_value) / t, 1.0)
            ap = math.exp(temp_exp)
            if ap >= random.uniform(0, 1):
                for temp_graph in transform(elem.graph):
                    if contain(descriptors, temp_graph.extract_descriptor()):
                        continue

                    temp_acq_value = self.acq(temp_graph)
                    pq.put(elem_class(temp_acq_value, elem.father_id, temp_graph))
                    descriptors.append(temp_graph.extract_descriptor())
                    if self._accept_new_acq_value(opt_acq, temp_acq_value):
                        opt_acq = temp_acq_value
                        father_id = elem.father_id
                        target_graph = deepcopy(temp_graph)
            t *= alpha
            remaining_time = timeout - (time.time() - start_time)

        if remaining_time < 0:
            raise TimeoutError
        # Did not find a non-duplicated architecture.
        if father_id is None:
            return None, None
        nm_graph = self.searcher.load_model_by_id(father_id)
        for args in target_graph.operation_history:
            getattr(nm_graph, args[0])(*list(args[1:]))
        return nm_graph, father_id
Code Example #9
File: search.py Project: yongfeng-li/autokeras
    def maximize_acq(self, timeout):
        start_time = time.time()
        model_ids = self.search_tree.adj_list.keys()
        target_graph = None
        father_id = None
        descriptors = deepcopy(self.descriptors)

        # Initialize the priority queue.
        pq = PriorityQueue()
        temp_list = []
        for model_id in model_ids:
            metric_value = self.get_metric_value_by_id(model_id)
            temp_list.append((metric_value, model_id))
        temp_list = sorted(temp_list)
        for metric_value, model_id in temp_list:
            graph = self.load_model_by_id(model_id)
            graph.clear_operation_history()
            pq.put(Elem(metric_value, model_id, graph))

        t = 1.0
        t_min = self.t_min
        alpha = 0.9
        max_acq = -1
        remaining_time = timeout - (time.time() - start_time)
        while not pq.empty() and t > t_min and remaining_time > 0:
            elem = pq.get()
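            # Clip the exponent at 709: math.exp overflows for larger arguments.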
            temp_exp = min((elem.metric_value - max_acq) / t, 709.0)
            ap = math.exp(temp_exp)
            if ap > random.uniform(0, 1):
                graphs = transform(elem.graph)

                for temp_graph in graphs:
                    if contain(descriptors, temp_graph.extract_descriptor()):
                        continue

                    temp_acq_value = self.acq(temp_graph)
                    pq.put(Elem(temp_acq_value, elem.father_id, temp_graph))
                    descriptors.append(temp_graph.extract_descriptor())
                    if temp_acq_value > max_acq:
                        max_acq = temp_acq_value
                        father_id = elem.father_id
                        target_graph = temp_graph
            t *= alpha
            remaining_time = timeout - (time.time() - start_time)

        if remaining_time < 0:
            raise TimeoutError
        nm_graph = self.load_model_by_id(father_id)
        if self.verbose:
            print('Father ID: ', father_id)
            print(target_graph.operation_history)
        for args in target_graph.operation_history:
            getattr(nm_graph, args[0])(*list(args[1:]))
        return nm_graph, father_id
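The loop in maximize_acq is a simulated-annealing style search: an element popped from the queue is expanded with probability exp((metric - best) / t), so worse elements are expanded less often as the temperature t is cooled by alpha each round. The standalone sketch below isolates just that acceptance rule (assuming higher metric values are better, as in this example); the names are illustrative, not autokeras code.

import math
import random


def acceptance_probability(candidate_value, best_value, t):
    """exp(delta / t): >= 1 for improvements, shrinks toward 0 for worse moves as t cools."""
    delta = candidate_value - best_value
    # Clip the exponent so math.exp cannot overflow (it raises above ~709).
    return math.exp(min(delta / t, 709.0))


best, candidate = 0.80, 0.75  # a slightly worse candidate than the current best
# In the search loop the temperature starts at 1.0 and is multiplied by alpha = 0.9
# every round; a worse candidate therefore gets expanded less and less often.
for t in (1.0, 0.5, 0.1, 0.01):
    ap = acceptance_probability(candidate, best, t)
    print('t=%.2f  ap=%.3g  expanded=%s' % (t, ap, ap > random.uniform(0, 1)))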
Code Example #10
    def maximize_acq(self):
        model_ids = self.search_tree.adj_list.keys()
        target_graph = None
        father_id = None
        descriptors = self.descriptors

        pq = PriorityQueue()
        temp_list = []
        for model_id in model_ids:
            accuracy = self.get_accuracy_by_id(model_id)
            temp_list.append((accuracy, model_id))
        temp_list = sorted(temp_list)
        if len(temp_list) > 5:
            temp_list = temp_list[:-5]
        for accuracy, model_id in temp_list:
            model = self.load_model_by_id(model_id)
            graph = Graph(model, False)
            pq.put(Elem(accuracy, model_id, graph))

        t = 1.0
        t_min = self.t_min
        alpha = 0.9
        max_acq = -1
        while not pq.empty() and t > t_min:
            elem = pq.get()
            ap = math.exp((elem.accuracy - max_acq) / t)
            if ap > random.uniform(0, 1):
                graphs = transform(elem.graph)
                graphs = list(
                    filter(lambda x: x.extract_descriptor() not in descriptors,
                           graphs))
                if not graphs:
                    continue
                for temp_graph in graphs:
                    temp_acq_value = self.acq(temp_graph)
                    pq.put(Elem(temp_acq_value, elem.father_id, temp_graph))
                    descriptors[temp_graph.extract_descriptor()] = True
                    if temp_acq_value > max_acq:
                        max_acq = temp_acq_value
                        father_id = elem.father_id
                        target_graph = temp_graph
            t *= alpha

        model = self.load_model_by_id(father_id)
        nm_graph = Graph(model, True)
        if self.verbose:
            print('Father ID: ', father_id)
            print(target_graph.operation_history)
        for args in target_graph.operation_history:
            getattr(nm_graph, args[0])(*list(args[1:]))
        return nm_graph.produce_model(), father_id
Code Example #11
File: search.py Project: wyp19930313/autokeras
    def maximize_acq(self):
        model_ids = self.search_tree.adj_list.keys()
        target_graph = None
        father_id = None
        descriptors = deepcopy(self.descriptors)

        pq = PriorityQueue()
        temp_list = []
        for model_id in model_ids:
            accuracy = self.get_accuracy_by_id(model_id)
            temp_list.append((accuracy, model_id))
        temp_list = sorted(temp_list)
        # if len(temp_list) > 5:
        #     temp_list = temp_list[:-5]
        for accuracy, model_id in temp_list:
            graph = self.load_model_by_id(model_id)
            pq.put(Elem(accuracy, model_id, graph))

        t = 1.0
        t_min = self.t_min
        alpha = 0.9
        max_acq = -1
        while not pq.empty() and t > t_min:
            elem = pq.get()
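            # Clip the exponent at 709: math.exp overflows for larger arguments.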
            temp_exp = min((elem.accuracy - max_acq) / t, 709.0)
            ap = math.exp(temp_exp)
            if ap > random.uniform(0, 1):
                graphs = transform(elem.graph)

                for temp_graph in graphs:
                    if contain(descriptors, temp_graph.extract_descriptor()):
                        continue
                    temp_acq_value = self.acq(temp_graph)
                    pq.put(Elem(temp_acq_value, elem.father_id, temp_graph))
                    descriptors[temp_graph.extract_descriptor()] = True
                    if temp_acq_value > max_acq:
                        max_acq = temp_acq_value
                        father_id = elem.father_id
                        target_graph = temp_graph
            t *= alpha

        nm_graph = self.load_model_by_id(father_id)
        if self.verbose:
            print('Father ID: ', father_id)
            print(target_graph.operation_history)
        for args in target_graph.operation_history:
            getattr(nm_graph, args[0])(*list(args[1:]))
        return nm_graph, father_id
Code Example #12
File: search.py Project: rbn42/autokeras
    def search(self, x_train, y_train, x_test, y_test):
        """Override parent's search function. First model is randomly generated"""
        if not self.history:
            model = DefaultClassifierGenerator(self.n_classes,
                                               self.input_shape).generate()
            self.add_model(model, x_train, y_train, x_test, y_test)

        optimal_accuracy = 0.0
        while self.model_count < constant.MAX_MODEL_NUM:
            model = self.load_best_model()
            new_models = self._remove_duplicate(transform(model))

            for model in new_models:
                if self.model_count < constant.MAX_MODEL_NUM:
                    self.add_model(model, x_train, y_train, x_test, y_test)

            max_accuracy = max(self.history,
                               key=lambda x: x['accuracy'])['accuracy']
            if max_accuracy <= optimal_accuracy:
                break
            optimal_accuracy = max_accuracy

        return self.load_best_model()
Code Example #13
    def generate(self, descriptors, timeout, sync_message=None):
        """Generate new architecture.

        Args:
            descriptors: All the searched neural architectures.
            timeout: An integer. The time limit in seconds.
            sync_message: The Queue for the multiprocessing return value.

        Returns:
            graph: An instance of Graph. A morphed neural network with weights.
            father_id: The father node ID in the search tree.
        """
        model_ids = self.search_tree.adj_list.keys()
        start_time = time.time()
        target_graph = None
        father_id = None
        descriptors = deepcopy(descriptors)
        elem_class = Elem
        if self.metric.higher_better():
            elem_class = ReverseElem

        # Initialize the priority queue.
        pq = PriorityQueue()
        temp_list = []
        for model_id in model_ids:
            metric_value = self.searcher.get_metric_value_by_id(model_id)
            temp_list.append((metric_value, model_id))
        temp_list = sorted(temp_list)
        for metric_value, model_id in temp_list:
            graph = self.searcher.load_model_by_id(model_id)
            graph.clear_operation_history()
            graph.clear_weights()
            pq.put(elem_class(metric_value, model_id, graph))

        t = 1.0
        t_min = self.t_min
        alpha = 0.9
        opt_acq = self._get_init_opt_acq_value()
        remaining_time = timeout
        while not pq.empty() and remaining_time > 0 and t > t_min:
            if isinstance(sync_message, Queue) and sync_message.qsize() != 0:
                break
            elem = pq.get()
            if self.metric.higher_better():
                temp_exp = min((elem.metric_value - opt_acq) / t, 1.0)
            else:
                temp_exp = min((opt_acq - elem.metric_value) / t, 1.0)
            ap = math.exp(temp_exp)
            if ap >= random.uniform(0, 1):
                for temp_graph in transform(elem.graph):
                    if contain(descriptors, temp_graph.extract_descriptor()):
                        continue

                    temp_acq_value = self.acq(temp_graph)
                    pq.put(
                        elem_class(temp_acq_value, elem.father_id, temp_graph))
                    descriptors.append(temp_graph.extract_descriptor())
                    if self._accept_new_acq_value(opt_acq, temp_acq_value):
                        opt_acq = temp_acq_value
                        father_id = elem.father_id
                        target_graph = deepcopy(temp_graph)
            t *= alpha
            remaining_time = timeout - (time.time() - start_time)

        if remaining_time < 0:
            raise TimeoutError
        # Did not find a non-duplicated architecture.
        if father_id is None:
            return None, None
        nm_graph = self.searcher.load_model_by_id(father_id)
        for args in target_graph.operation_history:
            getattr(nm_graph, args[0])(*list(args[1:]))
        return nm_graph, father_id
Code Example #14
File: bayesian.py Project: Saiuz/autokeras
    def generate(self, descriptors, timeout, sync_message=None):
        """Generate new architecture.

        Args:
            descriptors: All the searched neural architectures.
            timeout: An integer. The time limit in seconds.
            sync_message: The Queue for the multiprocessing return value.

        Returns:
            graph: An instance of Graph. A morphed neural network with weights.
            father_id: The father node ID in the search tree.
        """
        model_ids = self.search_tree.adj_list.keys()
        start_time = time.time()
        target_graph = None
        father_id = None
        descriptors = deepcopy(descriptors)
        elem_class = Elem
        if self.metric.higher_better():
            elem_class = ReverseElem

        # Initialize the priority queue.
        pq = PriorityQueue()
        temp_list = []
        for model_id in model_ids:
            metric_value = self.searcher.get_metric_value_by_id(model_id)
            temp_list.append((metric_value, model_id))
        temp_list = sorted(temp_list)
        for metric_value, model_id in temp_list:
            graph = self.searcher.load_model_by_id(model_id)
            graph.clear_operation_history()
            graph.clear_weights()
            pq.put(elem_class(metric_value, model_id, graph))

        t = 1.0
        t_min = self.t_min
        alpha = 0.9
        opt_acq = self._get_init_opt_acq_value()
        remaining_time = timeout
        while not pq.empty() and remaining_time > 0 and t > t_min:
            if isinstance(sync_message, Queue) and sync_message.qsize() != 0:
                break
            elem = pq.get()
            if self.metric.higher_better():
                temp_exp = min((elem.metric_value - opt_acq) / t, 1.0)
            else:
                temp_exp = min((opt_acq - elem.metric_value) / t, 1.0)
            ap = math.exp(temp_exp)
            if ap >= random.uniform(0, 1):
                for temp_graph in transform(elem.graph):
                    if contain(descriptors, temp_graph.extract_descriptor()):
                        continue

                    temp_acq_value = self.acq(temp_graph)
                    pq.put(elem_class(temp_acq_value, elem.father_id, temp_graph))
                    descriptors.append(temp_graph.extract_descriptor())
                    if self._accept_new_acq_value(opt_acq, temp_acq_value):
                        opt_acq = temp_acq_value
                        father_id = elem.father_id
                        target_graph = deepcopy(temp_graph)
            t *= alpha
            remaining_time = timeout - (time.time() - start_time)

        if remaining_time < 0:
            raise TimeoutError
        # Did not find a non-duplicated architecture.
        if father_id is None:
            return None, None
        nm_graph = self.searcher.load_model_by_id(father_id)
        for args in target_graph.operation_history:
            getattr(nm_graph, args[0])(*list(args[1:]))
        return nm_graph, father_id