Example No. 1
    def fit(self):
        begin = datetime.now()

        # Initialization
        self._TermsManager = TermsManager(self._Dataset,
                                          self.min_case_per_rule)
        self._Pruner = Pruner(self._Dataset, self._TermsManager, self.alpha)
        self._no_of_uncovered_cases = self._Dataset.get_no_of_uncovered_cases()
        self._get_population_Survival()

        while not self._global_stopping_condition():

            # local variables
            ant_index = 0
            converg_test_index = 1

            # Initialize rules
            previous_rule = Rule(self._Dataset, self.alpha)
            best_rule = copy.deepcopy(previous_rule)

            # Local search
            while not self._local_stopping_condition(ant_index,
                                                     converg_test_index):

                current_rule = Rule(self._Dataset, self.alpha)
                current_rule.construct(self._TermsManager,
                                       self.min_case_per_rule)
                current_rule = self._Pruner.prune(current_rule)

                if current_rule.equals(previous_rule):
                    converg_test_index += 1
                else:
                    converg_test_index = 1
                    if current_rule.fitness > best_rule.fitness:
                        best_rule = copy.deepcopy(current_rule)

                self._TermsManager.pheromone_updating(current_rule.antecedent,
                                                      current_rule.fitness)
                previous_rule = copy.deepcopy(current_rule)
                ant_index += 1

            # case: local search didn't find any exceptional rules
            if best_rule.fitness < 1 - self.alpha:
                break
            # saving local search results
            elif self._can_add_rule(
                    best_rule):  # check if rule already exists on the list
                self.discovered_rule_list.append(best_rule)
                self._Dataset.update_covered_cases(best_rule.sub_group_cases)
                self._no_of_uncovered_cases = self._Dataset.get_no_of_uncovered_cases(
                )
            self._TermsManager.pheromone_init()
            self._iterations += 1
        self._run_time = datetime.now() - begin

        # generates the rules representative strings
        for index, rule in enumerate(self.discovered_rule_list):
            rule.set_string_repr(index)
            rule.set_KMmodel()
        return
Example No. 2
    def test_call_initial_without_list(self):
        args = [
            'pruner', '--nocolor', 'pruner/tests/fake_proj/requirements.txt',
            'pruner/tests/fake_proj/output.txt', 'python',
            'pruner/tests/fake_proj/fake_proj.py'
        ]
        try:
            with patch.object(sys, 'argv', args):
                p = Pruner()
                s = p._call('ls', shell=True, initial=True)
        except:
            assert 1 == 0

        assert s == 0
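
Both this test and the next one lean on the standard-library `unittest.mock.patch.object` to swap `sys.argv` for the duration of the `with` block, so that constructing `Pruner()` parses a fake command line instead of the real one. A minimal, self-contained sketch of that pattern follows; the `parse_args` helper is a hypothetical stand-in for whatever argument parsing `Pruner()` does internally, only the patching call itself is the real standard-library API.

import sys
import argparse
from unittest.mock import patch


def parse_args():
    # hypothetical stand-in for the CLI parsing done inside Pruner()
    parser = argparse.ArgumentParser(prog='pruner')
    parser.add_argument('--nocolor', action='store_true')
    parser.add_argument('requirements')
    return parser.parse_args(sys.argv[1:])


def test_parse_args_with_patched_argv():
    fake_argv = ['pruner', '--nocolor', 'requirements.txt']
    with patch.object(sys, 'argv', fake_argv):
        args = parse_args()  # sees the patched argv
    assert args.nocolor is True
    assert args.requirements == 'requirements.txt'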
Example No. 3
    def test_nocolor(self):
        args = [
            'pruner', '--nocolor', 'pruner/tests/fake_proj/requirements.txt',
            'pruner/tests/fake_proj/output.txt', 'python',
            'pruner/tests/fake_proj/fake_proj.py'
        ]
        try:
            with patch.object(sys, 'argv', args):
                p = Pruner()
                p.run()
        except:
            assert 1 == 0

        assert p.args.nocolor is True
Example No. 4
    def prune_graph(self, graph_file_name, seed_file_name, oov_list_file,
                    output_graph_type, neighbour_prunning_method,
                    neighbour_prunning_input):
        #oov_list
        oov_list = []
        with open(oov_list_file) as inp:
            for line in inp:
                if line.strip() in self.phrase_to_id:
                    oov_list.append(self.phrase_to_id[line.strip()])

        # Pruning
        graph = {}
        with open(graph_file_name) as inp:
            for line in inp:
                parts = line.strip().split()
                node1 = parts[0]
                node2 = parts[1]
                weight = parts[2]
                if node1 in graph:
                    graph[node1].append((node2, weight))
                else:
                    graph[node1] = [(node2, weight)]
        pruner = Pruner()
        new_graph = pruner.prune(graph, self.labeled_nodes, oov_list,
                                 output_graph_type, neighbour_prunning_method,
                                 neighbour_prunning_input)

        # reading graph file
        with open(graph_file_name, 'r') as inp:
            #TODO add pruning details to the file
            with open(graph_file_name + ".pruned", 'w') as inp2:
                for line in inp:
                    parts = line.strip().split()
                    if parts[0] in new_graph and parts[1] in new_graph:
                        inp2.write(line)

        # reading seeds file
        with open(seed_file_name, 'r') as inp:
            #TODO add pruning details to the file
            with open(seed_file_name + ".pruned", 'w') as inp2:
                for line in inp:
                    parts = line.strip().split()
                    if parts[0] in new_graph:
                        inp2.write(line)
Example No. 5
    def prune_graph(self, graph_file_name, seed_file_name, oov_list_file,
                    output_graph_type, neighbour_prunning_method,
                    neighbour_prunning_input):
        # oov_list
        oov_list = []
        with open(oov_list_file) as inp:
            for line in inp:
                if line.strip() in self.phrase_to_id:
                    oov_list.append(self.phrase_to_id[line.strip()])

        # Pruning
        graph = {}
        with open(graph_file_name) as inp:
            for line in inp:
                parts = line.strip().split()
                node1 = parts[0]
                node2 = parts[1]
                weight = parts[2]
                if node1 in graph:
                    graph[node1].append((node2, weight))
                else:
                    graph[node1] = [(node2, weight)]
        pruner = Pruner()
        new_graph = pruner.prune(graph, self.labeled_nodes, oov_list,
                                 output_graph_type, neighbour_prunning_method,
                                 neighbour_prunning_input)

        # reading graph file
        with open(graph_file_name, 'r') as inp:
            # TODO add pruning details to the file
            with open(graph_file_name + ".pruned", 'w') as inp2:
                for line in inp:
                    parts = line.strip().split()
                    if parts[0] in new_graph and parts[1] in new_graph:
                        inp2.write(line)

        # reading seeds file
        with open(seed_file_name, 'r') as inp:
            # TODO add pruning details to the file
            with open(seed_file_name + ".pruned", 'w') as inp2:
                for line in inp:
                    parts = line.strip().split()
                    if parts[0] in new_graph:
                        inp2.write(line)
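
Both versions of prune_graph above build the adjacency structure with an explicit if/else on dictionary membership. A shorter, equivalent sketch using `collections.defaultdict` is shown below; it assumes the same whitespace-separated `node1 node2 weight` edge-list format that the method reads.

from collections import defaultdict


def load_graph(graph_file_name):
    # node -> list of (neighbour, weight) tuples, equivalent to the
    # membership test plus append/assign in prune_graph
    graph = defaultdict(list)
    with open(graph_file_name) as inp:
        for line in inp:
            parts = line.split()
            if len(parts) < 3:
                continue  # skip blank or malformed lines
            node1, node2, weight = parts[0], parts[1], parts[2]
            graph[node1].append((node2, weight))
    return dict(graph)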
Example No. 6
    def DenseNet40(self,
                   inputs=None,
                   is_train=True,
                   reload_w=None,
                   num_classes=None):

        model = Pruner(reload_file=reload_w)
        # net header
        x = model._add_layer(inputs,
                             mode="conv",
                             out_c=32,
                             k_size=3,
                             strides=1,
                             with_bn=False,
                             act=None)

        # stage 1
        x = self.densenet_block(model, x, is_train=is_train)
        x = self.densenet_trans(model, x, is_train=is_train)  #16
        # stage 2
        x = self.densenet_block(model, x, is_train=is_train)
        x = self.densenet_trans(model, x, is_train=is_train)  #8
        # stage 3
        x = self.densenet_block(model, x, is_train=is_train)

        x = model.bn_act_layer(x, is_train=is_train)
        x = model.gap_layer(x)
        x = model._add_layer(x,
                             mode="fc",
                             out_c=num_classes,
                             act=None,
                             with_bn=False)

        return x, model
Example No. 7
def main():
    # initialize the pruner
    pruner = Pruner(args)
    # pruner.prune(args.checkpoint)
    pruner.evaluate()

    # Run regularization
    pruner.prune(args.checkpoint,
                 fake_mask=True,
                 perm=args.perm,
                 num_iters=args.num_sort_iters)
    pruner.evaluate()
    pruner.regularize()
    pruner.apply_mask()
    pruner.evaluate()

    logging.debug("Fine-tuning model for {} epochs".format(args.epochs))
    best_acc = pruner.fine_tune(args.epochs)
    logging.debug("Fine-tuned model")
    pruner.evaluate()

    write_summary(args, best_acc=best_acc)
Example No. 8
class AntMinerSA:
    def __init__(self, no_of_ants, min_case_per_rule, max_uncovered_cases,
                 no_rules_converg):
        self.no_of_ants = no_of_ants
        self.min_case_per_rule = min_case_per_rule
        self.max_uncovered_cases = max_uncovered_cases
        self.no_rules_converg = no_rules_converg

        self.discovered_rule_list = []
        self._Dataset = None
        self._TermsManager = None
        self._Pruner = None
        self._no_of_uncovered_cases = None
        self._iterations = 0

    def _global_stopping_condition(self, converg_list_index):
        if self._no_of_uncovered_cases < self.max_uncovered_cases:
            return True
        if self._iterations >= self.no_of_ants:
            return True
#        if converg_list_index >= self.no_rules_converg:
#            return True
        return False

    def _local_stopping_condition(self, ant_index, converg_test_index):
        if ant_index >= self.no_of_ants:
            return True
        elif converg_test_index >= self.no_rules_converg:
            return True
        return False

    def read_data(self,
                  data_path=UserInputs.data_path,
                  header_path=UserInputs.header_path,
                  attr_survival_name=UserInputs.attr_survival_name,
                  attr_event_name=UserInputs.attr_event_name,
                  attr_id_name=UserInputs.attr_id_name,
                  attr_to_ignore=UserInputs.attr_to_ignore,
                  discretization=False):

        header = list(pd.read_csv(header_path, delimiter=','))
        data = pd.read_csv(data_path,
                           delimiter=',',
                           header=None,
                           names=header,
                           index_col=False)
        data.reset_index()
        self._Dataset = Dataset(data, attr_survival_name, attr_event_name,
                                attr_id_name, attr_to_ignore, discretization)

        return

    def fit(self):
        # Initialization
        self._TermsManager = TermsManager(self._Dataset,
                                          self.min_case_per_rule)
        self._Pruner = Pruner(self._Dataset, self._TermsManager)
        self._no_of_uncovered_cases = self._Dataset.get_no_of_uncovered_cases()

        converg_list_index = 0
        while not self._global_stopping_condition(converg_list_index):

            # local variables
            ant_index = 0
            converg_test_index = 1

            # Initialize rules
            previous_rule = Rule(self._Dataset)
            best_rule = copy.deepcopy(previous_rule)
            best_rule.quality = 1 - UserInputs.alpha

            while not self._local_stopping_condition(ant_index,
                                                     converg_test_index):

                current_rule = Rule(self._Dataset)
                current_rule.construct(self._TermsManager,
                                       self.min_case_per_rule)
                current_rule = self._Pruner.prune(current_rule)

                if current_rule.equals(previous_rule):
                    converg_test_index += 1
                else:
                    converg_test_index = 1
                    if current_rule.quality > best_rule.quality:
                        best_rule = copy.deepcopy(current_rule)

                self._TermsManager.pheromone_updating(current_rule.antecedent,
                                                      current_rule.quality)
                previous_rule = copy.deepcopy(current_rule)
                ant_index += 1

            if best_rule.quality == 1 - UserInputs.alpha:  # did not generate any rules
                break
            else:
                if self._can_add_rule(
                        best_rule):  # check if rule already exists on the list
                    self.discovered_rule_list.append(best_rule)
                    self._Dataset.update_covered_cases(
                        best_rule.sub_group_cases)
                    self._no_of_uncovered_cases = self._Dataset.get_no_of_uncovered_cases(
                    )
                    converg_list_index = 0
                else:
                    converg_list_index += 1

            self._TermsManager.pheromone_init()
            self._iterations += 1
        # END OF WHILE (AVAILABLE_CASES > MAX_UNCOVERED_CASES)

        # generates the rules representative strings
        for index, rule in enumerate(self.discovered_rule_list):
            rule.set_string_repr(index)

        return

    def save_results(self, log_file):
        f = open(log_file, "a+")
        f.write('\n\n====== ANT-MINER PARAMETERS ======')
        f.write('\nNumber of ants: {}'.format(self.no_of_ants))
        f.write('\nNumber of minimum cases per rule: {}'.format(
            self.min_case_per_rule))
        f.write('\nNumber of maximum uncovered cases: {}'.format(
            self.max_uncovered_cases))
        f.write('\nNumber of rules for convergence: {}'.format(
            self.no_rules_converg))
        f.write('\n\n====== USER INPUTS PARAMETERS ======')
        f.write('\nHeuristic method: {}'.format(UserInputs.heuristic_method))
        f.write('\nAlpha value for KM function confidence interval: {}'.format(
            UserInputs.kmf_alpha))
        f.write('\nAlpha value for LogRank confidence: {}'.format(
            UserInputs.alpha))
        f.write('\n\n====== RUN INFO ======')
        f.write('\nDatabase path: {}'.format(UserInputs.data_path))
        f.write('\nInstances: {}'.format(self._Dataset.data.shape[0]))
        f.write('\nAttributes: {}'.format(self._Dataset.data.shape[1]))
        f.write('\nNumber of remaining uncovered cases: {}'.format(
            self._no_of_uncovered_cases))
        f.write('\nNumber of iterations: {}'.format(self._iterations))
        f.write('\nNumber of discovered rules: {}'.format(
            len(self.discovered_rule_list)))
        f.write('\n\n====== DISCRETIZATION INFO ======')
        f.write('\nDiscretization method: {}'.format(
            UserInputs.discretization_method))
        f.write('\nDiscretized attributes: ' +
                repr(UserInputs.attr_2disc_names))
        f.write('\n\n====== DISCOVERED RULES ======')
        f.write('\n> Average survival on dataset: {}'.format(
            self._Dataset.average_survival) + '\n')
        f.close()

        # print all rules representatives and plots
        for index, rule in enumerate(self.discovered_rule_list):
            rule.print_rule(log_file)
            rule.plot_km_estimates(index)

        # print rules info
        f = open(log_file, "a+")
        f.write('\n\n====== DISCOVERED RULES INFO ======\n')
        f.close()
        for rule in self.discovered_rule_list:
            rule.print_rule(log_file)
            with open(log_file, "a+") as f:
                f.write('\n> Number of covered cases: {}'.format(
                    rule.no_covered_cases))
                f.write('\n> Covered cases: ' + repr(rule.sub_group_cases))
                f.write('\n> Quality: ' + repr(rule.quality))
                f.write('\n> p-value of LogRank test: ' +
                        repr(rule.logrank_test.p_value))
                f.write('\n')

        return

    def print_discovered_rules(self):

        for rule in self.discovered_rule_list:
            print(rule.string_repr[0] + ': ' + rule.string_repr[1])

        return

    def get_data(self):
        return self._Dataset.get_data()

    def get_train_data(self):
        return self._Dataset.data

    def _can_add_rule(self, new_rule):
        # check if generated rule already exists on the list

        for rule in self.discovered_rule_list:
            if new_rule.equals(rule):
                return False
        return True
Example No. 9
    def fit(self):
        # Initialization
        self._TermsManager = TermsManager(self._Dataset,
                                          self.min_case_per_rule)
        self._Pruner = Pruner(self._Dataset, self._TermsManager)
        self._no_of_uncovered_cases = self._Dataset.get_no_of_uncovered_cases()

        converg_list_index = 0
        while not self._global_stopping_condition(converg_list_index):

            # local variables
            ant_index = 0
            converg_test_index = 1

            # Initialize rules
            previous_rule = Rule(self._Dataset)
            best_rule = copy.deepcopy(previous_rule)
            best_rule.quality = 1 - UserInputs.alpha

            while not self._local_stopping_condition(ant_index,
                                                     converg_test_index):

                current_rule = Rule(self._Dataset)
                current_rule.construct(self._TermsManager,
                                       self.min_case_per_rule)
                current_rule = self._Pruner.prune(current_rule)

                if current_rule.equals(previous_rule):
                    converg_test_index += 1
                else:
                    converg_test_index = 1
                    if current_rule.quality > best_rule.quality:
                        best_rule = copy.deepcopy(current_rule)

                self._TermsManager.pheromone_updating(current_rule.antecedent,
                                                      current_rule.quality)
                previous_rule = copy.deepcopy(current_rule)
                ant_index += 1

            if best_rule.quality == 1 - UserInputs.alpha:  # did not generate any rules
                break
            else:
                if self._can_add_rule(
                        best_rule):  # check if rule already exists on the list
                    self.discovered_rule_list.append(best_rule)
                    self._Dataset.update_covered_cases(
                        best_rule.sub_group_cases)
                    self._no_of_uncovered_cases = self._Dataset.get_no_of_uncovered_cases(
                    )
                    converg_list_index = 0
                else:
                    converg_list_index += 1

            self._TermsManager.pheromone_init()
            self._iterations += 1
        # END OF WHILE (AVAILABLE_CASES > MAX_UNCOVERED_CASES)

        # generates the rules representative strings
        for index, rule in enumerate(self.discovered_rule_list):
            rule.set_string_repr(index)

        return
Example No. 10
    def MobileNetV1(self,
                    inputs=None,
                    is_train=None,
                    reload_w=None,
                    num_classes=None):

        model = Pruner(reload_file=reload_w)

        x = model._add_layer(inputs,
                             mode="conv",
                             out_c=32,
                             k_size=3,
                             strides=1)

        x = model._add_layer(x,
                             mode="dconv",
                             out_c=64,
                             k_size=3,
                             strides=1,
                             with_bn=True,
                             is_train=is_train)
        x = model._add_layer(x,
                             mode="dconv",
                             out_c=128,
                             k_size=3,
                             strides=2,
                             with_bn=True,
                             is_train=is_train)

        x = model._add_layer(x,
                             mode="dconv",
                             out_c=128,
                             k_size=3,
                             strides=1,
                             with_bn=True,
                             is_train=is_train)
        x = model._add_layer(x,
                             mode="dconv",
                             out_c=256,
                             k_size=3,
                             strides=2,
                             with_bn=True,
                             is_train=is_train)

        x = model._add_layer(x,
                             mode="dconv",
                             out_c=256,
                             k_size=3,
                             strides=1,
                             with_bn=True,
                             is_train=is_train)
        x = model._add_layer(x,
                             mode="dconv",
                             out_c=512,
                             k_size=3,
                             strides=2,
                             with_bn=True,
                             is_train=is_train)

        x = model._add_layer(x,
                             mode="dconv",
                             out_c=512,
                             k_size=3,
                             strides=1,
                             with_bn=True,
                             is_train=is_train)
        x = model._add_layer(x,
                             mode="dconv",
                             out_c=512,
                             k_size=3,
                             strides=1,
                             with_bn=True,
                             is_train=is_train)
        x = model._add_layer(x,
                             mode="dconv",
                             out_c=512,
                             k_size=3,
                             strides=1,
                             with_bn=True,
                             is_train=is_train)
        x = model._add_layer(x,
                             mode="dconv",
                             out_c=512,
                             k_size=3,
                             strides=1,
                             with_bn=True,
                             is_train=is_train)
        x = model._add_layer(x,
                             mode="dconv",
                             out_c=512,
                             k_size=3,
                             strides=1,
                             with_bn=True,
                             is_train=is_train)

        x = model.gap_layer(x)

        x = model._add_layer(x, mode="fc", out_c=num_classes, act=None)

        return x, model
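
The repeated `mode="dconv"` calls above follow the depthwise-separable convolution pattern of MobileNetV1: a 3x3 depthwise convolution followed by a 1x1 pointwise projection, each with batch normalisation and ReLU. The `Pruner._add_layer` wrapper is specific to this repository; as a point of reference only, a minimal sketch of one such block in plain tf.keras (an assumption, not this project's API) would look like:

import tensorflow as tf


def depthwise_separable_block(x, out_c, strides, is_train=True):
    # 3x3 depthwise conv, then 1x1 pointwise projection, BN + ReLU after each
    x = tf.keras.layers.DepthwiseConv2D(3, strides=strides, padding="same",
                                        use_bias=False)(x)
    x = tf.keras.layers.BatchNormalization()(x, training=is_train)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.Conv2D(out_c, 1, padding="same", use_bias=False)(x)
    x = tf.keras.layers.BatchNormalization()(x, training=is_train)
    return tf.keras.layers.ReLU()(x)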
Example No. 11
    def simpleNet(self,
                  inputs=None,
                  is_train=True,
                  reload_w=None,
                  num_classes=None):

        model = Pruner(reload_file=reload_w)

        k_size = 5
        x = model._add_layer(inputs,
                             mode='conv',
                             out_c=32,
                             k_size=k_size,
                             strides=1,
                             is_train=is_train)
        x = model._add_layer(x,
                             mode='conv',
                             out_c=64,
                             k_size=k_size,
                             strides=2,
                             is_train=is_train)
        x1 = model._add_layer(x,
                              mode='conv',
                              out_c=64,
                              k_size=k_size,
                              strides=1,
                              is_train=is_train)
        x2 = model._add_layer(x,
                              mode='conv',
                              out_c=64,
                              k_size=k_size,
                              strides=1,
                              is_train=is_train)

        x = model.Add_layer(x1, x2)

        x = model._add_layer(x,
                             mode='conv',
                             out_c=64,
                             k_size=k_size,
                             strides=1,
                             is_train=is_train)
        x = model._add_layer(x,
                             mode='conv',
                             out_c=96,
                             k_size=k_size,
                             strides=2,
                             is_train=is_train)

        x = model._add_layer(x,
                             mode='conv',
                             out_c=96,
                             k_size=k_size,
                             strides=1,
                             is_train=is_train)
        x = model._add_layer(x,
                             mode='conv',
                             out_c=128,
                             k_size=k_size,
                             strides=2,
                             is_train=is_train)

        x = model.gap_layer(x)

        x = model._add_layer(x,
                             mode="fc",
                             out_c=num_classes,
                             with_bn=False,
                             act=None)

        return x, model
Example No. 12
class ESMAM:
    def __init__(self, no_of_ants, min_case_per_rule, max_uncovered_cases,
                 no_rules_converg, alpha):
        self.no_of_ants = no_of_ants
        self.min_case_per_rule = min_case_per_rule
        self.max_uncovered_cases = max_uncovered_cases
        self.no_rules_converg = no_rules_converg
        self.alpha = alpha

        self.discovered_rule_list = []
        self._Dataset = None
        self._TermsManager = None
        self._Pruner = None
        self._data_path = None
        self._population_survModel = None
        self._no_of_uncovered_cases = None
        self._iterations = 0
        self._run_time = None

    def _get_population_Survival(self):

        kmf = KaplanMeierFitter()
        kmf.fit(self._Dataset.survival_times[1],
                self._Dataset.events[1],
                label='KM estimates for population',
                alpha=self.alpha)
        self._population_survModel = kmf
        return

    def _save_SurvivalFunctions(self, prefix):

        index = self._population_survModel.survival_function_.index.copy()
        columns = ['times', 'population'] + [
            rule.string_repr[0] for rule in self.discovered_rule_list
        ]
        df = pd.DataFrame(columns=columns)
        df.times = index.values
        df.population = self._population_survModel.survival_function_.values

        for rule in self.discovered_rule_list:
            survival_fnc = rule.KMmodel['subgroup'].survival_function_.reindex(
                index)
            survival_fnc.fillna(method='ffill', inplace=True)
            df[rule.string_repr[0]] = survival_fnc.values

        log_file = '{}_KM-Estimates.txt'.format(prefix)
        df.to_csv(log_file, index=False, header=True)

        return

    def _global_stopping_condition(self):
        if self._no_of_uncovered_cases <= self.max_uncovered_cases:
            return True
        if self._iterations >= self.no_of_ants:
            return True
        return False

    def _local_stopping_condition(self, ant_index, converg_test_index):
        if ant_index >= self.no_of_ants:
            return True
        elif converg_test_index >= self.no_rules_converg:
            return True
        return False

    def _can_add_rule(self, new_rule):
        # check if generated rule already exists on the list
        for rule in self.discovered_rule_list:
            if new_rule.equals(rule):
                return False
        return True

    def read_data(self, data_path, dtype_path, attr_survival_name,
                  attr_event_name):

        if dtype_path:
            with open(dtype_path, 'r') as f:
                dtypes = json.load(f)
            data = pd.read_csv(data_path,
                               delimiter=',',
                               header=0,
                               index_col=False,
                               dtype=dtypes)
            data.reset_index(drop=True, inplace=True)
        else:
            data = pd.read_csv(data_path,
                               delimiter=',',
                               header=0,
                               index_col=False)
            data.reset_index(drop=True, inplace=True)

        self._data_path = data_path
        self._Dataset = Dataset(data, attr_survival_name, attr_event_name)
        return

    def fit(self):
        begin = datetime.now()

        # Initialization
        self._TermsManager = TermsManager(self._Dataset,
                                          self.min_case_per_rule)
        self._Pruner = Pruner(self._Dataset, self._TermsManager, self.alpha)
        self._no_of_uncovered_cases = self._Dataset.get_no_of_uncovered_cases()
        self._get_population_Survival()

        while not self._global_stopping_condition():

            # local variables
            ant_index = 0
            converg_test_index = 1

            # Initialize rules
            previous_rule = Rule(self._Dataset, self.alpha)
            best_rule = copy.deepcopy(previous_rule)

            # Local search
            while not self._local_stopping_condition(ant_index,
                                                     converg_test_index):

                current_rule = Rule(self._Dataset, self.alpha)
                current_rule.construct(self._TermsManager,
                                       self.min_case_per_rule)
                current_rule = self._Pruner.prune(current_rule)

                if current_rule.equals(previous_rule):
                    converg_test_index += 1
                else:
                    converg_test_index = 1
                    if current_rule.fitness > best_rule.fitness:
                        best_rule = copy.deepcopy(current_rule)

                self._TermsManager.pheromone_updating(current_rule.antecedent,
                                                      current_rule.fitness)
                previous_rule = copy.deepcopy(current_rule)
                ant_index += 1

            # case: local search didn't find any exceptional rules
            if best_rule.fitness < 1 - self.alpha:
                break
            # saving local search results
            elif self._can_add_rule(
                    best_rule):  # check if rule already exists on the list
                self.discovered_rule_list.append(best_rule)
                self._Dataset.update_covered_cases(best_rule.sub_group_cases)
                self._no_of_uncovered_cases = self._Dataset.get_no_of_uncovered_cases(
                )
            self._TermsManager.pheromone_init()
            self._iterations += 1
        self._run_time = datetime.now() - begin

        # generates the rules representative strings
        for index, rule in enumerate(self.discovered_rule_list):
            rule.set_string_repr(index)
            rule.set_KMmodel()
        return

    def save_results(self, prefix):

        log_file = '{}_log.txt'.format(prefix)
        # LOG FILE FOR GENERAL INFO:
        f = open(log_file, "a+")
        f.write('\n\n====== ESMAM PARAMETERS ======')
        f.write('\nNumber of ants: {}'.format(self.no_of_ants))
        f.write('\nNumber of minimum cases per rule: {}'.format(
            self.min_case_per_rule))
        f.write('\nNumber of maximum uncovered cases: {}'.format(
            self.max_uncovered_cases))
        f.write('\nNumber of rules for convergence: {}'.format(
            self.no_rules_converg))
        f.write('\nAlpha value for LogRank confidence: {}'.format(self.alpha))
        f.write('\n\n====== RUN INFO ======')
        f.write('\nDatabase path: {}'.format(self._data_path))
        f.write('\nInstances: {}'.format(self._Dataset.data.shape[0]))
        f.write('\nAttributes: {}'.format(self._Dataset.data.shape[1]))
        f.write('\n# discovered rules: {}'.format(
            len(self.discovered_rule_list)))
        f.write('\nremaining uncovered cases (%): {}'.format(
            (self._no_of_uncovered_cases / self._Dataset.data.shape[0])))
        f.write('\n>run-time: {}'.format(self._run_time))
        f.close()

        # RULE-SET FILE (RULE MODEL INFO):
        rules_file = '{}_RuleSet.txt'.format(prefix)
        f = open(rules_file, "a+")
        f.write('> Average survival on dataset: {}'.format(
            self._Dataset.average_survival))
        f.write('\nDISCOVERED RULES:')
        f.close()
        for index, rule in enumerate(
                self.discovered_rule_list
        ):  # print all rules representatives and plots
            rule.print_rule(rules_file)

        # LOG FILE FOR KM ESTIMATES
        self._save_SurvivalFunctions(prefix)

        return
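
The `_get_population_Survival` method above fits a population-level Kaplan-Meier curve with the lifelines library, and `_save_SurvivalFunctions` later reuses the fitted `survival_function_`. A small, self-contained sketch of that lifelines call on toy data (the numbers are made up for illustration):

import pandas as pd
from lifelines import KaplanMeierFitter

# toy survival data: observation times and event flags (1 = event, 0 = censored)
times = pd.Series([5, 8, 12, 20, 33, 40, 41, 52])
events = pd.Series([1, 1, 0, 1, 0, 1, 1, 0])

kmf = KaplanMeierFitter()
kmf.fit(times, events, label='KM estimates for population', alpha=0.05)

print(kmf.survival_function_)      # survival curve indexed by time
print(kmf.median_survival_time_)   # median survival estimate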
Example No. 13
    e = time.time()
    print("Inference Time:  " + str(e - s))

if args.prune == 1:
    print("=" * 60)
    print("PRUNING")
    print("=" * 60)
    print("")

    name = args.data + '_' + args.load[:-4]
    set_sparsity(network, args.sensitivity, name)
    rule = get_rules("rules/" + name + ".rule")
    fname = args.load[:-4] + '_pruned'
    original_param, o_total = get_num_weights(network, verbose=False)

    pruner = Pruner(rule=rule)
    pruner.prune(model=network, stage=0, update_masks=True, verbose=False)

    if args.init_param == 1:
        network.apply(weights_init_uniform_rule)
        print("\nRe-initialised weights...")

    # prune
    for i in range(args.prune_iter):
        print("")
        print("-" * 60)
        print("PRUNE ITERATION", i)
        print("-" * 60)
        print("")

        run_training(args,
Example No. 14
    def ResNet18(self,
                 inputs=None,
                 is_train=True,
                 reload_w=None,
                 num_classes=None):

        model = Pruner(reload_file=reload_w)

        num_block = self.model_config["resnet18"]

        block_func = self.__resnet_block_v1 if self.block_version == 1 else self.__resnet_block_v2

        if self.block_version == 1:
            x = model._add_layer(inputs,
                                 mode="conv",
                                 out_c=self.init_channels,
                                 k_size=3,
                                 strides=1,
                                 with_bn=True,
                                 is_train=is_train)
        else:
            x = model._add_layer(inputs,
                                 mode="conv",
                                 out_c=self.init_channels,
                                 k_size=3,
                                 strides=1,
                                 with_bn=False,
                                 act=None)

        # stage 1 out size = 32
        for _ in range(num_block[0]):
            x = block_func(model,
                           inputs=x,
                           out_c=self.init_channels,
                           strides=1,
                           is_train=is_train)

        # stage 2 out_size = 16
        x = block_func(model,
                       inputs=x,
                       out_c=self.init_channels * 2,
                       strides=2,
                       is_train=is_train)
        for _ in range(num_block[1] - 1):
            x = block_func(model,
                           inputs=x,
                           out_c=self.init_channels * 2,
                           strides=1,
                           is_train=is_train)

        # stage 3 out_size = 8
        x = block_func(model,
                       inputs=x,
                       out_c=self.init_channels * 4,
                       strides=2,
                       is_train=is_train)
        for _ in range(num_block[2] - 1):
            x = block_func(model,
                           inputs=x,
                           out_c=self.init_channels * 4,
                           strides=1,
                           is_train=is_train)

        # stage 4 out_size = 4
        x = block_func(model,
                       inputs=x,
                       out_c=self.init_channels * 8,
                       strides=2,
                       is_train=is_train)
        for _ in range(num_block[3] - 1):
            x = block_func(model,
                           inputs=x,
                           out_c=self.init_channels * 8,
                           strides=1,
                           is_train=is_train)

        if self.block_version == 2:
            x = model.bn_act_layer(x, is_train=is_train)

        x = model.gap_layer(x)
        x = model._add_layer(x,
                             mode="fc",
                             out_c=num_classes,
                             act=None,
                             with_bn=False)

        return x, model
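
The `block_func` used above switches between the repository's private pre-activation (v2) and post-activation (v1) residual block helpers. For orientation only, a minimal sketch of a v1-style basic block in plain tf.keras (an assumption, not the `Pruner` API) is:

import tensorflow as tf


def resnet_block_v1(x, out_c, strides=1):
    # conv-BN-ReLU, conv-BN, then add a (possibly projected) shortcut and ReLU
    shortcut = x
    y = tf.keras.layers.Conv2D(out_c, 3, strides=strides, padding="same",
                               use_bias=False)(x)
    y = tf.keras.layers.BatchNormalization()(y)
    y = tf.keras.layers.ReLU()(y)
    y = tf.keras.layers.Conv2D(out_c, 3, padding="same", use_bias=False)(y)
    y = tf.keras.layers.BatchNormalization()(y)
    if strides != 1 or x.shape[-1] != out_c:
        shortcut = tf.keras.layers.Conv2D(out_c, 1, strides=strides,
                                          use_bias=False)(x)
    y = tf.keras.layers.Add()([y, shortcut])
    return tf.keras.layers.ReLU()(y)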
Example No. 15
def train(model, train_data, val_data, params):

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=params['lr'],
                                 weight_decay=params['decay'])

    # optimizer = torch.optim.SGD(model.parameters(), lr=params['lr'], momentum=params['p_i'], dampening=params['p_i'])

    scheduler = lr_scheduler.StepLR(optimizer,
                                    step_size=params['step_size'],
                                    gamma=params['gamma'])

    pruner = Pruner(train_data, optimizer, params)

    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=params['batch_size'],
                                               shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=params['batch_size'],
                                             shuffle=True)
    # output_file = 'bs_'+str(batch_size)+'_lr_'+str(learning_rate)+'_wd_'+str(decay)+'.csv'
    # with open(output_file, 'w', newline="") as f_out:
    #     writer = csv.writer(f_out, delimiter=',')
    #     writer.writerow(["Epoch",'Train Loss','Val Loss','Val Acc'])

    df = pd.DataFrame(index=list(range(params['epochs'])),
                      columns=["trainacc", "trainloss", "valacc", "valloss"])

    # fig = plt.gcf()
    # fig.show()
    # fig.canvas.draw()

    for epoch in range(
            params['epochs']):  # loop over the dataset multiple times
        t1 = time.perf_counter()
        model.train()
        train_losses, train_accs = [], []
        acc = 0
        for batch, (x_train, y_train) in enumerate(train_loader):

            model.zero_grad()
            pred, std = model(x_train)

            if epoch > params['prune_milestones'][0]:
                loss = F.cross_entropy(pred, y_train)
            else:
                d, yd = model.sample_sigma(pred, std, y_train)
                loss = F.cross_entropy(pred, y_train) + loss_l2(d, yd)

            loss.backward()
            optimizer.step()

            # optimizer.param_groups[0]['lr'] = get_lr(epoch, params['lr'], params['gamma'])
            # optimizer.param_groups[0]['momentum'] = get_momentum(epoch, params['p_i'], params['p_f'], params['T'])
            # optimizer.param_groups[0]['dampening'] = get_momentum(epoch, params['p_i'], params['p_f'], params['T'])

            acc = (pred.argmax(dim=-1) == y_train).to(torch.float32).mean()
            train_accs.append(acc.mean().item())
            train_losses.append(loss.item())

        with torch.no_grad():
            model.eval()
            val_losses, val_accs = [], []
            acc = 0
            for i, (x_val, y_val) in enumerate(val_loader):
                val_pred, val_std = model(x_val)

                if epoch > params['prune_milestones'][0]:
                    val_loss = F.cross_entropy(val_pred, y_val)
                else:
                    val_d, val_yd = model.sample_sigma(val_pred, val_std,
                                                       y_val)
                    val_loss = F.cross_entropy(val_pred, y_val) + loss_l2(
                        val_d, val_yd)

                # val_d, val_yd = model.sample_sigma(val_pred, val_std,y_val)
                # loss =  F.cross_entropy(val_pred,y_val) + loss_l2(val_d,val_yd)
                acc = (val_pred.argmax(dim=-1) == y_val).to(
                    torch.float32).mean()
                val_losses.append(val_loss.item())
                val_accs.append(acc.mean().item())

            train_loader = pruner.prune_dataloaders(model, train_loader)

        scheduler.step()

        print(
            'Decay: {}, Epoch: {}, Loss: {}, VAccuracy: {}, Ignore_list_Size: {}, LR: {}'
            .format(params['decay'], epoch, np.mean(val_losses),
                    np.mean(val_accs), len(pruner.ignore_list),
                    optimizer.param_groups[0]['lr']))

        df.loc[epoch, 'trainacc'] = np.mean(train_accs)
        df.loc[epoch, 'trainloss'] = np.mean(train_losses)
        df.loc[epoch, 'valacc'] = np.mean(val_accs)
        df.loc[epoch, 'valloss'] = np.mean(val_losses)

        # plt.plot(df[:epoch+1]['valloss'])
        # plt.plot(df[:epoch+1]['trainloss'])

        # plt.pause(0.01)  # I ain't needed!!!
        # fig.canvas.draw()

    fig1 = plt.figure()
    ax1 = fig1.add_subplot()
    ax1.plot(df['valloss'])
    ax1.plot(df['trainloss'])
    ax1.grid()
    ax1.set_xlabel('Epoch')
    fig1.show()
    return model, df
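
The training loop above pairs Adam (with weight decay) and a StepLR schedule stepped once per epoch, while the repository's `Pruner` progressively drops samples from the DataLoader. Stripped of the pruning and of the (pred, std) heteroscedastic head, the optimiser/scheduler skeleton reduces to the sketch below (a hypothetical helper, assuming a model that returns plain logits):

import torch
import torch.nn.functional as F
from torch.optim import lr_scheduler


def fit(model, train_loader, epochs, lr=1e-3, decay=1e-4, step_size=30, gamma=0.1):
    # Adam with weight decay plus a StepLR schedule stepped once per epoch
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=decay)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
    for _ in range(epochs):
        model.train()
        for x, y in train_loader:
            optimizer.zero_grad()
            loss = F.cross_entropy(model(x), y)
            loss.backward()
            optimizer.step()
        scheduler.step()
    return model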
Example No. 16
def train_prune(model, train_data, val_data, params):

    init_model_dict = model.state_dict()

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=params['lr'],
                                 weight_decay=params['decay'])
    scheduler = lr_scheduler.StepLR(optimizer,
                                    step_size=params['step_size'],
                                    gamma=params['gamma'])

    pruner = Pruner(train_data, optimizer, params)

    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=params['batch_size'],
                                               shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=params['batch_size'],
                                             shuffle=True)
    # output_file = 'bs_'+str(batch_size)+'_lr_'+str(learning_rate)+'_wd_'+str(decay)+'.csv'
    # with open(output_file, 'w', newline="") as f_out:
    #     writer = csv.writer(f_out, delimiter=',')
    #     writer.writerow(["Epoch",'Train Loss','Val Loss','Val Acc'])

    df = pd.DataFrame(index=list(range(params['epochs'])),
                      columns=["trainacc", "trainloss", "valacc", "valloss"])

    fig = plt.gcf()
    fig.show()
    fig.canvas.draw()

    epoch_trainaccs, epoch_valaccs = [], []
    epoch_trainloss, epoch_valloss = [], []
    for epoch in range(
            params['epochs']):  # loop over the dataset multiple times
        t1 = time.perf_counter()
        model.train()
        train_losses, train_accs = [], []
        acc = 0
        for batch, (x_train, y_train) in enumerate(train_loader):

            model.zero_grad()
            pred, std = model(x_train)

            if (epoch > params['prune_milestones'][0]):
                d, yd = model.sample_sigma(pred, std, y_train)
                loss = F.cross_entropy(pred, y_train) + loss_l2(d, yd)
            else:
                loss = F.cross_entropy(pred, y_train)

            loss.backward()
            optimizer.step()

            acc = (pred.argmax(dim=-1) == y_train).to(torch.float32).mean()
            train_accs.append(acc.mean().item())
            train_losses.append(loss.item())

        with torch.no_grad():
            model.eval()
            val_losses, val_accs = [], []
            acc = 0
            for i, (x_val, y_val) in enumerate(val_loader):
                val_pred, val_std = model(x_val)
                val_d, val_yd = model.sample_sigma(val_pred, val_std, y_val)
                loss = F.cross_entropy(val_pred, y_val) + loss_l2(
                    val_d, val_yd)
                acc = (val_pred.argmax(dim=-1) == y_val).to(
                    torch.float32).mean()
                val_losses.append(loss.item())
                val_accs.append(acc.mean().item())

            train_loader = pruner.prune_dataloaders(model, train_loader)

        scheduler.step()

        print(
            'Decay: {}, Epoch: {}, Loss: {}, TAccuracy: {}, Ignore_list_Size: {}, LR: {}'
            .format(params['decay'], epoch, np.mean(val_losses),
                    np.mean(train_accs), len(pruner.ignore_list),
                    optimizer.param_groups[0]['lr']))

        df.loc[epoch, 'trainacc'] = np.mean(train_accs)
        df.loc[epoch, 'trainloss'] = np.mean(train_losses)
        df.loc[epoch, 'valacc'] = np.mean(val_accs)
        df.loc[epoch, 'valloss'] = np.mean(val_losses)

        plt.plot(df[:epoch + 1]['valloss'])
        plt.plot(df[:epoch + 1]['trainloss'])

        plt.pause(0.01)  # I ain't needed!!!
        fig.canvas.draw()

        epoch_trainaccs.append(np.mean(train_accs))
        epoch_valaccs.append(np.mean(val_accs))
        epoch_trainloss.append(np.mean(train_losses))
        epoch_valloss.append(np.mean(val_losses))
        _results = [
            time.perf_counter() - t1, epoch,
            np.mean(train_losses),
            np.mean(val_losses),
            np.mean(val_accs)
        ]
        # with open(output_file, 'a', newline="") as f_out:
        #     writer = csv.writer(f_out, delimiter=',')
        #     writer.writerow(_results)

    # with torch.no_grad():
    #     unc = []
    #     model.eval()
    #     for  b,(x,y) in enumerate(train_loader):
    #         pred,s = model(x)
    #         c = pred.argmax(dim=1)
    #         for i in range(len(c)):
    #             unc.append(torch.sqrt(F.softplus(s[i,c[i]])).item())
    #     fig = plt.figure()
    #     ax = fig.add_subplot()
    #     ax.hist(unc,100)
    #     ax.set_title('Aleatoric Uncertainty distribution of training data after pruning')

    return model, df, train_loader
Example No. 17
#coding:utf-8
'''
Author:Wang Haibo
At: 
Email: [email protected]
'''

from pruner import Pruner
import tensorflow as tf

model = Pruner()

a = tf.placeholder(shape=[None, 32, 32, 3], dtype=tf.float32)
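The snippet above is TensorFlow 1.x graph-mode code: `tf.placeholder` only declares an input slot, and real data is supplied later through `feed_dict` inside a session. A minimal sketch of how such a placeholder is typically fed (the dense head and the random batch are illustrative assumptions, not part of this example):

import numpy as np
import tensorflow as tf  # TF 1.x graph mode, as in the snippet above

a = tf.placeholder(shape=[None, 32, 32, 3], dtype=tf.float32)
logits = tf.layers.dense(tf.layers.flatten(a), 10)  # hypothetical head

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(4, 32, 32, 3).astype(np.float32)
    out = sess.run(logits, feed_dict={a: batch})
    print(out.shape)  # (4, 10)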
Example No. 18
def main():
    setup_default_logging()
    args, args_text = _parse_args()

    args.prefetcher = not args.no_prefetcher
    args.distributed = False
    if 'WORLD_SIZE' in os.environ:
        args.distributed = int(os.environ['WORLD_SIZE']) > 1
        if args.distributed and args.num_gpu > 1:
            logging.warning('Using more than one GPU per process in distributed mode is not allowed. Setting num_gpu to 1.')
            args.num_gpu = 1

    args.device = 'cuda:0'
    args.world_size = 1
    args.rank = 0  # global rank
    if args.distributed:
        args.num_gpu = 1
        args.device = 'cuda:%d' % args.local_rank
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        args.world_size = torch.distributed.get_world_size()
        args.rank = torch.distributed.get_rank()
    assert args.rank >= 0

    if args.distributed:
        logging.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
                     % (args.rank, args.world_size))
    else:
        logging.info('Training with a single process on %d GPUs.' % args.num_gpu)

    torch.manual_seed(args.seed + args.rank)

    model = create_model(
        args.model,
        pretrained=args.pretrained,
        num_classes=args.num_classes,
        drop_rate=args.drop,
        global_pool=args.gp,
        bn_tf=args.bn_tf,
        bn_momentum=args.bn_momentum,
        bn_eps=args.bn_eps,
        checkpoint_path=args.initial_checkpoint)

    if args.binarizable:
        Model_binary_patch(model)


    if args.local_rank == 0:
        logging.info('Model %s created, param count: %d' %
                     (args.model, sum([m.numel() for m in model.parameters()])))

    data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0)

    if args.num_gpu > 1:
        if args.amp:
            logging.warning(
                'AMP does not work well with nn.DataParallel, disabling. Use distributed mode for multi-GPU AMP.')
            args.amp = False
        model = nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()

    else:
        model.cuda()


    optimizer = create_optimizer(args, model)

    use_amp = False
    if has_apex and args.amp:
        print('Using amp.')
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
        use_amp = True
    else:
        print('Do NOT use amp.')
    if args.local_rank == 0:
        logging.info('NVIDIA APEX {}. AMP {}.'.format(
            'installed' if has_apex else 'not installed', 'on' if use_amp else 'off'))

    # optionally resume from a checkpoint
    resume_state = {}
    resume_epoch = None
    if args.resume:
        resume_state, resume_epoch = resume_checkpoint(model, args.resume)
    if resume_state and not args.no_resume_opt:
        if 'optimizer' in resume_state:
            if args.local_rank == 0:
                logging.info('Restoring Optimizer state from checkpoint')
            optimizer.load_state_dict(resume_state['optimizer'])
        if use_amp and 'amp' in resume_state and 'load_state_dict' in amp.__dict__:
            if args.local_rank == 0:
                logging.info('Restoring NVIDIA AMP state from checkpoint')
            amp.load_state_dict(resume_state['amp'])
    resume_state = None

    if args.freeze_binary:
        Model_freeze_binary(model)



    if args.distributed:
        if args.sync_bn:
            try:
                if has_apex:
                    model = convert_syncbn_model(model)
                else:
                    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
                if args.local_rank == 0:
                    logging.info('Converted model to use Synchronized BatchNorm.')
            except Exception as e:
                logging.error('Failed to enable Synchronized BatchNorm. Install Apex or Torch >= 1.1')
        if has_apex:
            model = DDP(model, delay_allreduce=True)
        else:
            if args.local_rank == 0:
                logging.info("Using torch DistributedDataParallel. Install NVIDIA Apex for Apex DDP.")
            model = DDP(model, device_ids=[args.local_rank])  # can use device str in Torch >= 1.1
        # NOTE: EMA model does not need to be wrapped by DDP

    lr_scheduler, num_epochs = create_scheduler(args, optimizer)
    start_epoch = 0  # default; overridden below by --start-epoch or a resumed checkpoint
    if args.start_epoch is not None:
        # a specified start_epoch will always override the resume epoch
        start_epoch = args.start_epoch
    elif resume_epoch is not None:
        start_epoch = resume_epoch
    if args.reset_lr_scheduler is not None:
        lr_scheduler.base_values = len(lr_scheduler.base_values)*[args.reset_lr_scheduler]
        lr_scheduler.step(start_epoch)

    if lr_scheduler is not None and start_epoch > 0:
        lr_scheduler.step(start_epoch)

    if args.local_rank == 0:
        logging.info('Scheduled epochs: {}'.format(num_epochs))

    # Using pruner to get sparse weights
    if args.prune:
        pruner = Pruner(model, 0, 100, 0.75)
    else:
        pruner = None

    dataset_train = torchvision.datasets.CIFAR100(root='~/Downloads/CIFAR100', train=True, download=True)

    collate_fn = None
    if args.prefetcher and args.mixup > 0:
        collate_fn = FastCollateMixup(args.mixup, args.smoothing, args.num_classes)

    loader_train = create_loader_CIFAR100(
        dataset_train,
        input_size=data_config['input_size'],
        batch_size=args.batch_size,
        is_training=True,
        use_prefetcher=args.prefetcher,
        rand_erase_prob=args.reprob,
        rand_erase_mode=args.remode,
        rand_erase_count=args.recount,
        color_jitter=args.color_jitter,
        auto_augment=args.aa,
        interpolation='random',
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=args.workers,
        distributed=args.distributed,
        collate_fn=collate_fn,
        is_clean_data=args.clean_train,
    )


    dataset_eval = torchvision.datasets.CIFAR100(root='~/Downloads/CIFAR100', train=False, download=True)

    loader_eval = create_loader_CIFAR100(
        dataset_eval,
        input_size=data_config['input_size'],
        batch_size=4 * args.batch_size,
        is_training=False,
        use_prefetcher=args.prefetcher,
        interpolation=data_config['interpolation'],
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=args.workers,
        distributed=args.distributed,
    )

    if args.mixup > 0.:
        # smoothing is handled with mixup label transform
        train_loss_fn = SoftTargetCrossEntropy(multiplier=args.softmax_multiplier).cuda()
        validate_loss_fn = nn.CrossEntropyLoss().cuda()
    elif args.smoothing:
        train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda()
        validate_loss_fn = nn.CrossEntropyLoss().cuda()
    else:
        train_loss_fn = nn.CrossEntropyLoss().cuda()
        validate_loss_fn = train_loss_fn

    eval_metric = args.eval_metric
    best_metric = None
    best_epoch = None
    saver = None
    saver_last_10_epochs = None
    output_dir = ''
    if args.local_rank == 0:
        output_base = args.output if args.output else './output'
        exp_name = '-'.join([
            datetime.now().strftime("%Y%m%d-%H%M%S"),
            args.model,
            str(data_config['input_size'][-1])
        ])
        output_dir = get_outdir(output_base, 'train', exp_name)
        decreasing = True if eval_metric == 'loss' else False
        os.makedirs(output_dir+'/Top')
        os.makedirs(output_dir+'/Last')
        saver = CheckpointSaver(checkpoint_dir=output_dir + '/Top', decreasing=decreasing, max_history=10)  # Save the results of the top 10 epochs
        saver_last_10_epochs = CheckpointSaver(checkpoint_dir=output_dir + '/Last', decreasing=decreasing, max_history=10) # Save the results of the last 10 epochs
        with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
            f.write(args_text)
            f.write('==============================')
            f.write(model.__str__())

    tensorboard_writer = SummaryWriter(output_dir)

    try:
        for epoch in range(start_epoch, num_epochs):

            global alpha
            alpha = get_alpha(epoch, args)

            if args.distributed:
                loader_train.sampler.set_epoch(epoch)

            if pruner:
                pruner.on_epoch_begin(epoch)  # pruning

            train_metrics = train_epoch(
                epoch, model, loader_train, optimizer, train_loss_fn, args,
                lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
                use_amp=use_amp, tensorboard_writer=tensorboard_writer,
                pruner = pruner)

            if pruner:
                pruner.print_statistics()

            eval_metrics = validate(model, loader_eval, validate_loss_fn, args, tensorboard_writer=tensorboard_writer, epoch=epoch)

            if lr_scheduler is not None:
                # step LR for next epoch
                lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])

            update_summary(
                epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),
                write_header=best_metric is None)

            if saver is not None:
                # save proper checkpoint with eval metric
                save_metric = eval_metrics[eval_metric]
                best_metric, best_epoch = saver.save_checkpoint(
                    model, optimizer, args,
                    epoch=epoch, metric=save_metric, use_amp=use_amp)
            if saver_last_10_epochs is not None:
                # save the checkpoint in each of the last 10 epochs
                _, _ = saver_last_10_epochs.save_checkpoint(
                    model, optimizer, args,
                    epoch=epoch, metric=epoch, use_amp=use_amp)



    except KeyboardInterrupt:
        pass
    if best_metric is not None:
        logging.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
    logging.info('The checkpoint of the last epoch is: \n')
    logging.info(saver_last_10_epochs.checkpoint_files[0][0])
Example No. 19
log_dir = "{home}/logs/".format(home=Path.home())

if not os.path.isdir(log_dir):
    print("no logging directory exists. Creating one...")
    os.makedirs(log_dir)

formatting = logging.Formatter(
    '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler = RotatingFileHandler(
    "{home}/logs/Spotify-pruner.log".format(home=Path.home()),
    maxBytes=1024 * 1024,
    backupCount=5)
handler.setFormatter(formatting)
logger = logging.Logger(name="API-calls")
logger.addHandler(handler)

p = Pruner()
m = Mirror()
if "prune" in sys.argv:
    p.prune_playlist()
    logger.info("playlist prune call completed")
if "image" in sys.argv:
    m.update_image()
    logger.info("image update call completed")
if "tracks" in sys.argv:
    m.update_tracks()
    logger.info("tracks update call completed")
if "title" in sys.argv:
    m.update_title()
    logger.info("title update call completed")
Example No. 20
    def vgg19(self,
              inputs=None,
              is_train=True,
              reload_w=None,
              num_classes=None):

        model = Pruner(reload_file=reload_w)

        x = inputs

        init_size = 64

        for i in range(4):
            x = model._add_layer(x,
                                 mode="conv",
                                 out_c=init_size * (2**i),
                                 k_size=3,
                                 strides=1,
                                 with_bn=False)
            x = model._add_layer(x,
                                 mode="conv",
                                 out_c=init_size * (2**i),
                                 k_size=3,
                                 strides=1,
                                 with_bn=False)
            x = model.pool_layer(x, "max", pool_size=2, strides=2)

        x = model._add_layer(x,
                             mode="conv",
                             out_c=512,
                             k_size=3,
                             strides=1,
                             with_bn=False)
        x = model._add_layer(x,
                             mode="conv",
                             out_c=512,
                             k_size=3,
                             strides=1,
                             with_bn=False)
        x = model._add_layer(x,
                             mode="conv",
                             out_c=512,
                             k_size=3,
                             strides=1,
                             with_bn=False)
        x = model.gmp_layer(x)

        x = model._add_layer(x, mode="fc", out_c=1024, with_bn=False)
        x = model._add_layer(x, mode="fc", out_c=1024, with_bn=False)
        x = model._add_layer(x,
                             mode="fc",
                             out_c=num_classes,
                             with_bn=False,
                             act=None)

        return x, model