Example #1
    def evaluate_with_refreshed_validation_set(self, data):
        X_val_backup = data["X_val_backup"]
        y_val_backup = data["y_val_backup"]
        # sample at most 5000 validation points so smaller datasets do not raise
        sample_size = min(5000, len(X_val_backup))
        ivb = np.random.choice(len(X_val_backup), sample_size, replace=False)
        X_val_backup = X_val_backup[ivb]
        y_val_backup = y_val_backup[ivb]

        scores = self.model.evaluate(X_val_backup, y_val_backup, verbose=2)

        test_loss = scores[0]
        test_acc = scores[1]
        log_and_print(f"Test loss: {test_loss}")
        log_and_print(f"Test accuracy: {test_acc}")
        return test_loss, test_acc
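A minimal call-site sketch for the method above, assuming `child_model` is an instance of the class that owns it and that `data` holds NumPy arrays (the shapes below are placeholders, not from the original):

    import numpy as np

    # placeholder backup validation set: 6000 32x32 RGB images, 10 one-hot classes
    data = {
        "X_val_backup": np.random.rand(6000, 32, 32, 3).astype("float32"),
        "y_val_backup": np.eye(10)[np.random.randint(0, 10, 6000)],
    }
    # child_model is an assumed instance exposing the method above
    test_loss, test_acc = child_model.evaluate_with_refreshed_validation_set(data)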
Example #2
    def build_prepared_model(self):

        if self.config["model"].lower()=="mobilenetv2":
            base_model = MobileNetV2(
                input_shape=self.input_shape,
                weights=self.config['weights'],
                include_top=False
            )
        elif self.config["model"].lower()=="inceptionv3":
            base_model = InceptionV3(
                input_shape=self.input_shape,
                weights=self.config['weights'],
                include_top=False
            )

        # add a global spatial average pooling layer
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        # add a fully-connected layer
        x = Dense(256, activation="relu")(x)
        x = Dropout(0.7)(x)
        # and a logistic layer
        predictions = Dense(self.num_classes, activation="softmax")(x)

        model = Model(inputs=base_model.input, outputs=predictions)

        for layer in model.layers:
            layer.trainable = True

        adam_opt = optimizers.Adam(
            lr=0.00001,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=None,
            decay=0.0,
            amsgrad=False,
            clipnorm=1.0,
        )
        model.compile(
            loss="categorical_crossentropy", optimizer=adam_opt, metrics=["accuracy"]
        )
        log_and_print(
            f"{self.config['model']} model built as child model.\n Model summary:",
            self.config['logging'],
        )
        print(model.summary())
        return model
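A sketch of the configuration dict this builder appears to read; only the keys actually referenced above (`model`, `weights`, `logging`) are shown, and the values are illustrative assumptions:

    config = {
        "model": "MobileNetV2",   # or "InceptionV3"
        "weights": "imagenet",    # None would train the base from scratch
        "logging": None,          # forwarded to log_and_print
    }
    # builder is an assumed instance that also defines self.input_shape,
    # self.num_classes, and self.config = config
    model = builder.build_prepared_model()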
Example #3
    def evaluate(self, trial_no, trial_hyperparams):
        """Evaluates objective function

        Trains the child model k times with same augmentation hyperparameters.
        k is determined by the user by `opt_samples` argument.

        Args:
            trial_no (int): no of trial. needed for recording to notebook
            trial_hyperparams (list)
        Returns:
            float: trial-cost = 1 - avg. rewards from samples
        """

        augmented_data = augment_by_policy(
            self.data["X_train"], self.data["y_train"], *trial_hyperparams
        )

        sample_rewards = []
        for sample_no in range(1, self.opt_samples + 1):
            self.child_model.load_pre_augment_weights()
            # TRAIN
            history = self.child_model.fit(self.data, augmented_data)
            #
            reward = self.calculate_reward(history)
            sample_rewards.append(reward)
            self.notebook.record(
                trial_no, trial_hyperparams, sample_no, reward, history
            )

        trial_cost = 1 - np.mean(sample_rewards)
        self.notebook.save()

        log_and_print(
            f"{str(trial_no)}, {str(trial_cost)}, {str(trial_hyperparams)}",
            self.logging,
        )

        return trial_cost
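A sketch of how an optimization loop might drive `evaluate`; the hyperparameter list below is only a placeholder for whatever format `augment_by_policy` expects, and `objective` is an assumed instance of the class above:

    n_trials = 5
    for trial_no in range(1, n_trials + 1):
        # placeholder policy; a real optimizer would propose these values
        trial_hyperparams = ["rotate", 0.5, 0.5, "sharpen", 0.3, 0.5]
        trial_cost = objective.evaluate(trial_no, trial_hyperparams)
        print(trial_no, trial_cost)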
Example #4
    def build_mobilenetv2(self):
        mobilenet_v2 = MobileNetV2(
            input_shape=self.input_shape, weights=None, include_top=False
        )

        # add a global spatial average pooling layer
        x = mobilenet_v2.output
        x = GlobalAveragePooling2D()(x)
        # add a fully-connected layer
        x = Dense(512, activation="relu")(x)
        x = Dropout(0.1)(x)
        # and a logistic layer
        predictions = Dense(self.num_classes, activation="softmax")(x)

        model = Model(inputs=mobilenet_v2.input, outputs=predictions)

        for layer in model.layers:
            layer.trainable = True

        adam_opt = optimizers.Adam(
            lr=0.001,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=None,
            decay=0.0,
            amsgrad=False,
            clipnorm=1.0,
        )
        model.compile(
            loss="categorical_crossentropy", optimizer=adam_opt, metrics=["accuracy"]
        )
        log_and_print(
            f"{self.model_name} model built as child model.\n Model summary:",
            self.logging,
        )
        print(model.summary())
        return model
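A short training sketch for the returned model, assuming a 32x32 RGB, 10-class setup to match `input_shape` and `num_classes`; the data here is random placeholder data and `child` is an assumed instance of the class above:

    import numpy as np

    model = child.build_mobilenetv2()

    # placeholder training data with one-hot labels
    X_train = np.random.rand(256, 32, 32, 3).astype("float32")
    y_train = np.eye(10)[np.random.randint(0, 10, 256)]

    model.fit(X_train, y_train, batch_size=32, epochs=2, validation_split=0.1)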
Example #5
    def build_wrn(self):
        # For WRN-16-8 put N = 2, k = 8
        # For WRN-28-10 put N = 4, k = 10
        # For WRN-40-4 put N = 6, k = 4
        _depth = int(self.model_name.split("_")[1])  # e.g. wrn_[40]_4
        _width = int(self.model_name.split("_")[2])  # e.g. wrn_40_[4]
        model = WideResidualNetwork(
            depth=_depth,
            width=_width,
            dropout_rate=0.0,
            include_top=True,
            weights=None,
            input_tensor=None,
            input_shape=self.input_shape,
            classes=self.num_classes,
            activation="softmax",
        )

        adam_opt = optimizers.Adam(
            lr=0.001,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=None,
            decay=0.0,
            amsgrad=False,
            clipnorm=1.0,
        )
        model.compile(
            loss="categorical_crossentropy", optimizer=adam_opt, metrics=["accuracy"]
        )
        log_and_print(
            f"{self.model_name} model built as child model.\n Model summary:",
            self.logging,
        )
        print(model.summary())
        return model
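The builder relies on a `wrn_<depth>_<width>` naming convention; a small standalone sketch of that parsing, with a validity check that is an addition, not part of the original:

    def parse_wrn_name(model_name):
        # e.g. "wrn_28_10" -> depth 28, width 10 (a WRN-28-10)
        parts = model_name.split("_")
        if len(parts) != 3 or parts[0] != "wrn":
            raise ValueError(f"expected 'wrn_<depth>_<width>', got {model_name!r}")
        return int(parts[1]), int(parts[2])

    depth, width = parse_wrn_name("wrn_40_4")   # (40, 4)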
Example #6
    def evaluate(self, trial_no, trial_hyperparams):
        """Evaluates objective function

        Trains the child model k times with same augmentation hyperparameters.
        k is determined by the user by `opt_samples` argument.

        Args:
            trial_no (int): no of trial. needed for recording to notebook
            trial_hyperparams (list)
        Returns:
            float: trial-cost = 1 - avg. rewards from samples
        """

        augmented_data = augment_by_policy(self.data["X_train"],
                                           self.data["y_train"],
                                           *trial_hyperparams)

        sample_rewards = []
        #pytorch
        layers = 2
        init_channels = 24
        use_aux = True
        epochs = 30
        lr = 0.01
        momentum = 0.995
        weight_decay = 0.995
        drop_path_prob = 0.2
        genotype = "Genotype(normal=[[('dil_conv_3x3', 0), ('sep_conv_5x5', 1)], [('sep_conv_3x3', 1), ('avg_pool_3x3', 0)],[('dil_conv_3x3', 1), ('dil_conv_3x3', 0)], [('sep_conv_3x3', 3), ('skip_connect', 1)]], normal_concat=range(2, 6), reduce=[[('sep_conv_3x3', 1), ('dil_conv_5x5', 0)], [('skip_connect', 0), ('sep_conv_5x5', 1)], [('sep_conv_5x5', 1),('sep_conv_5x5', 0)], [('max_pool_3x3', 1), ('sep_conv_3x3', 0)]], reduce_concat=range(2, 6))"
        model = AugmentCNN(self.input_size, self.input_channels, init_channels,
                           self.n_classes, layers, use_aux, genotype)
        model = nn.DataParallel(model, device_ids=[0]).to(device)

        # model size
        mb_params = utils.param_size(model)
        logger.info("Model size = {:.3f} MB".format(mb_params))

        # weights optimizer
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay)
        # assumed definitions: the loop below uses lr_scheduler and criterion
        # without defining them anywhere in the original snippet
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
        criterion = nn.CrossEntropyLoss().to(device)
        """
        for sample_no in range(1, self.opt_samples + 1):
            self.child_model.load_pre_augment_weights()
            # TRAIN
            history = self.child_model.fit(self.data, augmented_data)
            #
            reward = self.calculate_reward(history)
            sample_rewards.append(reward)
            self.notebook.record(
                trial_no, trial_hyperparams, sample_no, reward, history
            )

        """
        best_top1 = -9999
        for epoch in range(epochs):
            lr_scheduler.step()
            drop_prob = drop_path_prob * epoch / epochs
            model.module.drop_path_prob(drop_prob)

            # training
            train(train_loader, model, optimizer, criterion, epoch)

            # validation
            cur_step = (epoch + 1) * len(train_loader)
            top1 = validate(valid_loader, model, criterion, epoch, cur_step)

            # save
            if best_top1 < top1:
                best_top1 = top1
                is_best = True
            else:
                is_best = False
        print('best_top1:', best_top1)
        #sample_rewards.append(reward)
        #self.notebook.record(
        #    trial_no, trial_hyperparams, sample_no, reward, history
        #)
        #trial_cost = 1 - np.mean(sample_rewards)
        #self.notebook.save()

        log_and_print(
            f"{str(trial_no)}, {str(best_top1)}, {str(trial_hyperparams)}",
            self.logging,
        )

        #return trial_cost
        return best_top1
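The PyTorch path above uses `train_loader`, `valid_loader`, `criterion`, and `device` without defining them; a minimal setup sketch under standard PyTorch/torchvision assumptions (the dataset, batch size, and transforms are placeholders):

    import torch
    import torch.nn as nn
    from torch.utils.data import DataLoader
    from torchvision import datasets, transforms

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    criterion = nn.CrossEntropyLoss().to(device)

    # placeholder data pipeline; the original training data is not shown above
    transform = transforms.ToTensor()
    train_set = datasets.CIFAR10("./data", train=True, download=True, transform=transform)
    valid_set = datasets.CIFAR10("./data", train=False, download=True, transform=transform)
    train_loader = DataLoader(train_set, batch_size=64, shuffle=True, num_workers=2)
    valid_loader = DataLoader(valid_set, batch_size=64, shuffle=False, num_workers=2)

The `train` and `validate` helpers called in the epoch loop are likewise assumed to come from the surrounding training script and are not sketched here.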